# handlingTasks.py
|
||
# Refactored for clarity and consolidation
|
||
|
||
import uuid
|
||
import asyncio
|
||
import logging
|
||
import json
|
||
import time
|
||
from typing import Dict, Any, Optional, List, Union
|
||
from datetime import datetime, UTC
|
||
from modules.datamodels.datamodelWorkflow import (
|
||
TaskStep,
|
||
TaskContext,
|
||
ReviewResult,
|
||
TaskPlan,
|
||
TaskResult,
|
||
ReviewContext,
|
||
TaskStatus,
|
||
ActionResult
|
||
)
|
||
from modules.datamodels.datamodelChat import (
|
||
WorkflowResult,
|
||
ChatWorkflow,
|
||
ChatMessage,
|
||
ChatDocument
|
||
)
|
||
from modules.workflows.processing.executionState import TaskExecutionState
|
||
from modules.workflows.processing.promptFactory import (
|
||
createTaskPlanningPrompt,
|
||
createActionDefinitionPrompt,
|
||
createResultReviewPrompt,
|
||
createActionSelectionPrompt,
|
||
createActionParameterPrompt,
|
||
createRefinementPrompt
|
||
)
|
||
from modules.workflows.processing.promptFactoryPlaceholders import (
|
||
createTaskPlanningPromptTemplate,
|
||
createActionDefinitionPromptTemplate,
|
||
createActionSelectionPromptTemplate,
|
||
createActionParameterPromptTemplate,
|
||
createRefinementPromptTemplate,
|
||
createResultReviewPromptTemplate,
|
||
extractUserPrompt,
|
||
extractAvailableDocuments,
|
||
extractWorkflowHistory,
|
||
extractAvailableMethods,
|
||
extractUserLanguage,
|
||
extractReviewContent
|
||
)
|
||
from modules.services.serviceDocument.mainServiceDocumentGeneration import DocumentGenerationService
|
||
from modules.workflows.processing.promptFactory import methods
|
||
from modules.workflows.processing.executionState import should_continue
|
||
from modules.datamodels.datamodelAi import AiCallOptions, OperationType, ProcessingMode, Priority
|
||
|
||
logger = logging.getLogger(__name__)
|
||
|
||
class WorkflowStoppedException(Exception):
    """Raised to abort execution once the user has stopped the workflow."""
|
||
|
||
class HandlingTasks:
|
||
def __init__(self, services, workflow=None):
    """Bind the injected service container and (optionally) the active workflow.

    Also wires up a DocumentGenerationService backed by the service center.
    """
    self.workflow = workflow
    self.services = services
    self.documentGenerator = DocumentGenerationService(services.center)
|
||
|
||
def _checkWorkflowStopped(self):
|
||
"""
|
||
Check if workflow has been stopped by user and raise exception if so.
|
||
This function centralizes all workflow stop checking logic to avoid code duplication.
|
||
"""
|
||
try:
|
||
# Get the current workflow status from the database to avoid stale data
|
||
current_workflow = services.chatInterface.getWorkflow(self.service.workflow.id)
|
||
if current_workflow and current_workflow.status == "stopped":
|
||
logger.info("Workflow stopped by user, aborting execution")
|
||
raise WorkflowStoppedException("Workflow was stopped by user")
|
||
except WorkflowStoppedException:
|
||
# Re-raise the WorkflowStoppedException immediately
|
||
raise
|
||
except Exception as e:
|
||
# If we can't get the current status due to other database issues, fall back to the in-memory object
|
||
logger.warning(f"Could not check current workflow status from database: {str(e)}")
|
||
if self.service.workflow.status == "stopped":
|
||
logger.info("Workflow stopped by user (from in-memory object), aborting execution")
|
||
raise WorkflowStoppedException("Workflow was stopped by user")
|
||
|
||
async def generateTaskPlan(self, userInput: str, workflow) -> TaskPlan:
    """Generate a high-level task plan for the workflow.

    Pipeline: build a planning TaskContext -> fill the planning prompt
    template with placeholders -> call the AI service -> parse/validate the
    JSON task plan -> detect the user language -> build TaskStep objects ->
    assemble a TaskPlan, set workflow totals, and post a chat message.

    Args:
        userInput: The raw user request that the plan should satisfy.
        workflow: The workflow object the plan belongs to (must expose `id`).

    Returns:
        TaskPlan: overview, validated list of TaskStep, and a user message.

    Raises:
        WorkflowStoppedException: if the user stopped the workflow.
        ValueError / Exception: on missing AI response, invalid JSON, failed
            validation, or an empty task list (all re-raised after logging).
    """
    try:
        # Abort early if the user already stopped the workflow.
        self._checkWorkflowStopped()

        logger.info(f"=== STARTING TASK PLAN GENERATION ===")
        logger.info(f"Workflow ID: {workflow.id}")
        logger.info(f"User Input: {userInput}")

        # Re-check right before the (potentially expensive) AI call.
        self._checkWorkflowStopped()

        # TaskContext requires a TaskStep, so create a minimal placeholder
        # step that carries the user's objective for planning purposes.
        planning_task_step = TaskStep(
            id="planning",
            objective=userInput,
            dependencies=[],
            success_criteria=[],
            estimated_complexity="medium"
        )

        # Fresh, empty context: no documents, no history, no retry state.
        task_planning_context = TaskContext(
            task_step=planning_task_step,
            workflow=workflow,
            workflow_id=workflow.id,
            available_documents=None,
            available_connections=None,
            previous_results=[],
            previous_handover=None,
            improvements=[],
            retry_count=0,
            previous_action_results=[],
            previous_review_result=None,
            is_regeneration=False,
            failure_patterns=[],
            failed_actions=[],
            successful_actions=[],
            criteria_progress={
                'met_criteria': set(),
                'unmet_criteria': set(),
                'attempt_history': []
            }
        )

        # Template with {PLACEHOLDER} slots; filled by the AI service call.
        task_planning_prompt_template = createTaskPlanningPromptTemplate()

        # Extract placeholder content from the planning context.
        # NOTE(review): `self.service` (singular) is used here and below,
        # while __init__ only assigns `self.services` — presumably
        # `self.service` is set elsewhere in the class; verify.
        user_prompt = extractUserPrompt(task_planning_context)
        available_documents = extractAvailableDocuments(task_planning_context)
        workflow_history = extractWorkflowHistory(self.service, task_planning_context)

        placeholders = {
            "USER_PROMPT": user_prompt,
            "AVAILABLE_DOCUMENTS": available_documents,
            "WORKFLOW_HISTORY": workflow_history
        }

        logger.info("=== TASK PLANNING PROMPT SENT TO AI ===")
        # Trace template and placeholders for offline debugging.
        self.writeTraceLog("Task Plan Prompt", task_planning_prompt_template)
        self.writeTraceLog("Task Plan Placeholders", placeholders)

        # Centralized AI call: planning runs at quality/detailed settings
        # with a cost and latency ceiling.
        options = AiCallOptions(
            operationType=OperationType.GENERATE_PLAN,
            priority=Priority.QUALITY,
            compressPrompt=False,
            compressContext=False,
            processingMode=ProcessingMode.DETAILED,
            maxCost=0.10,
            maxProcessingTime=30
        )

        prompt = await self.services.ai.callAi(
            prompt=task_planning_prompt_template,
            placeholders=placeholders,
            options=options
        )

        if not prompt:
            raise ValueError("AI service returned no response for task planning")

        logger.info("=== TASK PLANNING AI RESPONSE RECEIVED ===")
        logger.info(f"Response length: {len(prompt) if prompt else 0}")
        self.writeTraceLog("Task Plan Response", prompt)

        # Extract the outermost {...} span from the response and parse it.
        # On any parse failure, fall through with an empty plan so the
        # validation step below produces a consistent error path.
        try:
            json_start = prompt.find('{')
            json_end = prompt.rfind('}') + 1
            if json_start == -1 or json_end == 0:
                raise ValueError("No JSON found in response")
            json_str = prompt[json_start:json_end]
            task_plan_dict = json.loads(json_str)

            if 'tasks' not in task_plan_dict:
                raise ValueError("Task plan missing 'tasks' field")
        except Exception as e:
            logger.error(f"Error parsing task plan response: {str(e)}")
            task_plan_dict = {'tasks': []}

        if not self._validateTaskPlan(task_plan_dict):
            logger.error("Generated task plan failed validation")
            logger.error(f"AI Response: {prompt}")
            logger.error(f"Parsed Task Plan: {json.dumps(task_plan_dict, indent=2)}")
            raise Exception("AI-generated task plan failed validation - AI is required for task planning")

        if not task_plan_dict.get('tasks'):
            raise ValueError("Task plan contains no tasks")

        # LANGUAGE DETECTION — decided once per workflow. Priority:
        # 1) languageUserDetected from the AI response,
        # 2) service.user.language, 3) "en".
        detected_language = task_plan_dict.get('languageUserDetected', '').strip()
        service_user_language = getattr(self.service.user, 'language', '') if self.service and self.service.user else ''

        # Only 2-letter ISO-639-1 style codes ("en", "de", "fr") are accepted.
        if detected_language and len(detected_language) == 2:
            user_language = detected_language
            logger.info(f"Using detected language from AI response: {user_language}")
        elif service_user_language and len(service_user_language) == 2:
            user_language = service_user_language
            logger.info(f"Using language from service user object: {user_language}")
        else:
            user_language = "en"
            logger.info(f"Using default language: {user_language}")

        # Persist the decision on the service user so the rest of the
        # workflow sees the same language.
        if self.service and self.service.user:
            self.service.user.language = user_language
            logger.info(f"Set workflow user language to: {user_language}")

        # Convert the raw task dicts into TaskStep objects, skipping
        # malformed entries rather than failing the whole plan.
        tasks = []
        for i, task_dict in enumerate(task_plan_dict.get('tasks', [])):
            if not isinstance(task_dict, dict):
                logger.warning(f"Skipping invalid task {i+1}: not a dictionary")
                continue

            # Back-compat: older responses use 'description' for 'objective'.
            if 'description' in task_dict and 'objective' not in task_dict:
                task_dict['objective'] = task_dict.pop('description')

            try:
                task = TaskStep(**task_dict)
                tasks.append(task)
            except Exception as e:
                logger.warning(f"Skipping invalid task {i+1}: {str(e)}")
                continue

        if not tasks:
            raise ValueError("No valid tasks could be created from AI response")

        task_plan = TaskPlan(
            overview=task_plan_dict.get('overview', ''),
            tasks=tasks,
            userMessage=task_plan_dict.get('userMessage', '')
        )

        # Set workflow totals for progress tracking.
        total_tasks = len(tasks)
        if total_tasks == 0:
            # Defensive: unreachable given the `if not tasks` check above.
            raise ValueError("Task plan contains no valid tasks")

        self.setWorkflowTotals(total_tasks=total_tasks)

        logger.info(f"Task plan generated successfully with {len(tasks)} tasks")
        logger.info(f"Workflow user language set to: {user_language}")

        # Post the plan to the chat so the user sees what will happen.
        await self.createTaskPlanMessage(task_plan, workflow)

        return task_plan
    except Exception as e:
        logger.error(f"Error in generateTaskPlan: {str(e)}")
        raise
|
||
|
||
async def createTaskPlanMessage(self, task_plan: TaskPlan, workflow):
    """Create a chat message summarizing the task plan for the user.

    Builds a markdown summary from the plan's overall userMessage and each
    task's userMessage, persists it through the chat interface, appends it
    to the workflow's message list, and updates workflow progress counters.

    Errors are logged but never propagated: a failed summary message must
    not abort the workflow.

    Args:
        task_plan: The generated TaskPlan (userMessage + tasks).
        workflow: The workflow the message belongs to (id, messages,
            currentRound are read).
    """
    try:
        # Build the user-facing task plan summary.
        task_summary = f"📋 **Task Plan**\n\n"

        overall_message = task_plan.userMessage
        if overall_message:
            task_summary += f"{overall_message}\n\n"

        # One line per task that carries a user-friendly message, with a
        # blank line after each task. (Index was unused in the original
        # enumerate loop, so iterate directly.)
        for task in task_plan.tasks:
            if task.userMessage:
                task_summary += f"💬 {task.userMessage}\n"
            task_summary += "\n"

        # Persist the summary as an assistant "step" message.
        message_data = {
            "workflowId": workflow.id,
            "role": "assistant",
            "message": task_summary,
            "status": "step",
            "sequenceNr": len(workflow.messages) + 1,
            "publishedAt": self.services.utils.getUtcTimestamp(),
            "documentsLabel": "task_plan",
            "documents": [],
            # Use the current workflow round instead of a hardcoded 1.
            "roundNumber": workflow.currentRound,
            # The plan precedes individual tasks; 1 keeps the UI from
            # filtering the message out.
            "taskNumber": 1,
            "actionNumber": 0,
            "taskProgress": "pending"
        }

        # FIX: the original called the undefined global `services`; the
        # chat interface lives on the injected container `self.services`.
        message = self.services.chatInterface.createMessage(message_data)
        if message:
            workflow.messages.append(message)

            # Update workflow counters now that the plan exists:
            # currentTask=1, currentAction=0, totalTasks=len(tasks),
            # totalActions=0.
            self.updateWorkflowAfterTaskPlanCreated(len(task_plan.tasks))

            logger.info(f"Task plan message created with {len(task_plan.tasks)} tasks")
        else:
            logger.error("Failed to create task plan message")

    except Exception as e:
        logger.error(f"Error creating task plan message: {str(e)}")
|
||
|
||
async def generateTaskActions(self, task_step, workflow, previous_results=None, enhanced_context=None) -> List["TaskAction"]:
    """Generate executable actions for a given task step via the AI service.

    Builds (or reuses) a TaskContext, fills the action-definition prompt
    template, calls the AI, parses/validates the JSON action list, and
    converts each entry into a TaskAction via self.createTaskAction.

    Note on the return annotation: `TaskAction` is not among this module's
    visible imports, so it is written as a string forward reference to
    avoid a NameError when the `def` is evaluated — TODO confirm where
    TaskAction is actually defined/imported.

    Args:
        task_step: The TaskStep to generate actions for.
        workflow: The owning workflow (id is read).
        previous_results: Optional results from earlier tasks.
        enhanced_context: Optional TaskContext carrying retry state
            (retry_count, improvements, failure patterns, criteria progress).

    Returns:
        List of TaskAction objects, or [] if anything fails (errors are
        logged, not raised — unlike generateTaskPlan, which re-raises).
    """
    try:
        # Abort early if the user stopped the workflow.
        self._checkWorkflowStopped()

        retry_info = f" (Retry #{enhanced_context.retry_count})" if enhanced_context and enhanced_context.retry_count > 0 else ""
        logger.info(f"Generating actions for task: {task_step.objective}{retry_info}")

        # On retries, log which success criteria are met/unmet so the
        # operator can see whether attempts are converging.
        if enhanced_context and hasattr(enhanced_context, 'criteria_progress') and enhanced_context.criteria_progress is not None:
            progress = enhanced_context.criteria_progress
            logger.info(f"Retry attempt {enhanced_context.retry_count} - Criteria progress:")
            if progress.get('met_criteria'):
                logger.info(f" Met criteria: {', '.join(progress['met_criteria'])}")
            if progress.get('unmet_criteria'):
                logger.warning(f" Unmet criteria: {', '.join(progress['unmet_criteria'])}")

            # Compare the last two attempts' quality scores to show a trend.
            if progress.get('attempt_history'):
                recent_attempts = progress['attempt_history'][-2:]  # Last 2 attempts
                if len(recent_attempts) >= 2:
                    prev_score = recent_attempts[0].get('quality_score', 0)
                    curr_score = recent_attempts[1].get('quality_score', 0)
                    if curr_score > prev_score:
                        logger.info(f" Quality improving: {prev_score} -> {curr_score}")
                    elif curr_score < prev_score:
                        logger.warning(f" Quality declining: {prev_score} -> {curr_score}")
                    else:
                        logger.info(f" Quality stable: {curr_score}")

        # Dump the full retry context at debug level.
        if enhanced_context and enhanced_context.retry_count > 0:
            logger.info("=== RETRY CONTEXT FOR ACTION GENERATION ===")
            logger.info(f"Retry Count: {enhanced_context.retry_count}")
            logger.debug(f"Previous Improvements: {enhanced_context.improvements}")
            logger.debug(f"Previous Review Result: {enhanced_context.previous_review_result}")
            logger.debug(f"Failure Patterns: {enhanced_context.failure_patterns}")
            logger.debug(f"Failed Actions: {enhanced_context.failed_actions}")
            logger.debug(f"Successful Actions: {enhanced_context.successful_actions}")
            logger.info("=== END RETRY CONTEXT ===")

        logger.info("=== STARTING ACTION GENERATION ===")

        # Build the action context: copy an existing TaskContext (filling
        # None collections with sensible defaults) or start from scratch.
        if enhanced_context and isinstance(enhanced_context, TaskContext):
            action_context = TaskContext(
                task_step=enhanced_context.task_step,
                workflow=enhanced_context.workflow,
                workflow_id=enhanced_context.workflow_id,
                available_documents=enhanced_context.available_documents,
                available_connections=enhanced_context.available_connections,
                previous_results=enhanced_context.previous_results or previous_results or [],
                previous_handover=enhanced_context.previous_handover,
                improvements=enhanced_context.improvements or [],
                retry_count=enhanced_context.retry_count or 0,
                previous_action_results=enhanced_context.previous_action_results or [],
                previous_review_result=enhanced_context.previous_review_result,
                is_regeneration=enhanced_context.is_regeneration or False,
                failure_patterns=enhanced_context.failure_patterns or [],
                failed_actions=enhanced_context.failed_actions or [],
                successful_actions=enhanced_context.successful_actions or [],
                criteria_progress=enhanced_context.criteria_progress
            )
        else:
            # Fresh context: no documents, no history, no retry state.
            action_context = TaskContext(
                task_step=task_step,
                workflow=workflow,
                workflow_id=workflow.id,
                available_documents=None,
                available_connections=None,
                previous_results=previous_results or [],
                previous_handover=None,
                improvements=[],
                retry_count=0,
                previous_action_results=[],
                previous_review_result=None,
                is_regeneration=False,
                failure_patterns=[],
                failed_actions=[],
                successful_actions=[],
                criteria_progress=None
            )

        # Re-check stop status just before the AI call.
        self._checkWorkflowStopped()

        action_prompt_template = createActionDefinitionPromptTemplate()

        # Extract placeholder content.
        # NOTE(review): `self.service` (singular) vs the `self.services`
        # container assigned in __init__ — presumably set elsewhere; verify.
        user_prompt = extractUserPrompt(action_context)
        available_documents = extractAvailableDocuments(action_context)
        workflow_history = extractWorkflowHistory(self.service, action_context)
        available_methods = extractAvailableMethods(self.service)
        user_language = extractUserLanguage(self.service)

        placeholders = {
            "USER_PROMPT": user_prompt,
            "AVAILABLE_DOCUMENTS": available_documents,
            "WORKFLOW_HISTORY": workflow_history,
            "AVAILABLE_METHODS": available_methods,
            "USER_LANGUAGE": user_language
        }

        # Trace template and placeholders for offline debugging.
        self.writeTraceLog("Action Plan Prompt", action_prompt_template)
        self.writeTraceLog("Action Plan Placeholders", placeholders)

        # Centralized AI call: action planning at quality/detailed settings.
        options = AiCallOptions(
            operationType=OperationType.GENERATE_PLAN,
            priority=Priority.QUALITY,
            compressPrompt=False,
            compressContext=False,
            processingMode=ProcessingMode.DETAILED,
            maxCost=0.10,
            maxProcessingTime=30
        )

        prompt = await self.services.ai.callAi(
            prompt=action_prompt_template,
            placeholders=placeholders,
            options=options
        )

        if not prompt:
            raise ValueError("AI service returned no response")

        logger.info("=== ACTION PLAN AI RESPONSE RECEIVED ===")
        logger.info(f"Response length: {len(prompt) if prompt else 0}")
        self.writeTraceLog("Action Plan Response", prompt)

        # Extract the outermost {...} span from the response.
        json_start = prompt.find('{')
        json_end = prompt.rfind('}') + 1
        if json_start == -1 or json_end == 0:
            raise ValueError("No JSON found in response")
        json_str = prompt[json_start:json_end]

        # Parse failure degrades to {} so the 'actions' check below raises
        # a uniform error.
        try:
            action_data = json.loads(json_str)
        except Exception as e:
            logger.error(f"Error parsing action response JSON: {str(e)}")
            action_data = {}

        if 'actions' not in action_data:
            raise ValueError("Action response missing 'actions' field")

        actions = action_data['actions']
        if not actions:
            raise ValueError("Action response contains empty actions list")

        if not isinstance(actions, list):
            raise ValueError(f"Action response 'actions' field is not a list: {type(actions)}")

        if not self._validateActions(actions, action_context):
            logger.error("Generated actions failed validation")
            raise Exception("AI-generated actions failed validation - AI is required for action generation")

        # Convert each raw dict into a TaskAction; skip malformed entries.
        task_actions = []
        for i, a in enumerate(actions):
            if not isinstance(a, dict):
                logger.warning(f"Skipping invalid action {i+1}: not a dictionary")
                continue

            task_action = self.createTaskAction({
                "execMethod": a.get('method', 'unknown'),
                "execAction": a.get('action', 'unknown'),
                "execParameters": a.get('parameters', {}),
                "execResultLabel": a.get('resultLabel', ''),
                "expectedDocumentFormats": a.get('expectedDocumentFormats', None),
                "status": TaskStatus.PENDING,
                # Optional user-friendly message for the chat UI.
                "userMessage": a.get('userMessage', None)
            })

            if task_action:
                task_actions.append(task_action)
            else:
                logger.warning(f"Skipping invalid action {i+1}: failed to create TaskAction")

        # Defensive re-filter; only truthy actions were appended above.
        valid_actions = [ta for ta in task_actions if ta]

        if not valid_actions:
            raise ValueError("No valid actions could be created from AI response")

        return valid_actions
    except Exception as e:
        logger.error(f"Error in generateTaskActions: {str(e)}")
        return []
|
||
|
||
# ===== React-mode iterative functions =====
|
||
|
||
async def plan_select(self, context: TaskContext) -> Dict[str, Any]:
    """Plan step of the React loop: ask the AI to select exactly one action.

    Returns the parsed selection, shaped {"action": {method, name}}.

    Raises:
        ValueError: if the AI response contains no JSON or no 'action' dict.
    """
    template = createActionSelectionPromptTemplate()

    # Gather the placeholder values the selection prompt expects.
    fill_ins = {
        "USER_PROMPT": extractUserPrompt(context),
        "AVAILABLE_DOCUMENTS": extractAvailableDocuments(context),
        "USER_LANGUAGE": extractUserLanguage(self.service),
        "AVAILABLE_METHODS": extractAvailableMethods(self.service),
    }

    self.writeTraceLog("React Plan Selection Prompt", template)
    self.writeTraceLog("React Plan Selection Placeholders", fill_ins)

    # Plan selection runs at plan-generation quality settings.
    call_options = AiCallOptions(
        operationType=OperationType.GENERATE_PLAN,
        priority=Priority.QUALITY,
        compressPrompt=False,
        compressContext=False,
        processingMode=ProcessingMode.DETAILED,
        maxCost=0.10,
        maxProcessingTime=30
    )

    reply = await self.services.ai.callAi(
        prompt=template,
        placeholders=fill_ins,
        options=call_options
    )
    self.writeTraceLog("React Plan Selection Response", reply)

    # Carve the outermost {...} span out of the reply and parse it.
    start = reply.find('{') if reply else -1
    stop = reply.rfind('}') + 1 if reply else 0
    if start == -1 or stop == 0:
        raise ValueError("No JSON in selection response")
    selection = json.loads(reply[start:stop])
    if 'action' not in selection or not isinstance(selection['action'], dict):
        raise ValueError("Selection missing 'action'")
    return selection
|
||
|
||
async def act_execute(self, context: TaskContext, selection: Dict[str, Any], task_step: TaskStep, workflow, step_index: int) -> ActionResult:
    """Act step of the React loop: get minimal parameters from the AI, then
    execute the previously selected action.

    Args:
        context: Current TaskContext (user prompt, documents).
        selection: Output of plan_select, {"action": {method, name}}.
        task_step: The TaskStep being worked on.
        workflow: The owning workflow object.
        step_index: 1-based index of this React iteration; used in the
            result label and as the action number.

    Returns:
        ActionResult from executeSingleAction.
    """
    action = selection.get('action', {})
    prompt_template = createActionParameterPromptTemplate()

    # Extract placeholder content for the parameter prompt.
    # NOTE(review): `self.service` (singular) vs the `self.services`
    # container assigned in __init__ — presumably set elsewhere; verify.
    user_prompt = extractUserPrompt(context)
    available_documents = extractAvailableDocuments(context)
    user_language = extractUserLanguage(self.service)

    # Look up the selected action's signature from the registered methods
    # so the AI knows which parameters exist.
    method = action.get('method', '')
    name = action.get('name', '')
    action_signature = ""
    if self.service and method in methods:
        method_instance = methods[method]['instance']
        action_signature = method_instance.getActionSignature(name)

    selected_action = f"{method}.{name}"

    placeholders = {
        "USER_PROMPT": user_prompt,
        "AVAILABLE_DOCUMENTS": available_documents,
        "USER_LANGUAGE": user_language,
        "SELECTED_ACTION": selected_action,
        "ACTION_SIGNATURE": action_signature
    }

    # Trace prompt and placeholders for offline debugging.
    self.writeTraceLog("React Parameters Prompt", prompt_template)
    self.writeTraceLog("React Parameters Placeholders", placeholders)

    # Parameter suggestion runs at balanced/advanced settings with a
    # lower cost ceiling than planning.
    options = AiCallOptions(
        operationType=OperationType.ANALYSE_CONTENT,
        priority=Priority.BALANCED,
        compressPrompt=True,
        compressContext=False,
        processingMode=ProcessingMode.ADVANCED,
        maxCost=0.05,
        maxProcessingTime=30
    )

    params_resp = await self.services.ai.callAi(
        prompt=prompt_template,
        placeholders=placeholders,
        options=options
    )
    self.writeTraceLog("React Parameters Response", params_resp)

    # Carve the outermost {...} span; any parse failure degrades to an
    # empty parameter set rather than aborting the step.
    js = params_resp[params_resp.find('{'):params_resp.rfind('}')+1] if params_resp else '{}'
    try:
        param_obj = json.loads(js)
    except Exception:
        param_obj = {"parameters": {}}
    parameters = param_obj.get('parameters', {}) if isinstance(param_obj, dict) else {}

    # Minimal in-code default: inject the user's language if the AI
    # did not supply one.
    if 'language' not in parameters and hasattr(self.service, 'user') and getattr(self.service.user, 'language', None):
        parameters['language'] = self.service.user.language

    # Build a synthetic TaskAction so execution routing and result
    # labelling work the same as for pre-planned actions.
    current_round = getattr(self.workflow, 'currentRound', 0)
    current_task = getattr(self.workflow, 'currentTask', 0)
    result_label = f"round{current_round}_task{current_task}_action{step_index}_results"
    task_action = self.createTaskAction({
        "execMethod": action.get('method', ''),
        "execAction": action.get('name', ''),
        "execParameters": parameters,
        "execResultLabel": result_label,
        "status": TaskStatus.PENDING
    })
    # Delegate to the existing single-action execution flow.
    return await self.executeSingleAction(task_action, workflow, task_step, current_task, step_index, 1)
|
||
|
||
def observe_build(self, action_result: "ActionResult") -> Dict[str, Any]:
    """Observe step of the React loop: build a compact observation dict.

    FIX: the original guarded the preview loop with
    `if action_result and ...` but then dereferenced `action_result.success`
    unconditionally, crashing when action_result is None. A falsy result
    now yields a failed, empty observation instead.

    Args:
        action_result: Result of the executed action (may be None/falsy).

    Returns:
        Dict with keys: success, resultLabel, documentsCount,
        previews (up to 5 entries of {name, mime, snippet}), notes.
    """
    if not action_result:
        # Nothing to observe — report a failed, empty observation.
        return {
            "success": False,
            "resultLabel": "",
            "documentsCount": 0,
            "previews": [],
            "notes": []
        }

    previews = []
    if action_result.documents:
        # Preview at most the first 5 documents, truncating payloads
        # to 200 characters to keep the observation compact.
        for doc in action_result.documents[:5]:
            name = getattr(doc, 'documentName', '')
            mime = getattr(doc, 'mimeType', '')
            snippet = ''
            data = getattr(doc, 'documentData', None)
            if isinstance(data, str):
                snippet = data[:200]
            elif isinstance(data, dict):
                snippet = str(data)[:200]
            previews.append({"name": name, "mime": mime, "snippet": snippet})

    return {
        "success": bool(action_result.success),
        "resultLabel": action_result.resultLabel or "",
        "documentsCount": len(action_result.documents) if action_result.documents else 0,
        "previews": previews,
        "notes": []
    }
|
||
|
||
async def refine_decide(self, context: TaskContext, observation: Dict[str, Any]) -> Dict[str, Any]:
    """Refine step of the React loop: decide to continue or stop, with reason.

    Returns the AI's decision dict; falls back to
    {"decision": "continue", "reason": "default"} on malformed output.
    """
    template = createRefinementPromptTemplate()

    # extractReviewContent expects an object exposing an `observation`
    # attribute, so wrap the dict in a minimal carrier object.
    class _ObservationCarrier:
        pass
    carrier = _ObservationCarrier()
    carrier.observation = observation

    fill_ins = {
        "USER_PROMPT": extractUserPrompt(context),
        "REVIEW_CONTENT": extractReviewContent(carrier),
    }

    self.writeTraceLog("React Refinement Prompt", template)
    self.writeTraceLog("React Refinement Placeholders", fill_ins)

    # Refinement decisions use the balanced analysis profile.
    call_options = AiCallOptions(
        operationType=OperationType.ANALYSE_CONTENT,
        priority=Priority.BALANCED,
        compressPrompt=True,
        compressContext=False,
        processingMode=ProcessingMode.ADVANCED,
        maxCost=0.05,
        maxProcessingTime=30
    )

    reply = await self.services.ai.callAi(
        prompt=template,
        placeholders=fill_ins,
        options=call_options
    )
    self.writeTraceLog("React Refinement Response", reply)

    # Carve the outermost {...} span and parse; default to "continue"
    # when the reply is empty or not valid JSON.
    payload = reply[reply.find('{'):reply.rfind('}') + 1] if reply else '{}'
    try:
        return json.loads(payload)
    except Exception:
        return {"decision": "continue", "reason": "default"}
|
||
|
||
async def executeTask(self, task_step, workflow, context, task_index=None, total_tasks=None) -> TaskResult:
    """Execute all actions for a task step, with state management and retries.

    When workflow.workflowMode is 'React', run compact plan–act–observe–refine loop;
    otherwise run the Actionplan mode: generate actions, execute them in order,
    review the result, and retry the whole task up to state.max_retries times.

    Args:
        task_step: The task step to execute (objective, id, userMessage are read).
        workflow: The owning workflow; its messages list, id and currentRound
            are read, and progress counters are updated via the update* helpers.
        context: TaskContext carrying retry state; context.workflow (if set)
            selects the execution mode.
        task_index: 1-based index of this task in the plan; progress messages
            and workflow-context updates only happen when provided.
        total_tasks: Total number of tasks in the plan, for "i/n" progress text.

    Returns:
        TaskResult describing the final status of the task.
    """
    logger.info(f"=== STARTING TASK {task_index or '?'}: {task_step.objective} ===")

    # PHASE 4: Update workflow object before executing task
    # Set currentTask=task_number, currentAction=0, totalActions=0
    if task_index is not None:
        self.updateWorkflowBeforeExecutingTask(task_index)

    # Update workflow context for this task
    if task_index is not None:
        self.service.setWorkflowContext(task_number=task_index)
        # Remove the increment call that causes double-increment bug

    # Create database log entry for task start in format expected by frontend
    if task_index is not None:

        # Create a task start message for the user
        task_progress = f"{task_index}/{total_tasks}" if total_tasks is not None else str(task_index)
        task_start_message = {
            "workflowId": workflow.id,
            "role": "assistant",
            "message": f"🚀 **Task {task_progress}**",
            "status": "step",
            "sequenceNr": len(workflow.messages) + 1,
            "publishedAt": self.services.utils.getUtcTimestamp(),
            "documentsLabel": f"task_{task_index}_start",
            "documents": [],
            # Add workflow context fields
            "roundNumber": workflow.currentRound,  # Use current workflow round
            "taskNumber": task_index,
            "actionNumber": 0,
            # Add task progress status
            "taskProgress": "running"
        }

        # Add user-friendly message if available
        if task_step.userMessage:
            task_start_message["message"] += f"\n\n💬 {task_step.userMessage}"

        message = services.chatInterface.createMessage(task_start_message)
        if message:
            workflow.messages.append(message)
            logger.info(f"Task start message created for task {task_index}")

    state = TaskExecutionState(task_step)
    # React mode path - check workflow mode instead of context
    workflow_mode = getattr(context.workflow, 'workflowMode', 'Actionplan') if context.workflow else 'Actionplan'
    logger.info(f"Task execution - workflow mode: {workflow_mode}")
    if isinstance(context, TaskContext) and hasattr(context, 'workflow') and context.workflow and workflow_mode == 'React':
        logger.info(f"Using React mode execution with max_steps: {getattr(context.workflow, 'maxSteps', 5)}")
        state.max_steps = max(1, int(getattr(context.workflow, 'maxSteps', 5)))
        step = 1
        last_review_dict = None
        while step <= state.max_steps:
            self._checkWorkflowStopped()
            # Update workflow[currentAction] for UI
            self.updateWorkflowBeforeExecutingAction(step)
            self.service.setWorkflowContext(action_number=step)
            try:
                t0 = time.time()
                selection = await self.plan_select(context)
                logger.info(f"React step {step}: Selected action: {selection}")
                result = await self.act_execute(context, selection, task_step, workflow, step)
                observation = self.observe_build(result)
                # Attach deterministic label for clarity
                observation['resultLabel'] = result.resultLabel
                decision = await self.refine_decide(context, observation)
                # Telemetry: simple duration per step
                duration = time.time() - t0
                services.chatInterface.createLog({
                    "workflowId": workflow.id,
                    "message": f"react_step_duration_sec={duration:.3f}",
                    "type": "info"
                })
                last_review_dict = decision
                # Simple messaging per iteration
                msg = {
                    "workflowId": workflow.id,
                    "role": "assistant",
                    "message": f"🔁 Step {step}/{state.max_steps}: {selection.get('action',{}).get('method','')}.{selection.get('action',{}).get('name','')} → {'✅' if result.success else '❌'}",
                    "status": "step",
                    "sequenceNr": len(workflow.messages) + 1,
                    "publishedAt": self.services.utils.getUtcTimestamp(),
                    "documentsLabel": observation.get('resultLabel'),
                    "documents": [],
                    "roundNumber": workflow.currentRound,
                    "taskNumber": task_index,
                    "actionNumber": step,
                    "actionProgress": "success" if result.success else "fail"
                }
                services.chatInterface.createMessage(msg)
            except Exception as e:
                # Any failure inside the step ends the React loop; `observation`
                # may be unbound here, but the break skips the should_continue
                # check below, so it is never referenced undefined.
                logger.error(f"React step {step} error: {e}")
                break

            if not should_continue(observation, last_review_dict, step, state.max_steps):
                break
            step += 1

        # Summarize task result for react mode
        # NOTE(review): success is unconditionally True here — a 'stop'
        # decision also reports success; confirm this is intended.
        status = TaskStatus.COMPLETED
        success = True
        feedback = last_review_dict.get('reason') if isinstance(last_review_dict, dict) else 'Completed'
        if isinstance(last_review_dict, dict) and last_review_dict.get('decision') == 'stop':
            success = True
        return TaskResult(
            taskId=task_step.id,
            status=status,
            success=success,
            feedback=feedback,
            error=None if success else feedback
        )
    else:
        # Actionplan mode execution
        logger.info(f"Using Actionplan mode execution")

        retry_context = context
        max_retries = state.max_retries
        for attempt in range(max_retries):
            logger.info(f"Task execution attempt {attempt+1}/{max_retries}")

            # Check workflow status before starting task execution
            self._checkWorkflowStopped()

            # Update retry context with current attempt information
            if retry_context:
                retry_context.retry_count = attempt + 1

            actions = await self.generateTaskActions(task_step, workflow, previous_results=retry_context.previous_results, enhanced_context=retry_context)

            # Log total actions count for this task
            total_actions = len(actions) if actions else 0
            logger.info(f"Task {task_index or '?'} has {total_actions} actions")

            # PHASE 4: Update workflow object after action planning
            # Set totalActions=extracted_total_actions for THIS task
            self.updateWorkflowAfterActionPlanning(total_actions)

            # Set workflow action total for this task (0 if no actions generated)
            self.setWorkflowTotals(total_actions=total_actions)

            if not actions:
                # Falls through to the "failed after all retries" path below.
                logger.error("No actions defined for task step, aborting task execution")
                break

            action_results = []
            for action_idx, action in enumerate(actions):
                # Check workflow status before each action execution
                self._checkWorkflowStopped()

                # PHASE 4: Update workflow object before executing action
                # Set currentAction=action_number
                action_number = action_idx + 1
                self.updateWorkflowBeforeExecutingAction(action_number)

                # Update workflow context for this action
                self.service.setWorkflowContext(action_number=action_number)
                # Remove the increment call that causes double-increment bug

                # Log action start in format expected by frontend
                logger.info(f"Task {task_index} - Starting action {action_number}/{total_actions}")

                # Create an action start message for the user
                action_start_message = {
                    "workflowId": workflow.id,
                    "role": "assistant",
                    "message": f"⚡ **Action {action_number}/{total_actions}** (Method {action.execMethod}.{action.execAction})",
                    "status": "step",
                    "sequenceNr": len(workflow.messages) + 1,
                    "publishedAt": self.services.utils.getUtcTimestamp(),
                    "documentsLabel": f"action_{action_number}_start",
                    "documents": [],
                    # Add action progress status
                    "actionProgress": "running"
                }

                # Add user-friendly message if available
                if action.userMessage:
                    action_start_message["message"] += f"\n\n💬 {action.userMessage}"

                # Add workflow context fields - use current workflow round instead of hardcoded 1
                action_start_message.update({
                    "roundNumber": workflow.currentRound,  # Use current workflow round
                    "taskNumber": task_index,
                    "actionNumber": action_number
                })

                message = services.chatInterface.createMessage(action_start_message)
                if message:
                    workflow.messages.append(message)
                    logger.info(f"Action start message created for action {action_number}")

                # Pass action index to executeSingleAction with task context
                result = await self.executeSingleAction(action, workflow, task_step, task_index, action_number, total_actions)
                action_results.append(result)
                if result.success:
                    state.addSuccessfulAction(result)
                else:
                    state.addFailedAction(result)

            # Check workflow status before review
            self._checkWorkflowStopped()

            review_result = await self.reviewTaskCompletion(task_step, actions, action_results, workflow)
            success = review_result.status == 'success'
            feedback = review_result.reason
            # NOTE(review): `error` is computed but never used below; the
            # failure branches read review_result.reason directly.
            error = None if success else review_result.reason
            if success:
                logger.info(f"=== TASK {task_index or '?'} COMPLETED SUCCESSFULLY: {task_step.objective} ===")

                # Create a task completion message for the user
                task_progress = f"{task_index}/{total_tasks}" if total_tasks is not None else str(task_index)

                # Enhanced completion message with criteria details
                completion_message = f"🎯 **Task {task_progress}**\n\n✅ {feedback or 'Task completed successfully'}"

                # Add criteria status if available
                if hasattr(review_result, 'met_criteria') and review_result.met_criteria:
                    for criterion in review_result.met_criteria:
                        completion_message += f"\n• {criterion}"

                if hasattr(review_result, 'quality_score'):
                    completion_message += f"\n📊 Score {review_result.quality_score}/10"

                task_completion_message = {
                    "workflowId": workflow.id,
                    "role": "assistant",
                    "message": completion_message,
                    "status": "step",
                    "sequenceNr": len(workflow.messages) + 1,
                    "publishedAt": self.services.utils.getUtcTimestamp(),
                    "documentsLabel": f"task_{task_index}_completion",
                    "documents": [],
                    # Add workflow context fields
                    "roundNumber": workflow.currentRound,  # Use current workflow round
                    "taskNumber": task_index,
                    "actionNumber": 0,
                    # Add task progress status
                    "taskProgress": "success"
                }

                message = services.chatInterface.createMessage(task_completion_message)
                if message:
                    workflow.messages.append(message)
                    logger.info(f"Task completion message created for task {task_index}")

                return TaskResult(
                    taskId=task_step.id,
                    status=TaskStatus.COMPLETED,
                    success=True,
                    feedback=feedback,
                    error=None
                )

            elif review_result.status == 'retry' and state.canRetry():
                logger.warning(f"Task step '{task_step.objective}' requires retry: {review_result.improvements}")

                # Enhanced logging of criteria status
                if review_result.met_criteria:
                    logger.info(f"Met criteria: {', '.join(review_result.met_criteria)}")
                if review_result.unmet_criteria:
                    logger.warning(f"Unmet criteria: {', '.join(review_result.unmet_criteria)}")

                state.incrementRetryCount()

                # Update retry context with retry information and criteria tracking
                if retry_context:
                    retry_context.retry_count = state.retry_count
                    retry_context.improvements = review_result.improvements
                    retry_context.previous_action_results = action_results
                    retry_context.previous_review_result = review_result
                    retry_context.is_regeneration = True
                    retry_context.failure_patterns = state.getFailurePatterns()
                    retry_context.failed_actions = state.failed_actions
                    retry_context.successful_actions = state.successful_actions

                    # Track criteria progress across retries
                    if not hasattr(retry_context, 'criteria_progress'):
                        retry_context.criteria_progress = {
                            'met_criteria': set(),
                            'unmet_criteria': set(),
                            'attempt_history': []
                        }

                    # Update criteria progress - convert lists to sets for deduplication
                    if review_result.met_criteria:
                        retry_context.criteria_progress['met_criteria'].update(review_result.met_criteria)
                    if review_result.unmet_criteria:
                        retry_context.criteria_progress['unmet_criteria'].update(review_result.unmet_criteria)

                    # Record this attempt's criteria status
                    attempt_record = {
                        'attempt': state.retry_count,
                        'met_criteria': review_result.met_criteria or [],
                        'unmet_criteria': review_result.unmet_criteria or [],
                        'quality_score': review_result.quality_score,
                        'improvements': review_result.improvements or []
                    }
                    retry_context.criteria_progress['attempt_history'].append(attempt_record)

                    logger.info(f"Criteria progress after {state.retry_count} attempts:")
                    logger.info(f"  Total met: {len(retry_context.criteria_progress['met_criteria'])}")
                    logger.info(f"  Total unmet: {len(retry_context.criteria_progress['unmet_criteria'])}")
                    if retry_context.criteria_progress['met_criteria']:
                        logger.info(f"  Met criteria: {', '.join(retry_context.criteria_progress['met_criteria'])}")
                    if retry_context.criteria_progress['unmet_criteria']:
                        logger.info(f"  Unmet criteria: {', '.join(retry_context.criteria_progress['unmet_criteria'])}")

                # Log retry summary for debugging
                logger.info(f"=== RETRY #{state.retry_count} SUMMARY ===")
                logger.info(f"Task: {task_step.objective}")
                logger.info(f"Quality Score: {review_result.quality_score}/10")
                logger.info(f"Status: {review_result.status}")
                logger.info(f"Improvements Needed: {review_result.improvements}")
                logger.info(f"Reason: {review_result.reason}")
                logger.info("=== END RETRY SUMMARY ===")

                # Create retry message for user
                retry_message = {
                    "workflowId": workflow.id,
                    "role": "assistant",
                    "message": f"🔄 **Task {task_index}** needs retry: {review_result.improvements}",
                    "status": "step",
                    "sequenceNr": len(workflow.messages) + 1,
                    "publishedAt": self.services.utils.getUtcTimestamp(),
                    "documentsLabel": f"task_{task_index}_retry",
                    "documents": [],
                    "roundNumber": workflow.currentRound,
                    "taskNumber": task_index,
                    "actionNumber": 0,
                    "taskProgress": "retry"
                }

                message = services.chatInterface.createMessage(retry_message)
                if message:
                    workflow.messages.append(message)

                continue
            else:
                # Review said 'failed', or retry budget exhausted: give up on
                # this task immediately (the outer retry loop is not resumed).
                logger.error(f"=== TASK {task_index or '?'} FAILED: {task_step.objective} after {attempt+1} attempts ===")
                task_progress = f"{task_index}/{total_tasks}" if total_tasks is not None else str(task_index)

                # Create user-facing error message for task failure
                error_message = f"**Task {task_progress}**\n\n❌ '{task_step.objective}' {attempt+1}x failed\n\n"

                # Add specific error details if available
                if review_result and hasattr(review_result, 'reason') and review_result.reason:
                    error_message += f"{review_result.reason}\n\n"

                # Add criteria progress information if available
                if retry_context and hasattr(retry_context, 'criteria_progress'):
                    progress = retry_context.criteria_progress
                    error_message += f"📊 **Details**\n"
                    if progress.get('met_criteria'):
                        error_message += f"✅ Met criteria: {', '.join(progress['met_criteria'])}\n"
                    if progress.get('unmet_criteria'):
                        error_message += f"❌ Unmet criteria: {', '.join(progress['unmet_criteria'])}\n"
                    error_message += "\n"

                # Add retry information
                error_message += f"Attempts: {attempt+1}\n"
                error_message += f"Status: Will retry automatically\n\n"
                error_message += "The system will attempt to retry this task. Please wait..."

                # Create workflow message for user
                message_data = {
                    "workflowId": workflow.id,
                    "role": "assistant",
                    "message": error_message,
                    "status": "step",
                    "sequenceNr": len(workflow.messages) + 1,
                    "publishedAt": self.services.utils.getUtcTimestamp(),
                    "actionId": None,
                    "actionMethod": "task",
                    "actionName": "task_retry",
                    "documentsLabel": None,
                    "documents": [],
                    # Add workflow context fields
                    "roundNumber": workflow.currentRound,  # Use current workflow round
                    "taskNumber": task_index,
                    "actionNumber": 0,
                    # Add task progress status
                    "taskProgress": "retry"
                }

                try:
                    message = services.chatInterface.createMessage(message_data)
                    if message:
                        workflow.messages.append(message)
                        logger.info(f"Created user-facing retry message for failed task: {task_step.objective}")
                    else:
                        logger.error(f"Failed to create user-facing retry message for failed task: {task_step.objective}")
                except Exception as e:
                    # Best-effort messaging: a messaging failure must not mask
                    # the task failure result below.
                    logger.error(f"Error creating user-facing retry message: {str(e)}")

                return TaskResult(
                    taskId=task_step.id,
                    status=TaskStatus.FAILED,
                    success=False,
                    feedback=feedback,
                    error=review_result.reason if review_result and hasattr(review_result, 'reason') else "Task failed after retry attempts"
                )
        # Reached only when the retry loop is exhausted or broken out of
        # (e.g. no actions were generated) without returning.
        logger.error(f"=== TASK {task_index or '?'} FAILED AFTER ALL RETRIES: {task_step.objective} ===")

        # Create user-facing error message for task failure
        error_message = f"**Task {task_index or '?'}**\n\n❌ '{task_step.objective}' failed after all retries\n\n"
        error_message += f"{task_step.objective}\n\n"

        # Add specific error details if available
        if retry_context and hasattr(retry_context, 'previous_review_result') and retry_context.previous_review_result:
            reason = retry_context.previous_review_result.reason or ''
            if reason and reason != "Task failed after all retries.":
                error_message += f"{reason}\n\n"

        # Add retry information
        error_message += f"Retries attempted: {retry_context.retry_count if retry_context else 'Unknown'}\n"
        error_message += f"Status: Task failed permanently"

        # Create workflow message for user
        message_data = {
            "workflowId": workflow.id,
            "role": "assistant",
            "message": error_message,
            "status": "step",
            "sequenceNr": len(workflow.messages) + 1,
            "publishedAt": self.services.utils.getUtcTimestamp(),
            "actionId": None,
            "actionMethod": "task",
            "actionName": "task_failure",
            "documentsLabel": None,
            "documents": [],
            # Add workflow context fields
            "roundNumber": workflow.currentRound,  # Use current workflow round
            "taskNumber": task_index,
            "actionNumber": 0,
            # NEW: Add task progress status
            "taskProgress": "fail"
        }

        try:
            message = services.chatInterface.createMessage(message_data)
            if message:
                workflow.messages.append(message)
                logger.info(f"Created user-facing error message for failed task: {task_step.objective}")
            else:
                logger.error(f"Failed to create user-facing error message for failed task: {task_step.objective}")
        except Exception as e:
            logger.error(f"Error creating user-facing error message: {str(e)}")

        return TaskResult(
            taskId=task_step.id,
            status=TaskStatus.FAILED,
            success=False,
            feedback="Task failed after all retries.",
            error="Task failed after all retries."
        )
async def reviewTaskCompletion(self, task_step, task_actions, action_results, workflow):
    """Ask the AI service to judge whether a task's actions met its objective.

    Builds a ReviewContext from the executed actions and their results,
    sends the placeholder-based review prompt through the centralized AI
    call, and parses the JSON verdict into a ReviewResult.

    Returns:
        ReviewResult with status 'success', 'retry' or another value from
        the AI verdict; on any internal error, a ReviewResult with
        status='failed' and the exception text as reason.
    """
    try:
        # Check workflow status before reviewing task completion
        self._checkWorkflowStopped()

        logger.info(f"=== STARTING TASK COMPLETION REVIEW ===")
        logger.info(f"Task: {task_step.objective}")
        logger.info(f"Actions executed: {len(task_actions) if task_actions else 0}")
        logger.info(f"Action results: {len(action_results) if action_results else 0}")

        # Create proper context object for result review
        review_context = ReviewContext(
            task_step=task_step,
            task_actions=task_actions,
            action_results=action_results,
            step_result={
                'successful_actions': sum(1 for result in action_results if result.success),
                'total_actions': len(action_results),
                # Only successful results contribute text; failures go to 'errors'.
                'results': [self._extractResultText(result) for result in action_results if result.success],
                'errors': [result.error for result in action_results if not result.success],
                'documents': [
                    {
                        'action_index': i,
                        'documents_count': len(result.documents) if result.documents else 0,
                        'documents': result.documents if result.documents else []
                    }
                    for i, result in enumerate(action_results)
                ]
            },
            workflow_id=workflow.id,
            previous_results=[]
        )

        # Check workflow status before calling AI service
        self._checkWorkflowStopped()

        # Use placeholder-based review prompt
        prompt_template = createResultReviewPromptTemplate()

        # Extract content for placeholders
        user_prompt = extractUserPrompt(review_context)
        review_content = extractReviewContent(review_context)

        # Create placeholders dictionary
        placeholders = {
            "USER_PROMPT": user_prompt,
            "REVIEW_CONTENT": review_content
        }

        # Log result review prompt sent to AI
        logger.info("=== RESULT REVIEW PROMPT SENT TO AI ===")
        logger.info(f"Task: {task_step.objective}")
        logger.info(f"Action Results Count: {len(review_context.action_results) if review_context.action_results else 0}")
        logger.info(f"Task Actions Count: {len(review_context.task_actions) if review_context.task_actions else 0}")
        # Trace result review prompt
        self.writeTraceLog("Result Review Prompt", prompt_template)
        self.writeTraceLog("Result Review Placeholders", placeholders)

        # Centralized AI call: Result validation (balanced analysis) with placeholders
        options = AiCallOptions(
            operationType=OperationType.ANALYSE_CONTENT,
            priority=Priority.BALANCED,
            compressPrompt=True,
            compressContext=False,
            processingMode=ProcessingMode.ADVANCED,
            maxCost=0.05,
            maxProcessingTime=30
        )

        response = await self.services.ai.callAi(
            prompt=prompt_template,
            placeholders=placeholders,
            options=options
        )

        # Log result review response received
        logger.info("=== RESULT REVIEW AI RESPONSE RECEIVED ===")
        logger.info(f"Response length: {len(response) if response else 0}")
        # Trace result review response
        self.writeTraceLog("Result Review Response", response)

        # Inline parseReviewResponse logic here
        json_start = response.find('{')
        json_end = response.rfind('}') + 1
        if json_start == -1 or json_end == 0:
            # No JSON object in the response -> handled by the outer except,
            # which converts this into a status='failed' ReviewResult.
            raise ValueError("No JSON found in review response")
        json_str = response[json_start:json_end]

        try:
            review = json.loads(json_str)
        except Exception as e:
            logger.error(f"Error parsing review response JSON: {str(e)}")
            review = {}
        if 'status' not in review:
            # A parse failure (review == {}) also lands here, so unparseable
            # responses fail the review rather than defaulting silently.
            raise ValueError("Review response missing 'status' field")
        # NOTE(review): the 'status' setdefault below can never fire — the
        # check above already raised when 'status' is absent.
        review.setdefault('status', 'unknown')
        review.setdefault('reason', 'No reason provided')
        review.setdefault('quality_score', 5)

        # Ensure improvements is a list
        improvements = review.get('improvements', [])
        if isinstance(improvements, str):
            # Split string into list if it's a single improvement
            improvements = [improvements.strip()] if improvements.strip() else []
        elif not isinstance(improvements, list):
            improvements = []

        # Ensure all list fields are properly typed
        met_criteria = review.get('met_criteria', [])
        if not isinstance(met_criteria, list):
            met_criteria = []

        unmet_criteria = review.get('unmet_criteria', [])
        if not isinstance(unmet_criteria, list):
            unmet_criteria = []

        review_result = ReviewResult(
            status=review.get('status', 'unknown'),
            reason=review.get('reason', 'No reason provided'),
            improvements=improvements,
            quality_score=review.get('quality_score', 5),
            missing_outputs=[],
            met_criteria=met_criteria,
            unmet_criteria=unmet_criteria,
            confidence=review.get('confidence', 0.5),
            # Extract user-friendly message if available
            userMessage=review.get('userMessage', None)
        )

        # Enhanced validation logging
        logger.info(f"VALIDATION RESULT - Task: '{task_step.objective}' - Status: {review_result.status.upper()}, Quality: {review_result.quality_score}/10")
        if review_result.status == 'success':
            logger.info(f"VALIDATION SUCCESS - Task completed successfully")
            if review_result.met_criteria:
                logger.info(f"Met criteria: {', '.join(review_result.met_criteria)}")
        elif review_result.status == 'retry':
            logger.warning(f"VALIDATION RETRY - Task requires retry: {review_result.improvements}")
            if review_result.unmet_criteria:
                logger.warning(f"Unmet criteria: {', '.join(review_result.unmet_criteria)}")
        else:
            logger.error(f"VALIDATION FAILED - Task failed: {review_result.reason}")

        logger.info(f"=== TASK COMPLETION REVIEW FINISHED ===")
        logger.info(f"Final Status: {review_result.status}")
        logger.info(f"Quality Score: {review_result.quality_score}/10")
        logger.info(f"Improvements: {review_result.improvements}")
        logger.info("=== END REVIEW ===")

        return review_result
    except Exception as e:
        # Includes the ValueErrors raised above for missing/invalid JSON.
        logger.error(f"Error in reviewTaskCompletion: {str(e)}")
        return ReviewResult(
            status='failed',
            reason=str(e),
            quality_score=0
        )
async def prepareTaskHandover(self, task_step, task_actions, task_result, workflow):
    """Assemble a handover payload summarizing a finished task.

    Args:
        task_step: The executed task step (id and objective are read).
        task_actions: Actions executed for the task; each must support to_dict().
        task_result: Either a ReviewResult (detected by its met_criteria
            attribute, serialized via to_dict()) or a TaskResult
            (summarized from status/error/success).
        workflow: The owning workflow (id is read).

    Returns:
        A dict with task id/description, serialized actions, the review
        summary, workflow id and a handover timestamp; on any failure,
        {'error': <message>} instead.
    """
    try:
        # Check workflow status before preparing task handover
        self._checkWorkflowStopped()

        # Handle both TaskResult and ReviewResult objects.
        # (Dead locals `status` and `met` from the original were removed:
        # they were assigned but never read.)
        if hasattr(task_result, 'met_criteria'):
            # This is a ReviewResult object
            review_result = task_result.to_dict()
        else:
            # This is a TaskResult object
            review_result = {
                'status': task_result.status if task_result else 'unknown',
                'reason': task_result.error if task_result and hasattr(task_result, 'error') else None,
                'success': task_result.success if task_result else False
            }

        handover_data = {
            'task_id': task_step.id,
            'task_description': task_step.objective,
            'actions': [action.to_dict() for action in task_actions],
            'review_result': review_result,
            'workflow_id': workflow.id,
            'handover_time': self.services.utils.getUtcTimestamp()
        }
        logger.info(f"Prepared handover for task {task_step.id} in workflow {workflow.id}")
        return handover_data
    except Exception as e:
        logger.error(f"Error in prepareTaskHandover: {str(e)}")
        return {'error': str(e)}
def createTaskAction(self, actionData: Dict[str, Any]) -> 'TaskAction':
    """Validate, persist and return a new TaskAction built from actionData.

    Fills in a generated id, a PENDING status and empty execParameters when
    absent. Returns None when the mandatory execMethod/execAction fields
    are missing or persistence fails.
    """
    try:
        # Generate an id when the caller supplied none (or an empty one).
        if not actionData.get("id"):
            actionData["id"] = f"action_{uuid.uuid4()}"

        # Default status for freshly created actions.
        actionData.setdefault("status", TaskStatus.PENDING)

        # Both execution fields are mandatory; bail out early when absent.
        for required_field in ("execMethod", "execAction"):
            if required_field not in actionData:
                logger.error(f"{required_field} is required for task action")
                return None

        actionData.setdefault("execParameters", {})

        # Use generic field separation based on TaskAction model
        simple_fields, object_fields = services.chatInterface._separate_object_fields(TaskAction, actionData)

        # Persist the simple fields and hydrate the model from the record.
        record = services.chatInterface.db.recordCreate(TaskAction, simple_fields)

        return TaskAction(
            id=record["id"],
            execMethod=record["execMethod"],
            execAction=record["execAction"],
            execParameters=record.get("execParameters", {}),
            execResultLabel=record.get("execResultLabel"),
            expectedDocumentFormats=record.get("expectedDocumentFormats"),
            status=record.get("status", TaskStatus.PENDING),
            error=record.get("error"),
            retryCount=record.get("retryCount", 0),
            retryMax=record.get("retryMax", 3),
            processingTime=record.get("processingTime"),
            timestamp=float(record.get("timestamp", self.services.utils.getUtcTimestamp())),
            result=record.get("result"),
            resultDocuments=record.get("resultDocuments", []),
            userMessage=record.get("userMessage")
        )

    except Exception as e:
        logger.error(f"Error creating task action: {str(e)}")
        return None

# --- Helper action handling methods ---

async def executeSingleAction(self, action, workflow, task_step, task_index=None, action_index=None, total_actions=None):
    """Execute a single action and return ActionResult with enhanced document processing.

    Runs the action via executeAction(), then on success creates the chat
    message first (to obtain a messageId) and the result documents second,
    linking them back onto the message. On failure it still creates a
    user-visible error message and log entry. Never raises: internal
    errors are converted into a failed ActionResult.
    """
    try:
        # Check workflow status before executing action
        self._checkWorkflowStopped()

        # Use passed indices or fallback to '?'
        task_num = task_index if task_index is not None else '?'
        action_num = action_index if action_index is not None else '?'

        logger.info(f"=== TASK {task_num} ACTION {action_num}: {action.execMethod}.{action.execAction} ===")

        # Log input parameters
        input_docs = action.execParameters.get('documentList', [])
        input_connections = action.execParameters.get('connections', [])
        logger.info(f"Input documents: {input_docs} (type: {type(input_docs)})")
        if input_connections:
            logger.info(f"Input connections: {input_connections}")

        # Log all action parameters for debugging
        logger.info(f"All action parameters: {action.execParameters}")

        # Copy so the stored action's parameters are not mutated.
        enhanced_parameters = action.execParameters.copy()
        if action.expectedDocumentFormats:
            enhanced_parameters['expectedDocumentFormats'] = action.expectedDocumentFormats
            logger.info(f"Expected formats: {action.expectedDocumentFormats}")

        # Check workflow status before executing the action
        self._checkWorkflowStopped()

        result = await self.executeAction(
            methodName=action.execMethod,
            actionName=action.execAction,
            parameters=enhanced_parameters
        )
        result_label = action.execResultLabel

        # Trace action result (without document data)
        action_result_trace = {
            "method": action.execMethod,
            "action": action.execAction,
            "success": result.success,
            "error": result.error,
            "resultLabel": result_label,
            "documentsCount": len(result.documents) if result.documents else 0
        }
        self.writeTraceLog("Action Result", action_result_trace)

        # Process documents from the action result
        created_documents = []
        if result.success:
            action.setSuccess()
            # Extract result text from documents if available, otherwise use empty string
            action.result = ""
            if result.documents and len(result.documents) > 0:
                # Try to get text content from the first document
                first_doc = result.documents[0]
                if isinstance(first_doc.documentData, dict):
                    action.result = first_doc.documentData.get("result", "")
                elif isinstance(first_doc.documentData, str):
                    action.result = first_doc.documentData
            # Preserve the action's execResultLabel for document routing
            # Action methods should NOT return resultLabel - this is managed by the action handler
            if not action.execResultLabel:
                logger.warning(f"Action {action.execMethod}.{action.execAction} has no execResultLabel set")
            # Always use the action's execResultLabel for message creation to ensure proper document routing
            message_result_label = action.execResultLabel

            # Create message first to get messageId, then create documents with messageId
            message = await self.createActionMessage(action, result, workflow, message_result_label, [], task_step, task_index)
            if message:
                # Now create documents with the messageId
                created_documents = self.documentGenerator.createDocumentsFromActionResult(result, action, workflow, message.id)
                # Update the message with the created documents
                if created_documents:
                    message.documents = created_documents
                    # Update the message in the database
                    services.chatInterface.updateMessage(message.id, {"documents": [doc.dict() for doc in created_documents]})

            # Log action results
            logger.info(f"Action completed successfully")

            if created_documents:
                logger.info(f"Output documents ({len(created_documents)}):")
                for i, doc in enumerate(created_documents):
                    logger.info(f"  {i+1}. {doc.fileName}")

                # Log document details for debugging
                logger.info("Document details:")
                for i, doc in enumerate(created_documents):
                    logger.info(f"  Doc {i+1}: fileName={doc.fileName}, type={type(doc)}")
                    logger.info(f"    ID: {doc.id}")
                    logger.info(f"    File ID: {doc.fileId}")
            else:
                logger.info("Output: No documents created")
        else:
            action.setError(result.error or "Action execution failed")
            logger.error(f"Action failed: {result.error}")

            # ⚠️ IMPORTANT: Create error message for failed actions so user can see what went wrong
            message = await self.createActionMessage(action, result, workflow, result_label, [], task_step, task_index)

            # Create database log entry for action failure
            services.chatInterface.createLog({
                "workflowId": workflow.id,
                "message": f"❌ **Task {task_num}**\n\n❌ **Action {action_num}/{total_actions}** failed: {result.error}",
                "type": "error"
            })

        # Log action summary
        logger.info(f"=== TASK {task_num} ACTION {action_num} COMPLETED ===")

        # Preserve the original documents field from the method result
        # This ensures the standard document format is maintained
        original_documents = result.documents

        # Extract result text from documents if available
        # NOTE(review): result_text is computed but not used in the return
        # below — confirm whether _extractResultText has side effects.
        result_text = self._extractResultText(result)

        return ActionResult(
            success=result.success,
            documents=original_documents,  # Preserve original documents field from method result
            resultLabel=action.execResultLabel,  # Always use action's execResultLabel
            error=result.error or ""
        )
    except Exception as e:
        logger.error(f"Error executing single action: {str(e)}")
        action.setError(str(e))
        return ActionResult(
            success=False,
            documents=[],  # Empty documents for error case
            resultLabel=action.execResultLabel,
            error=str(e)
        )
async def createActionMessage(self, action, result, workflow, result_label=None, created_documents=None, task_step=None, task_index=None):
    """Create and store a message for the action result in the workflow with enhanced document processing.

    Builds a user-facing markdown message (success or failure variant) for a
    single executed action, persists it via the chat interface, and appends it
    to the in-memory workflow message list.

    Args:
        action: Executed action; execMethod/execAction/execResultLabel/id are read.
        result: Action outcome; success/error are read.
        workflow: Workflow the message belongs to; id/messages are read and mutated.
        result_label: Document label; defaults to action.execResultLabel.
        created_documents: Documents to attach to the message (may be None/empty).
        task_step: Current task step; only its objective is used for the message text.
        task_index: Currently unused by this method.

    Returns:
        The created message object, or None on failure (errors are logged, never raised).
    """
    try:
        # Check workflow status before creating action message
        self._checkWorkflowStopped()

        if result_label is None:
            result_label = action.execResultLabel

        # Log delivered documents
        if created_documents:
            logger.info(f"Result label: {result_label} - {len(created_documents)} documents")
        else:
            logger.info(f"Result label: {result_label} - No documents")

        # Get current workflow context and stats
        # NOTE(review): `self.service` (singular) is used here while other
        # methods use `self.services` — confirm both attributes exist.
        workflow_context = self.service.getWorkflowContext()
        workflow_stats = self.service.getWorkflowStats()

        # Create a more meaningful message that includes task context
        task_objective = task_step.objective if task_step else 'Unknown task'

        # Add comprehensive workflow context
        current_round = workflow_context.get('currentRound', 0)
        current_task = workflow_context.get('currentTask', 0)
        total_tasks = workflow_stats.get('totalTasks', 0)
        current_action = workflow_context.get('currentAction', 0)
        total_actions = workflow_stats.get('totalActions', 0)

        # Build a user-friendly message based on success/failure
        if result.success:
            message_text = f"**Action {current_action}/{total_actions} ({action.execMethod}.{action.execAction})**\n\n"
            message_text += f"✅ {task_objective}\n\n"
        else:
            # ⚠️ FAILURE MESSAGE - Show error details to user
            error_details = result.error if result.error else "Unknown error occurred"
            message_text = f"**Action {current_action}/{total_actions} ({action.execMethod}.{action.execAction})**\n\n"
            message_text += f"❌ {task_objective}\n\n"
            message_text += f"{error_details}\n\n"

        # Persisted message payload; sequenceNr continues the workflow's
        # in-memory message ordering.
        message_data = {
            "workflowId": workflow.id,
            "role": "assistant",
            "message": message_text,
            "status": "step",
            "sequenceNr": len(workflow.messages) + 1,
            "publishedAt": self.services.utils.getUtcTimestamp(),
            "actionId": action.id,
            "actionMethod": action.execMethod,
            "actionName": action.execAction,
            "documentsLabel": result_label,
            "documents": created_documents,
            # Add workflow context fields - extract from result_label to match document reference
            "roundNumber": current_round,
            "taskNumber": current_task,
            "actionNumber": current_action,
            "actionProgress": "success" if result.success else "fail"
        }

        # Add debugging for error messages
        if not result.success:
            logger.info(f"Creating ERROR message: {message_text}")
            logger.info(f"Message data: {message_data}")

        # NOTE(review): bare `services` (not `self.services`) — presumably a
        # module-level service locator injected elsewhere; verify it resolves.
        message = services.chatInterface.createMessage(message_data)
        if message:
            workflow.messages.append(message)
            logger.info(f"Message created: {action.execMethod}.{action.execAction}")
            return message
        else:
            logger.error(f"Failed to create workflow message for action {action.execMethod}.{action.execAction}")
            return None
    except Exception as e:
        # Message creation must never break action execution; swallow and log.
        logger.error(f"Error creating action message: {str(e)}")
        return None
|
||
|
||
# --- Helper validation methods ---
|
||
|
||
def _validateTaskPlan(self, task_plan: Dict[str, Any]) -> bool:
    """Validate the structure of a generated task plan.

    Checks that the plan is a dict with a 'tasks' list, that every task is a
    dict carrying 'id', 'objective' and 'success_criteria', that task IDs are
    unique, and that every dependency references a known task ID (or the
    implicit 'task_0' root).

    Args:
        task_plan: Parsed task plan as returned by the planning prompt.

    Returns:
        True when the plan is structurally valid, False otherwise.
        Problems are logged, never raised.
    """
    try:
        if not isinstance(task_plan, dict):
            logger.error("Task plan is not a dictionary")
            return False

        if 'tasks' not in task_plan or not isinstance(task_plan['tasks'], list):
            logger.error(f"Task plan missing 'tasks' field or not a list. Found: {type(task_plan.get('tasks', 'MISSING'))}")
            return False

        # First pass: collect all task IDs so dependencies can be validated
        # against the full id set, and catch duplicates on the way.
        task_ids = set()
        for i, task in enumerate(task_plan['tasks']):
            if not isinstance(task, dict):
                logger.error(f"Task is not a dictionary: {type(task)}")
                return False
            if 'id' not in task:
                logger.error(f"Task missing 'id' field: {task}")
                return False
            # BUGFIX: the previous duplicate check counted the id string inside
            # a list of task *dicts*, so it could never trigger. Detect the
            # duplicate here, before the id is added to the set.
            if task['id'] in task_ids:
                logger.error(f"Task {i} has duplicate ID: {task['id']}")
                return False
            task_ids.add(task['id'])

        # Second pass: validate required fields and dependency references.
        for i, task in enumerate(task_plan['tasks']):
            required_fields = ['id', 'objective', 'success_criteria']
            missing_fields = [field for field in required_fields if field not in task]
            if missing_fields:
                logger.error(f"Task {i} missing required fields: {missing_fields}")
                return False

            dependencies = task.get('dependencies', [])
            if not isinstance(dependencies, list):
                logger.error(f"Task {i} dependencies is not a list: {type(dependencies)}")
                return False

            # 'task_0' is the implicit root task and is always a legal dependency.
            for dep in dependencies:
                if dep not in task_ids and dep != 'task_0':
                    logger.error(f"Task {i} has invalid dependency: {dep} (available: {list(task_ids) + ['task_0']})")
                    return False

        logger.info(f"Task plan validation successful with {len(task_ids)} tasks")
        return True

    except Exception as e:
        logger.error(f"Error validating task plan: {str(e)}")
        return False
|
||
|
||
def _extractActionNumberFromLabel(self, label: str) -> int:
|
||
"""Extract action number from a document label like 'round1_task1_action1_diagram_analysis'"""
|
||
try:
|
||
if not label or not isinstance(label, str):
|
||
return 0
|
||
|
||
# Parse label format: round{round}_task{task}_action{action}_{context}
|
||
if '_action' in label:
|
||
action_part = label.split('_action')[1]
|
||
if action_part and '_' in action_part:
|
||
action_number = action_part.split('_')[0]
|
||
return int(action_number)
|
||
|
||
return 0
|
||
except Exception as e:
|
||
logger.warning(f"Could not extract action number from label '{label}': {str(e)}")
|
||
return 0
|
||
|
||
def _validateActions(self, actions: List[Dict[str, Any]], context) -> bool:
    """Validate a list of planned actions before execution.

    Every action must be a dict with truthy 'method', 'action', 'parameters'
    and 'resultLabel' entries, its result label must start with 'round', and
    'parameters' must be a dict. Problems are logged and reported through the
    return value; nothing is raised.
    """
    try:
        if not isinstance(actions, list):
            logger.error("Actions must be a list")
            return False
        if len(actions) == 0:
            logger.warning("No actions generated")
            return False

        required_fields = ['method', 'action', 'parameters', 'resultLabel']
        for i, action in enumerate(actions):
            if not isinstance(action, dict):
                logger.error(f"Action {i} must be a dictionary")
                return False

            # NOTE(review): present-but-empty values (e.g. parameters == {})
            # are treated as missing, matching the original truthiness check —
            # confirm that zero-parameter actions are never expected.
            missing_fields = [field for field in required_fields if not action.get(field)]
            if missing_fields:
                logger.error(f"Action {i} missing required fields: {missing_fields}")
                return False

            result_label = action.get('resultLabel', '')
            if not result_label.startswith('round'):
                logger.error(f"Action {i} result label must start with 'round': {result_label}")
                return False

            parameters = action.get('parameters', {})
            if not isinstance(parameters, dict):
                logger.error(f"Action {i} parameters must be a dictionary")
                return False

        logger.info(f"Successfully validated {len(actions)} actions")
        return True
    except Exception as e:
        logger.error(f"Error validating actions: {str(e)}")
        return False
|
||
|
||
def _extractResultText(self, result: ActionResult) -> str:
    """Return the textual payload of the first result document, if any.

    Yields the empty string for failed results, results without documents,
    or document payloads that are neither dict nor str. For dict payloads
    the text is taken from the 'result' key.
    """
    if not result.success or not result.documents:
        return ""

    # Only the first document's payload is inspected.
    payload = result.documents[0].documentData
    if isinstance(payload, dict):
        return payload.get("result", "")
    if isinstance(payload, str):
        return payload
    return ""
|
||
|
||
# PHASE 4: Workflow Object Update Rules Implementation
|
||
|
||
def updateWorkflowAfterTaskPlanCreated(self, total_tasks: int):
    """
    Update workflow object after task plan is created.
    Rule: Set currentTask=1, currentAction=0, totalTasks=extracted_total_tasks, totalActions=0
    """
    try:
        update_data = {
            "currentTask": 1,
            "currentAction": 0,
            "totalTasks": total_tasks,
            "totalActions": 0
        }

        # Mirror the new progress values on the in-memory workflow object...
        for field, value in update_data.items():
            setattr(self.workflow, field, value)

        # ...then persist the same payload.
        services.chatInterface.updateWorkflow(self.workflow.id, update_data)
        logger.info(f"Updated workflow {self.workflow.id} after task plan created: {update_data}")

    except Exception as e:
        logger.error(f"Error updating workflow after task plan created: {str(e)}")
|
||
|
||
def updateWorkflowBeforeExecutingTask(self, task_number: int):
    """
    Update workflow object before executing a task.
    Rule: Set currentTask=task_number, currentAction=0, totalActions=0
    """
    try:
        update_data = {
            "currentTask": task_number,
            "currentAction": 0,
            "totalActions": 0
        }

        # Keep the in-memory object and the database in sync: apply the
        # values locally first, then persist the identical payload.
        for field, value in update_data.items():
            setattr(self.workflow, field, value)
        services.chatInterface.updateWorkflow(self.workflow.id, update_data)

        logger.info(f"Updated workflow {self.workflow.id} before executing task {task_number}: {update_data}")

    except Exception as e:
        logger.error(f"Error updating workflow before executing task: {str(e)}")
|
||
|
||
def updateWorkflowAfterActionPlanning(self, total_actions: int):
    """
    Update workflow object after action planning for current task.
    Rule: Set totalActions=extracted_total_actions for THIS task
    """
    try:
        update_data = {"totalActions": total_actions}

        # Apply locally, then persist the identical payload.
        self.workflow.totalActions = total_actions
        services.chatInterface.updateWorkflow(self.workflow.id, update_data)

        logger.info(f"Updated workflow {self.workflow.id} after action planning: {update_data}")

    except Exception as e:
        logger.error(f"Error updating workflow after action planning: {str(e)}")
|
||
|
||
def updateWorkflowBeforeExecutingAction(self, action_number: int):
    """
    Update workflow object before executing an action.
    Rule: Set currentAction=action_number
    """
    try:
        update_data = {"currentAction": action_number}

        # Apply locally, then persist the identical payload.
        self.workflow.currentAction = action_number
        services.chatInterface.updateWorkflow(self.workflow.id, update_data)

        logger.info(f"Updated workflow {self.workflow.id} before executing action {action_number}: {update_data}")

    except Exception as e:
        logger.error(f"Error updating workflow before executing action: {str(e)}")
|
||
|
||
def setWorkflowTotals(self, total_tasks: Optional[int] = None, total_actions: Optional[int] = None):
    """Set total counts for workflow progress tracking and update database.

    Args:
        total_tasks: New total task count, or None to leave it unchanged.
        total_actions: New total action count, or None to leave it unchanged.

    Notes:
        The database is only written when at least one value changed.
        Errors are logged, never raised.
    """
    try:
        update_data = {}

        if total_tasks is not None:
            self.workflow.totalTasks = total_tasks
            update_data["totalTasks"] = total_tasks

        if total_actions is not None:
            self.workflow.totalActions = total_actions
            update_data["totalActions"] = total_actions

        # Update workflow object in database if we have changes
        if update_data:
            services.chatInterface.updateWorkflow(self.workflow.id, update_data)
            logger.info(f"Updated workflow {self.workflow.id} totals in database: {update_data}")

        # getattr with a default replaces the hasattr ternaries of the
        # original; output is identical.
        logger.debug(f"Updated workflow totals: Tasks {getattr(self.workflow, 'totalTasks', 'N/A')}, Actions {getattr(self.workflow, 'totalActions', 'N/A')}")
    except Exception as e:
        logger.error(f"Error setting workflow totals: {str(e)}")
|
||
|
||
def resetWorkflowForNewSession(self):
    """Reset workflow values for a new workflow session.

    Zeroes all round/task/action counters and totals, marks the workflow
    'ready', and persists the same initial state to the database.
    """
    try:
        # Single source of truth for the initial state, applied both to the
        # in-memory object and to the database.
        initial_state = {
            "currentRound": 0,
            "currentTask": 0,
            "currentAction": 0,
            "totalTasks": 0,
            "totalActions": 0,
            "status": "ready"
        }

        for field, value in initial_state.items():
            setattr(self.workflow, field, value)

        services.chatInterface.updateWorkflow(self.workflow.id, initial_state)

        logger.info("Workflow reset for new session - all values set to initial state and updated in database")
    except Exception as e:
        logger.error(f"Error resetting workflow for new session: {str(e)}")
|
||
|
||
# ===== Functions moved from serviceCenter =====
|
||
|
||
async def executeAction(self, methodName: str, actionName: str, parameters: Dict[str, Any]) -> ActionResult:
    """Look up a registered method action and execute it.

    Args:
        methodName: Name of the method group in the `methods` registry.
        actionName: Name of the action within that method group.
        parameters: Parameter dict passed straight to the action handler.

    Returns:
        The ActionResult produced by the action handler.

    Raises:
        ValueError: if the method or action name is unknown.
        Exception: any error raised by the handler is logged and re-raised.
    """
    try:
        if methodName not in methods:
            raise ValueError(f"Unknown method: {methodName}")

        available_actions = methods[methodName]['actions']
        if actionName not in available_actions:
            raise ValueError(f"Unknown action: {actionName} for method {methodName}")

        # Each registry entry stores its callable under the 'method' key.
        handler = available_actions[actionName]['method']
        return await handler(parameters)

    except Exception as e:
        logger.error(f"Error executing method {methodName}.{actionName}: {str(e)}")
        raise
|
||
|
||
def writeTraceLog(self, contextText: str, data: Any) -> None:
    """Append a trace entry to the configured trace file (debug mode only).

    No-ops unless the module logger is at DEBUG level. Failures are swallowed
    deliberately so tracing can never break the workflow.

    Args:
        contextText: Short description of where/why the trace was written.
        data: Optional payload; dicts/lists are pretty-printed as JSON,
            anything else is stringified.
    """
    try:
        # os is not imported at module level, so keep this import local.
        # The redundant local imports of logging/json/datetime that the
        # original carried are dropped — all three are module-level imports.
        import os

        # Only write if logger is in debug mode.
        # NOTE(review): this reads the logger's own level, not
        # getEffectiveLevel(); an unset (NOTSET=0) logger passes this check —
        # confirm that is intended.
        if logger.level > logging.DEBUG:
            return

        # Resolve the log directory; relative paths are anchored at the
        # gateway directory (three levels above this file).
        logDir = self.services.utils.configGet("APP_LOGGING_LOG_DIR", "./")
        if not os.path.isabs(logDir):
            gatewayDir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
            logDir = os.path.join(gatewayDir, logDir)

        # Ensure log directory exists
        os.makedirs(logDir, exist_ok=True)

        trace_file = os.path.join(logDir, "log_trace.log")

        # Millisecond-precision UTC timestamp for the entry header.
        timestamp = datetime.fromtimestamp(self.services.utils.getUtcTimestamp(), UTC).strftime("%Y-%m-%d %H:%M:%S.%f")[:-3]
        trace_entry = f"[{timestamp}] {contextText}\n"

        # Add data if provided
        if data is not None:
            if isinstance(data, (dict, list)):
                trace_entry += f"Data: {json.dumps(data, indent=2, default=str)}\n"
            else:
                trace_entry += f"Data: {str(data)}\n"

        trace_entry += "-" * 80 + "\n\n"

        # Write to trace file
        with open(trace_file, "a", encoding="utf-8") as f:
            f.write(trace_entry)

    except Exception:
        # Don't log trace errors to avoid recursion
        pass
|
||
|
||
def clearTraceLog(self) -> None:
    """Clear (or remove) the trace log file.

    In debug mode the trace file is truncated to empty so a new session
    starts clean; otherwise any existing trace file is deleted. Failures are
    swallowed deliberately so tracing can never break the workflow.
    """
    try:
        # os is not imported at module level, so keep this import local.
        # The redundant local `import logging` of the original is dropped —
        # logging is a module-level import.
        import os

        # Get log directory from configuration
        logDir = self.services.utils.configGet("APP_LOGGING_LOG_DIR", "./")
        if not os.path.isabs(logDir):
            # If relative path, make it relative to the gateway directory
            gatewayDir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
            logDir = os.path.join(gatewayDir, logDir)

        trace_file = os.path.join(logDir, "log_trace.log")

        # Not in debug mode: remove any stale trace file instead of clearing.
        # NOTE(review): reads the logger's own level, not getEffectiveLevel()
        # — confirm that is intended.
        if logger.level > logging.DEBUG:
            if os.path.exists(trace_file):
                os.remove(trace_file)
            return

        # Debug mode: truncate to an empty file.
        with open(trace_file, "w", encoding="utf-8") as f:
            f.write("")

    except Exception:
        # Don't log trace errors to avoid recursion
        pass
|