1527 lines
No EOL
75 KiB
Python
1527 lines
No EOL
75 KiB
Python
# handlingTasks.py
|
|
# Refactored for clarity and consolidation
|
|
|
|
import asyncio
|
|
import logging
|
|
import json
|
|
import time
|
|
from typing import Dict, Any, Optional, List, Union
|
|
from datetime import datetime, UTC
|
|
from modules.interfaces.interfaceChatModel import (
|
|
TaskStatus, TaskStep, TaskContext, TaskAction, ReviewResult, TaskPlan, WorkflowResult, TaskResult, ReviewContext, ActionResult
|
|
)
|
|
from modules.shared.timezoneUtils import get_utc_timestamp
|
|
from .executionState import TaskExecutionState
|
|
from .promptFactory import (
|
|
createTaskPlanningPrompt,
|
|
createActionDefinitionPrompt,
|
|
createResultReviewPrompt
|
|
)
|
|
from modules.chat.documents.documentGeneration import DocumentGenerator
|
|
import uuid
|
|
|
|
logger = logging.getLogger(__name__)
|
|
|
|
class WorkflowStoppedException(Exception):
    """Raised to abort in-flight execution once the user has stopped the workflow."""
|
|
|
|
class HandlingTasks:
|
|
def __init__(self, chatInterface, service, workflow=None):
|
|
self.chatInterface = chatInterface
|
|
self.service = service
|
|
self.workflow = workflow
|
|
self.documentGenerator = DocumentGenerator(service)
|
|
|
|
def _checkWorkflowStopped(self):
|
|
"""
|
|
Check if workflow has been stopped by user and raise exception if so.
|
|
This function centralizes all workflow stop checking logic to avoid code duplication.
|
|
"""
|
|
try:
|
|
# Get the current workflow status from the database to avoid stale data
|
|
current_workflow = self.chatInterface.getWorkflow(self.service.workflow.id)
|
|
if current_workflow and current_workflow.status == "stopped":
|
|
logger.info("Workflow stopped by user, aborting execution")
|
|
raise WorkflowStoppedException("Workflow was stopped by user")
|
|
except WorkflowStoppedException:
|
|
# Re-raise the WorkflowStoppedException immediately
|
|
raise
|
|
except Exception as e:
|
|
# If we can't get the current status due to other database issues, fall back to the in-memory object
|
|
logger.warning(f"Could not check current workflow status from database: {str(e)}")
|
|
if self.service.workflow.status == "stopped":
|
|
logger.info("Workflow stopped by user (from in-memory object), aborting execution")
|
|
raise WorkflowStoppedException("Workflow was stopped by user")
|
|
|
|
    async def generateTaskPlan(self, userInput: str, workflow) -> TaskPlan:
        """Generate a high-level task plan for the workflow.

        Asks the AI service for a JSON task plan, validates/parses it into
        TaskStep objects, records the detected user language on the service,
        sets workflow progress totals, and posts a task-plan chat message.

        Args:
            userInput: the user's request that the plan should fulfil.
            workflow: workflow object the plan belongs to (needs `.id`).

        Returns:
            TaskPlan with overview, tasks, and an optional user-facing message.

        Raises:
            WorkflowStoppedException: if the user stopped the workflow.
            ValueError / Exception: when the AI response is missing, fails
                validation, or yields no usable tasks.
        """
        try:
            # Check workflow status before generating task plan
            self._checkWorkflowStopped()

            logger.info(f"=== STARTING TASK PLAN GENERATION ===")
            logger.info(f"Workflow ID: {workflow.id}")
            logger.info(f"User Input: {userInput}")
            # NOTE(review): available_docs appears to be a description string,
            # not a list (see comment in generateTaskActions) — confirm.
            available_docs = self.service.getAvailableDocuments(workflow)

            # Check workflow status again before the (slow) AI call
            self._checkWorkflowStopped()

            # Create proper context object for task planning.
            # For task planning we need a minimal placeholder TaskStep, since
            # TaskContext requires one even though no real step exists yet.
            from modules.interfaces.interfaceChatModel import TaskStep
            planning_task_step = TaskStep(
                id="planning",
                objective=userInput,
                dependencies=[],
                success_criteria=[],
                estimated_complexity="medium"
            )

            task_planning_context = TaskContext(
                task_step=planning_task_step,
                workflow=workflow,
                workflow_id=workflow.id,
                available_documents=available_docs,
                available_connections=[],
                previous_results=[],
                previous_handover=None,
                improvements=[],
                retry_count=0,
                previous_action_results=[],
                previous_review_result=None,
                is_regeneration=False,
                failure_patterns=[],
                failed_actions=[],
                successful_actions=[],
                # Sets for deduplicated criteria tracking across retries.
                criteria_progress={
                    'met_criteria': set(),
                    'unmet_criteria': set(),
                    'attempt_history': []
                }
            )

            # Generate the task planning prompt
            task_planning_prompt = createTaskPlanningPrompt(task_planning_context, self.service)

            # Log the full task planning prompt being sent to AI for debugging
            logger.info("=== TASK PLANNING PROMPT SENT TO AI ===")
            logger.info(f"User Input: {userInput}")
            logger.info(f"Available Documents: {available_docs}")
            logger.info("=== FULL TASK PLANNING PROMPT ===")
            logger.info(task_planning_prompt)
            logger.info("=== END TASK PLANNING PROMPT ===")

            prompt = await self.service.callAiTextAdvanced(task_planning_prompt)

            # Check if AI response is valid
            if not prompt:
                raise ValueError("AI service returned no response for task planning")

            # Log the full AI response for task planning
            logger.info("=== TASK PLANNING AI RESPONSE RECEIVED ===")
            logger.info(f"Response length: {len(prompt) if prompt else 0}")
            logger.debug("=== FULL TASK PLANNING AI RESPONSE ===")
            logger.debug(prompt)
            logger.debug("=== END TASK PLANNING AI RESPONSE ===")

            # Inline _parseTaskPlanResponse logic: extract the outermost {...}
            # span from the (possibly prose-wrapped) AI response and parse it.
            # On any parse failure, fall through with an empty plan so the
            # validation below produces a clear error.
            try:
                json_start = prompt.find('{')
                json_end = prompt.rfind('}') + 1
                if json_start == -1 or json_end == 0:
                    raise ValueError("No JSON found in response")
                json_str = prompt[json_start:json_end]
                task_plan_dict = json.loads(json_str)

                if 'tasks' not in task_plan_dict:
                    raise ValueError("Task plan missing 'tasks' field")
            except Exception as e:
                logger.error(f"Error parsing task plan response: {str(e)}")
                task_plan_dict = {'tasks': []}

            if not self._validateTaskPlan(task_plan_dict):
                logger.error("Generated task plan failed validation")
                logger.error(f"AI Response: {prompt}")
                logger.error(f"Parsed Task Plan: {json.dumps(task_plan_dict, indent=2)}")
                raise Exception("AI-generated task plan failed validation - AI is required for task planning")

            if not task_plan_dict.get('tasks'):
                raise ValueError("Task plan contains no tasks")

            # LANGUAGE DETECTION: Determine user language once for the entire workflow
            # Priority: 1. languageUserDetected from AI response, 2. service.user.language, 3. "en"
            detected_language = task_plan_dict.get('languageUserDetected', '').strip()
            service_user_language = getattr(self.service.user, 'language', '') if self.service and self.service.user else ''

            if detected_language and len(detected_language) == 2:  # Valid language code like "en", "de", "fr"
                user_language = detected_language
                logger.info(f"Using detected language from AI response: {user_language}")
            elif service_user_language and len(service_user_language) == 2:
                user_language = service_user_language
                logger.info(f"Using language from service user object: {user_language}")
            else:
                user_language = "en"
                logger.info(f"Using default language: {user_language}")

            # Set the detected language in the service for use throughout the workflow
            if self.service and self.service.user:
                self.service.user.language = user_language
                logger.info(f"Set workflow user language to: {user_language}")

            # Convert each task dict into a TaskStep, skipping malformed ones.
            tasks = []
            for i, task_dict in enumerate(task_plan_dict.get('tasks', [])):
                if not isinstance(task_dict, dict):
                    logger.warning(f"Skipping invalid task {i+1}: not a dictionary")
                    continue

                # Map old 'description' field to new 'objective' field
                # (backward compatibility with an earlier response schema).
                if 'description' in task_dict and 'objective' not in task_dict:
                    task_dict['objective'] = task_dict.pop('description')

                try:
                    task = TaskStep(**task_dict)
                    tasks.append(task)
                except Exception as e:
                    logger.warning(f"Skipping invalid task {i+1}: {str(e)}")
                    continue

            if not tasks:
                raise ValueError("No valid tasks could be created from AI response")

            task_plan = TaskPlan(
                overview=task_plan_dict.get('overview', ''),
                tasks=tasks,
                userMessage=task_plan_dict.get('userMessage', '')
            )

            # Set workflow totals for progress tracking
            total_tasks = len(tasks)
            if total_tasks == 0:
                raise ValueError("Task plan contains no valid tasks")

            self.setWorkflowTotals(total_tasks=total_tasks)

            logger.info(f"Task plan generated successfully with {len(tasks)} tasks")
            logger.info(f"Workflow user language set to: {user_language}")

            # PHASE 3: Create chat message containing the task plan
            await self.createTaskPlanMessage(task_plan, workflow)

            return task_plan
        except Exception as e:
            # Log and re-raise so the caller decides how to handle the failure.
            logger.error(f"Error in generateTaskPlan: {str(e)}")
            raise
|
|
|
|
async def createTaskPlanMessage(self, task_plan: TaskPlan, workflow):
|
|
"""Create a chat message containing the task plan with user-friendly messages"""
|
|
try:
|
|
# Build task plan summary
|
|
task_summary = f"📋 **Task Plan**\n\n"
|
|
|
|
# Get overall user message from task plan if available
|
|
overall_message = task_plan.userMessage
|
|
if overall_message:
|
|
task_summary += f"{overall_message}\n\n"
|
|
|
|
# Add each task with its user message
|
|
for i, task in enumerate(task_plan.tasks):
|
|
if task.userMessage:
|
|
task_summary += f"💬 {task.userMessage}\n"
|
|
task_summary += "\n"
|
|
|
|
|
|
# Create workflow message
|
|
message_data = {
|
|
"workflowId": workflow.id,
|
|
"role": "assistant",
|
|
"message": task_summary,
|
|
"status": "step",
|
|
"sequenceNr": len(workflow.messages) + 1,
|
|
"publishedAt": get_utc_timestamp(),
|
|
"documentsLabel": "task_plan",
|
|
"documents": [],
|
|
# Add workflow context fields - use current workflow round instead of hardcoded 1
|
|
"roundNumber": workflow.currentRound, # Use current workflow round
|
|
"taskNumber": 1, # Task plan is before individual tasks; to keep 1, that UI not filtering the message
|
|
"actionNumber": 0,
|
|
# Add task progress status
|
|
"taskProgress": "pending"
|
|
}
|
|
|
|
message = self.chatInterface.createMessage(message_data)
|
|
if message:
|
|
workflow.messages.append(message)
|
|
|
|
# PHASE 4: Update workflow object after task plan created
|
|
# Set currentTask=1, currentAction=0, totalTasks=len(task_plan.tasks), totalActions=0
|
|
self.updateWorkflowAfterTaskPlanCreated(len(task_plan.tasks))
|
|
|
|
logger.info(f"Task plan message created with {len(task_plan.tasks)} tasks")
|
|
else:
|
|
logger.error("Failed to create task plan message")
|
|
|
|
except Exception as e:
|
|
logger.error(f"Error creating task plan message: {str(e)}")
|
|
|
|
    async def generateTaskActions(self, task_step, workflow, previous_results=None, enhanced_context=None) -> List[TaskAction]:
        """Generate actions for a given task step.

        Builds a TaskContext (reusing `enhanced_context` on retries), prompts
        the AI for an action list, parses/validates the JSON response, and
        converts each entry into a TaskAction via `createTaskAction`.

        Args:
            task_step: the TaskStep these actions should accomplish.
            workflow: workflow object (needs `.id`).
            previous_results: results from earlier tasks, forwarded as context.
            enhanced_context: optional TaskContext carrying retry state
                (improvements, failure patterns, criteria progress).

        Returns:
            List of TaskAction objects, or [] on any error (errors are logged,
            not raised — callers treat an empty list as "no actions").
        """
        try:
            # Check workflow status before generating actions
            self._checkWorkflowStopped()

            retry_info = f" (Retry #{enhanced_context.retry_count})" if enhanced_context and enhanced_context.retry_count > 0 else ""
            logger.info(f"Generating actions for task: {task_step.objective}{retry_info}")

            # Log criteria progress if this is a retry
            if enhanced_context and hasattr(enhanced_context, 'criteria_progress') and enhanced_context.criteria_progress is not None:
                progress = enhanced_context.criteria_progress
                logger.info(f"Retry attempt {enhanced_context.retry_count} - Criteria progress:")
                if progress.get('met_criteria'):
                    logger.info(f" Met criteria: {', '.join(progress['met_criteria'])}")
                if progress.get('unmet_criteria'):
                    logger.warning(f" Unmet criteria: {', '.join(progress['unmet_criteria'])}")

                # Show improvement trends by comparing the last two attempts'
                # quality scores.
                if progress.get('attempt_history'):
                    recent_attempts = progress['attempt_history'][-2:]  # Last 2 attempts
                    if len(recent_attempts) >= 2:
                        prev_score = recent_attempts[0].get('quality_score', 0)
                        curr_score = recent_attempts[1].get('quality_score', 0)
                        if curr_score > prev_score:
                            logger.info(f" Quality improving: {prev_score} -> {curr_score}")
                        elif curr_score < prev_score:
                            logger.warning(f" Quality declining: {prev_score} -> {curr_score}")
                        else:
                            logger.info(f" Quality stable: {curr_score}")

            # Enhanced retry context logging
            if enhanced_context and enhanced_context.retry_count > 0:
                logger.info("=== RETRY CONTEXT FOR ACTION GENERATION ===")
                logger.info(f"Retry Count: {enhanced_context.retry_count}")
                logger.info(f"Previous Improvements: {enhanced_context.improvements}")
                logger.info(f"Previous Review Result: {enhanced_context.previous_review_result}")
                logger.info(f"Failure Patterns: {enhanced_context.failure_patterns}")
                logger.info(f"Failed Actions: {enhanced_context.failed_actions}")
                logger.info(f"Successful Actions: {enhanced_context.successful_actions}")
                logger.info("=== END RETRY CONTEXT ===")

            available_docs = self.service.getAvailableDocuments(workflow)
            available_connections = self.service.getConnectionReferenceList()

            # Log available resources for debugging
            logger.info("=== AVAILABLE RESOURCES FOR ACTION GENERATION ===")
            logger.info(f"Available Documents: {available_docs}")
            # Note: available_docs is now a string description, not a list
            logger.info(f"Available Connections: {len(available_connections) if available_connections else 0}")
            if available_connections:
                for i, conn in enumerate(available_connections[:5]):  # Show first 5
                    logger.info(f" Conn {i+1}: {conn}")
                if len(available_connections) > 5:
                    logger.info(f" ... and {len(available_connections) - 5} more connections")
            logger.info("=== END AVAILABLE RESOURCES ===")

            # Create proper context object for action definition
            if enhanced_context and isinstance(enhanced_context, TaskContext):
                # Use existing TaskContext if provided, backfilling any falsy
                # fields with freshly fetched resources / call arguments.
                action_context = TaskContext(
                    task_step=enhanced_context.task_step,
                    workflow=enhanced_context.workflow,
                    workflow_id=enhanced_context.workflow_id,
                    available_documents=enhanced_context.available_documents or available_docs,
                    available_connections=enhanced_context.available_connections or available_connections,
                    previous_results=enhanced_context.previous_results or previous_results or [],
                    previous_handover=enhanced_context.previous_handover,
                    improvements=enhanced_context.improvements or [],
                    retry_count=enhanced_context.retry_count or 0,
                    previous_action_results=enhanced_context.previous_action_results or [],
                    previous_review_result=enhanced_context.previous_review_result,
                    is_regeneration=enhanced_context.is_regeneration or False,
                    failure_patterns=enhanced_context.failure_patterns or [],
                    failed_actions=enhanced_context.failed_actions or [],
                    successful_actions=enhanced_context.successful_actions or [],
                    criteria_progress=enhanced_context.criteria_progress
                )
            else:
                # Create new context from scratch (first attempt, no retry state)
                action_context = TaskContext(
                    task_step=task_step,
                    workflow=workflow,
                    workflow_id=workflow.id,
                    available_documents=available_docs,
                    available_connections=available_connections,
                    previous_results=previous_results or [],
                    previous_handover=None,
                    improvements=[],
                    retry_count=0,
                    previous_action_results=[],
                    previous_review_result=None,
                    is_regeneration=False,
                    failure_patterns=[],
                    failed_actions=[],
                    successful_actions=[],
                    criteria_progress=None
                )

            # Check workflow status before calling AI service
            self._checkWorkflowStopped()

            # Log the final action context being sent to AI
            logger.info("=== FINAL ACTION CONTEXT FOR AI ===")
            logger.info(f"Task Step ID: {action_context.task_step.id if action_context.task_step else 'None'}")
            logger.info(f"Task Step Objective: {action_context.task_step.objective if action_context.task_step else 'None'}")
            logger.info(f"Workflow ID: {action_context.workflow_id}")
            logger.info(f"Available Documents: {action_context.available_documents or 'No documents available'}")
            logger.info(f"Available Connections Count: {len(action_context.available_connections) if action_context.available_connections else 0}")
            logger.info(f"Previous Results Count: {len(action_context.previous_results) if action_context.previous_results else 0}")
            logger.info(f"Retry Count: {action_context.retry_count}")
            logger.info(f"Is Regeneration: {action_context.is_regeneration}")
            logger.info("=== END ACTION CONTEXT ===")

            # Generate the action definition prompt
            action_prompt = await createActionDefinitionPrompt(action_context, self.service)
            prompt = await self.service.callAiTextAdvanced(action_prompt)

            # Check if AI response is valid
            if not prompt:
                raise ValueError("AI service returned no response")

            # Log the full AI response for debugging
            logger.debug("=== FULL AI RESPONSE ===")
            logger.debug(prompt)
            logger.debug("=== END AI RESPONSE ===")

            # Inline parseActionResponse logic here: extract the outermost
            # {...} span and parse it as JSON.
            json_start = prompt.find('{')
            json_end = prompt.rfind('}') + 1
            if json_start == -1 or json_end == 0:
                raise ValueError("No JSON found in response")
            json_str = prompt[json_start:json_end]

            try:
                action_data = json.loads(json_str)
            except Exception as e:
                # Parse failure falls through to the 'actions' check below,
                # which raises with a clearer message.
                logger.error(f"Error parsing action response JSON: {str(e)}")
                action_data = {}

            if 'actions' not in action_data:
                raise ValueError("Action response missing 'actions' field")

            actions = action_data['actions']
            if not actions:
                raise ValueError("Action response contains empty actions list")

            if not isinstance(actions, list):
                raise ValueError(f"Action response 'actions' field is not a list: {type(actions)}")

            if not self._validateActions(actions, action_context):
                logger.error("Generated actions failed validation")
                raise Exception("AI-generated actions failed validation - AI is required for action generation")

            # Convert to TaskAction objects, skipping malformed entries.
            task_actions = []
            for i, a in enumerate(actions):
                if not isinstance(a, dict):
                    logger.warning(f"Skipping invalid action {i+1}: not a dictionary")
                    continue

                task_action = self.createTaskAction({
                    "execMethod": a.get('method', 'unknown'),
                    "execAction": a.get('action', 'unknown'),
                    "execParameters": a.get('parameters', {}),
                    "execResultLabel": a.get('resultLabel', ''),
                    "expectedDocumentFormats": a.get('expectedDocumentFormats', None),
                    "status": TaskStatus.PENDING,
                    # Extract user-friendly message if available
                    "userMessage": a.get('userMessage', None)
                })

                if task_action:
                    task_actions.append(task_action)
                else:
                    logger.warning(f"Skipping invalid action {i+1}: failed to create TaskAction")

            # Defensive re-filter; createTaskAction failures were already
            # skipped above, so this is a no-op safety net.
            valid_actions = [ta for ta in task_actions if ta]

            if not valid_actions:
                raise ValueError("No valid actions could be created from AI response")

            return valid_actions
        except Exception as e:
            # Errors are swallowed here: callers interpret [] as
            # "no actions defined" and abort the task.
            logger.error(f"Error in generateTaskActions: {str(e)}")
            return []
|
|
|
|
    async def executeTask(self, task_step, workflow, context, task_index=None, total_tasks=None) -> TaskResult:
        """Execute all actions for a task step, with state management and retries.

        For each attempt (up to the execution state's max_retries): generates
        actions, executes them one by one (posting progress chat messages),
        then asks the AI reviewer whether the task succeeded. On 'retry' the
        retry context is enriched (improvements, failure patterns, criteria
        progress) and the loop continues; on success/failure a TaskResult is
        returned. Falling out of the loop (or breaking on "no actions")
        produces a final permanent-failure TaskResult.

        Args:
            task_step: the TaskStep to execute.
            workflow: workflow object (needs `.id`, `.messages`, `.currentRound`).
            context: TaskContext reused/mutated across retries.
            task_index: 1-based task number for progress reporting (optional).
            total_tasks: total task count for "i/n" progress strings (optional).

        Raises:
            WorkflowStoppedException: propagated from stop checks.
        """
        logger.info(f"=== STARTING TASK {task_index or '?'}: {task_step.objective} ===")

        # PHASE 4: Update workflow object before executing task
        # Set currentTask=task_number, currentAction=0, totalActions=0
        if task_index is not None:
            self.updateWorkflowBeforeExecutingTask(task_index)

        # Update workflow context for this task
        if task_index is not None:
            self.service.setWorkflowContext(task_number=task_index)
            # Remove the increment call that causes double-increment bug

        # Create database log entry for task start in format expected by frontend
        if task_index is not None:

            # Create a task start message for the user
            task_progress = f"{task_index}/{total_tasks}" if total_tasks is not None else str(task_index)
            task_start_message = {
                "workflowId": workflow.id,
                "role": "assistant",
                "message": f"🚀 **Task {task_progress}**",
                "status": "step",
                "sequenceNr": len(workflow.messages) + 1,
                "publishedAt": get_utc_timestamp(),
                "documentsLabel": f"task_{task_index}_start",
                "documents": [],
                # Add workflow context fields
                "roundNumber": workflow.currentRound,  # Use current workflow round
                "taskNumber": task_index,
                "actionNumber": 0,
                # Add task progress status
                "taskProgress": "running"
            }

            # Add user-friendly message if available
            if task_step.userMessage:
                task_start_message["message"] += f"\n\n💬 {task_step.userMessage}"

            message = self.chatInterface.createMessage(task_start_message)
            if message:
                workflow.messages.append(message)
                logger.info(f"Task start message created for task {task_index}")

        # Per-task execution state: retry counters plus successful/failed
        # action bookkeeping used to enrich retry prompts.
        state = TaskExecutionState(task_step)
        retry_context = context
        max_retries = state.max_retries
        for attempt in range(max_retries):
            logger.info(f"Task execution attempt {attempt+1}/{max_retries}")

            # Check workflow status before starting task execution
            self._checkWorkflowStopped()

            # Update retry context with current attempt information
            if retry_context:
                retry_context.retry_count = attempt + 1

            actions = await self.generateTaskActions(task_step, workflow, previous_results=retry_context.previous_results, enhanced_context=retry_context)

            # Log total actions count for this task
            total_actions = len(actions) if actions else 0
            logger.info(f"Task {task_index or '?'} has {total_actions} actions")

            # PHASE 4: Update workflow object after action planning
            # Set totalActions=extracted_total_actions for THIS task
            self.updateWorkflowAfterActionPlanning(total_actions)

            # Set workflow action total for this task (0 if no actions generated)
            self.setWorkflowTotals(total_actions=total_actions)

            if not actions:
                # No actions means no retry either — break straight to the
                # permanent-failure path after the loop.
                logger.error("No actions defined for task step, aborting task execution")
                break

            action_results = []
            for action_idx, action in enumerate(actions):
                # Check workflow status before each action execution
                self._checkWorkflowStopped()

                # PHASE 4: Update workflow object before executing action
                # Set currentAction=action_number
                action_number = action_idx + 1
                self.updateWorkflowBeforeExecutingAction(action_number)

                # Update workflow context for this action
                self.service.setWorkflowContext(action_number=action_number)
                # Remove the increment call that causes double-increment bug

                # Log action start in format expected by frontend
                logger.info(f"Task {task_index} - Starting action {action_number}/{total_actions}")

                # Create an action start message for the user
                action_start_message = {
                    "workflowId": workflow.id,
                    "role": "assistant",
                    "message": f"⚡ **Action {action_number}/{total_actions}** (Method {action.execMethod}.{action.execAction})",
                    "status": "step",
                    "sequenceNr": len(workflow.messages) + 1,
                    "publishedAt": get_utc_timestamp(),
                    "documentsLabel": f"action_{action_number}_start",
                    "documents": [],
                    # Add action progress status
                    "actionProgress": "running"
                }

                # Add user-friendly message if available
                if action.userMessage:
                    action_start_message["message"] += f"\n\n💬 {action.userMessage}"

                # Add workflow context fields - use current workflow round instead of hardcoded 1
                action_start_message.update({
                    "roundNumber": workflow.currentRound,  # Use current workflow round
                    "taskNumber": task_index,
                    "actionNumber": action_number
                })

                message = self.chatInterface.createMessage(action_start_message)
                if message:
                    workflow.messages.append(message)
                    logger.info(f"Action start message created for action {action_number}")

                # Pass action index to executeSingleAction with task context
                result = await self.executeSingleAction(action, workflow, task_step, task_index, action_number, total_actions)
                action_results.append(result)
                if result.success:
                    state.addSuccessfulAction(result)
                else:
                    state.addFailedAction(result)

            # Check workflow status before review
            self._checkWorkflowStopped()

            review_result = await self.reviewTaskCompletion(task_step, actions, action_results, workflow)
            success = review_result.status == 'success'
            feedback = review_result.reason
            error = None if success else review_result.reason
            if success:
                logger.info(f"=== TASK {task_index or '?'} COMPLETED SUCCESSFULLY: {task_step.objective} ===")

                # Create a task completion message for the user
                task_progress = f"{task_index}/{total_tasks}" if total_tasks is not None else str(task_index)

                # Enhanced completion message with criteria details
                completion_message = f"🎯 **Task {task_progress}**\n\n✅ {feedback or 'Task completed successfully'}"

                # Add criteria status if available
                if hasattr(review_result, 'met_criteria') and review_result.met_criteria:
                    for criterion in review_result.met_criteria:
                        completion_message += f"\n• {criterion}"

                if hasattr(review_result, 'quality_score'):
                    completion_message += f"\n📊 Score {review_result.quality_score}/10"

                task_completion_message = {
                    "workflowId": workflow.id,
                    "role": "assistant",
                    "message": completion_message,
                    "status": "step",
                    "sequenceNr": len(workflow.messages) + 1,
                    "publishedAt": get_utc_timestamp(),
                    "documentsLabel": f"task_{task_index}_completion",
                    "documents": [],
                    # Add workflow context fields
                    "roundNumber": workflow.currentRound,  # Use current workflow round
                    "taskNumber": task_index,
                    "actionNumber": 0,
                    # Add task progress status
                    "taskProgress": "success"
                }

                message = self.chatInterface.createMessage(task_completion_message)
                if message:
                    workflow.messages.append(message)
                    logger.info(f"Task completion message created for task {task_index}")

                return TaskResult(
                    taskId=task_step.id,
                    status=TaskStatus.COMPLETED,
                    success=True,
                    feedback=feedback,
                    error=None
                )

            elif review_result.status == 'retry' and state.canRetry():
                logger.warning(f"Task step '{task_step.objective}' requires retry: {review_result.improvements}")

                # Enhanced logging of criteria status
                if review_result.met_criteria:
                    logger.info(f"Met criteria: {', '.join(review_result.met_criteria)}")
                if review_result.unmet_criteria:
                    logger.warning(f"Unmet criteria: {', '.join(review_result.unmet_criteria)}")

                state.incrementRetryCount()

                # Update retry context with retry information and criteria
                # tracking so the next action-generation prompt can improve.
                if retry_context:
                    retry_context.retry_count = state.retry_count
                    retry_context.improvements = review_result.improvements
                    retry_context.previous_action_results = action_results
                    retry_context.previous_review_result = review_result
                    retry_context.is_regeneration = True
                    retry_context.failure_patterns = state.getFailurePatterns()
                    retry_context.failed_actions = state.failed_actions
                    retry_context.successful_actions = state.successful_actions

                    # Track criteria progress across retries
                    if not hasattr(retry_context, 'criteria_progress'):
                        retry_context.criteria_progress = {
                            'met_criteria': set(),
                            'unmet_criteria': set(),
                            'attempt_history': []
                        }

                    # Update criteria progress - sets deduplicate criteria
                    # accumulated across attempts.
                    if review_result.met_criteria:
                        retry_context.criteria_progress['met_criteria'].update(review_result.met_criteria)
                    if review_result.unmet_criteria:
                        retry_context.criteria_progress['unmet_criteria'].update(review_result.unmet_criteria)

                    # Record this attempt's criteria status
                    attempt_record = {
                        'attempt': state.retry_count,
                        'met_criteria': review_result.met_criteria or [],
                        'unmet_criteria': review_result.unmet_criteria or [],
                        'quality_score': review_result.quality_score,
                        'improvements': review_result.improvements or []
                    }
                    retry_context.criteria_progress['attempt_history'].append(attempt_record)

                    logger.info(f"Criteria progress after {state.retry_count} attempts:")
                    logger.info(f" Total met: {len(retry_context.criteria_progress['met_criteria'])}")
                    logger.info(f" Total unmet: {len(retry_context.criteria_progress['unmet_criteria'])}")
                    if retry_context.criteria_progress['met_criteria']:
                        logger.info(f" Met criteria: {', '.join(retry_context.criteria_progress['met_criteria'])}")
                    if retry_context.criteria_progress['unmet_criteria']:
                        logger.info(f" Unmet criteria: {', '.join(retry_context.criteria_progress['unmet_criteria'])}")

                # Log retry summary for debugging
                logger.info(f"=== RETRY #{state.retry_count} SUMMARY ===")
                logger.info(f"Task: {task_step.objective}")
                logger.info(f"Quality Score: {review_result.quality_score}/10")
                logger.info(f"Status: {review_result.status}")
                logger.info(f"Improvements Needed: {review_result.improvements}")
                logger.info(f"Reason: {review_result.reason}")
                logger.info("=== END RETRY SUMMARY ===")

                # Create retry message for user
                retry_message = {
                    "workflowId": workflow.id,
                    "role": "assistant",
                    "message": f"🔄 **Task {task_index}** needs retry: {review_result.improvements}",
                    "status": "step",
                    "sequenceNr": len(workflow.messages) + 1,
                    "publishedAt": get_utc_timestamp(),
                    "documentsLabel": f"task_{task_index}_retry",
                    "documents": [],
                    "roundNumber": workflow.currentRound,
                    "taskNumber": task_index,
                    "actionNumber": 0,
                    "taskProgress": "retry"
                }

                message = self.chatInterface.createMessage(retry_message)
                if message:
                    workflow.messages.append(message)

                continue
            else:
                # Review said neither success nor retry (or retries exhausted
                # per the state) — report the failure and return.
                logger.error(f"=== TASK {task_index or '?'} FAILED: {task_step.objective} after {attempt+1} attempts ===")
                task_progress = f"{task_index}/{total_tasks}" if total_tasks is not None else str(task_index)

                # Create user-facing error message for task failure
                error_message = f"**Task {task_progress}**\n\n❌ '{task_step.objective}' {attempt+1}x failed\n\n"

                # Add specific error details if available
                if review_result and hasattr(review_result, 'reason') and review_result.reason:
                    error_message += f"{review_result.reason}\n\n"

                # Add criteria progress information if available
                if retry_context and hasattr(retry_context, 'criteria_progress'):
                    progress = retry_context.criteria_progress
                    error_message += f"📊 **Details**\n"
                    if progress.get('met_criteria'):
                        error_message += f"✅ Met criteria: {', '.join(progress['met_criteria'])}\n"
                    if progress.get('unmet_criteria'):
                        error_message += f"❌ Unmet criteria: {', '.join(progress['unmet_criteria'])}\n"
                    error_message += "\n"

                # Add retry information
                error_message += f"Attempts: {attempt+1}\n"
                error_message += f"Status: Will retry automatically\n\n"
                error_message += "The system will attempt to retry this task. Please wait..."

                # Create workflow message for user
                message_data = {
                    "workflowId": workflow.id,
                    "role": "assistant",
                    "message": error_message,
                    "status": "step",
                    "sequenceNr": len(workflow.messages) + 1,
                    "publishedAt": get_utc_timestamp(),
                    "actionId": None,
                    "actionMethod": "task",
                    "actionName": "task_retry",
                    "documentsLabel": None,
                    "documents": [],
                    # Add workflow context fields
                    "roundNumber": workflow.currentRound,  # Use current workflow round
                    "taskNumber": task_index,
                    "actionNumber": 0,
                    # Add task progress status
                    "taskProgress": "retry"
                }

                try:
                    message = self.chatInterface.createMessage(message_data)
                    if message:
                        workflow.messages.append(message)
                        logger.info(f"Created user-facing retry message for failed task: {task_step.objective}")
                    else:
                        logger.error(f"Failed to create user-facing retry message for failed task: {task_step.objective}")
                except Exception as e:
                    logger.error(f"Error creating user-facing retry message: {str(e)}")

                return TaskResult(
                    taskId=task_step.id,
                    status=TaskStatus.FAILED,
                    success=False,
                    feedback=feedback,
                    error=review_result.reason if review_result and hasattr(review_result, 'reason') else "Task failed after retry attempts"
                )
        # Reached when every attempt ended in 'retry' (loop exhausted) or when
        # no actions could be generated (break above).
        logger.error(f"=== TASK {task_index or '?'} FAILED AFTER ALL RETRIES: {task_step.objective} ===")

        # Create user-facing error message for task failure
        error_message = f"**Task {task_index or '?'}**\n\n❌ '{task_step.objective}' failed after all retries\n\n"
        error_message += f"{task_step.objective}\n\n"

        # Add specific error details if available
        if retry_context and hasattr(retry_context, 'previous_review_result') and retry_context.previous_review_result:
            reason = retry_context.previous_review_result.reason or ''
            if reason and reason != "Task failed after all retries.":
                error_message += f"{reason}\n\n"

        # Add retry information
        error_message += f"Retries attempted: {retry_context.retry_count if retry_context else 'Unknown'}\n"
        error_message += f"Status: Task failed permanently"

        # Create workflow message for user
        message_data = {
            "workflowId": workflow.id,
            "role": "assistant",
            "message": error_message,
            "status": "step",
            "sequenceNr": len(workflow.messages) + 1,
            "publishedAt": get_utc_timestamp(),
            "actionId": None,
            "actionMethod": "task",
            "actionName": "task_failure",
            "documentsLabel": None,
            "documents": [],
            # Add workflow context fields
            "roundNumber": workflow.currentRound,  # Use current workflow round
            "taskNumber": task_index,
            "actionNumber": 0,
            # NEW: Add task progress status
            "taskProgress": "fail"
        }

        try:
            message = self.chatInterface.createMessage(message_data)
            if message:
                workflow.messages.append(message)
                logger.info(f"Created user-facing error message for failed task: {task_step.objective}")
            else:
                logger.error(f"Failed to create user-facing error message for failed task: {task_step.objective}")
        except Exception as e:
            logger.error(f"Error creating user-facing error message: {str(e)}")

        return TaskResult(
            taskId=task_step.id,
            status=TaskStatus.FAILED,
            success=False,
            feedback="Task failed after all retries.",
            error="Task failed after all retries."
        )
|
|
|
|
async def reviewTaskCompletion(self, task_step, task_actions, action_results, workflow):
    """
    Ask the AI service to judge whether an executed task met its objective.

    Builds a ReviewContext summarizing the executed actions and their results,
    sends a review prompt to the AI, extracts the JSON verdict embedded in the
    response and normalizes it into a ReviewResult.

    Args:
        task_step: The task under review (its objective is used in prompts/logs).
        task_actions: TaskAction objects that were executed for this task.
        action_results: ActionResult objects produced by those actions.
        workflow: Owning workflow; only workflow.id is used here.

    Returns:
        ReviewResult: the parsed review verdict. On any error — including a
        user-initiated workflow stop, whose WorkflowStoppedException is caught
        by the broad handler below (NOTE(review): confirm that swallowing the
        stop signal here is intended) — a ReviewResult with status='failed'
        and quality_score=0 is returned instead of raising.
    """
    try:
        # Check workflow status before reviewing task completion
        self._checkWorkflowStopped()

        logger.info(f"=== STARTING TASK COMPLETION REVIEW ===")
        logger.info(f"Task: {task_step.objective}")
        logger.info(f"Actions executed: {len(task_actions) if task_actions else 0}")
        logger.info(f"Action results: {len(action_results) if action_results else 0}")

        # Create proper context object for result review
        review_context = ReviewContext(
            task_step=task_step,
            task_actions=task_actions,
            action_results=action_results,
            step_result={
                'successful_actions': sum(1 for result in action_results if result.success),
                'total_actions': len(action_results),
                'results': [self._extractResultText(result) for result in action_results if result.success],
                'errors': [result.error for result in action_results if not result.success],
                'documents': [
                    {
                        'action_index': i,
                        'documents_count': len(result.documents) if result.documents else 0,
                        'documents': result.documents if result.documents else []
                    }
                    for i, result in enumerate(action_results)
                ]
            },
            workflow_id=workflow.id,
            previous_results=[]
        )

        # Check workflow status before calling AI service
        self._checkWorkflowStopped()

        # Use promptFactory for review prompt
        prompt = createResultReviewPrompt(review_context, self.service)

        # Log the full result review prompt being sent to AI for debugging
        logger.info("=== RESULT REVIEW PROMPT SENT TO AI ===")
        logger.info(f"Task: {task_step.objective}")
        logger.info(f"Action Results Count: {len(review_context.action_results) if review_context.action_results else 0}")
        logger.info(f"Task Actions Count: {len(review_context.task_actions) if review_context.task_actions else 0}")
        logger.info("=== FULL RESULT REVIEW PROMPT ===")
        logger.info(prompt)
        logger.info("=== END RESULT REVIEW PROMPT ===")

        response = await self.service.callAiTextAdvanced(prompt)

        # Log the full AI response for result review
        logger.info("=== RESULT REVIEW AI RESPONSE RECEIVED ===")
        logger.info(f"Response length: {len(response) if response else 0}")
        logger.debug("=== FULL RESULT REVIEW AI RESPONSE ===")
        logger.debug(response)
        logger.debug("=== END RESULT REVIEW AI RESPONSE ===")

        # Locate the outermost JSON object embedded in the AI response text
        json_start = response.find('{')
        json_end = response.rfind('}') + 1
        if json_start == -1 or json_end == 0:
            raise ValueError("No JSON found in review response")
        json_str = response[json_start:json_end]

        try:
            review = json.loads(json_str)
        except Exception as e:
            logger.error(f"Error parsing review response JSON: {str(e)}")
            review = {}

        # A verdict without a status is unusable; the raise is converted into a
        # failed ReviewResult by the outer handler. This also covers the
        # JSON-parse failure above, which leaves `review` empty.
        if 'status' not in review:
            raise ValueError("Review response missing 'status' field")
        review.setdefault('reason', 'No reason provided')
        review.setdefault('quality_score', 5)

        # Ensure improvements is a list (the AI sometimes returns a bare string)
        improvements = review.get('improvements', [])
        if isinstance(improvements, str):
            # Split string into list if it's a single improvement
            improvements = [improvements.strip()] if improvements.strip() else []
        elif not isinstance(improvements, list):
            improvements = []

        # Ensure all list fields are properly typed
        met_criteria = review.get('met_criteria', [])
        if not isinstance(met_criteria, list):
            met_criteria = []

        unmet_criteria = review.get('unmet_criteria', [])
        if not isinstance(unmet_criteria, list):
            unmet_criteria = []

        review_result = ReviewResult(
            status=review.get('status', 'unknown'),
            reason=review.get('reason', 'No reason provided'),
            improvements=improvements,
            quality_score=review.get('quality_score', 5),
            missing_outputs=[],
            met_criteria=met_criteria,
            unmet_criteria=unmet_criteria,
            confidence=review.get('confidence', 0.5),
            # Extract user-friendly message if available
            userMessage=review.get('userMessage', None)
        )

        # Enhanced validation logging
        logger.info(f"VALIDATION RESULT - Task: '{task_step.objective}' - Status: {review_result.status.upper()}, Quality: {review_result.quality_score}/10")
        if review_result.status == 'success':
            logger.info(f"VALIDATION SUCCESS - Task completed successfully")
            if review_result.met_criteria:
                logger.info(f"Met criteria: {', '.join(review_result.met_criteria)}")
        elif review_result.status == 'retry':
            logger.warning(f"VALIDATION RETRY - Task requires retry: {review_result.improvements}")
            if review_result.unmet_criteria:
                logger.warning(f"Unmet criteria: {', '.join(review_result.unmet_criteria)}")
        else:
            logger.error(f"VALIDATION FAILED - Task failed: {review_result.reason}")

        logger.info(f"=== TASK COMPLETION REVIEW FINISHED ===")
        logger.info(f"Final Status: {review_result.status}")
        logger.info(f"Quality Score: {review_result.quality_score}/10")
        logger.info(f"Improvements: {review_result.improvements}")
        logger.info("=== END REVIEW ===")

        return review_result
    except Exception as e:
        logger.error(f"Error in reviewTaskCompletion: {str(e)}")
        return ReviewResult(
            status='failed',
            reason=str(e),
            quality_score=0
        )
|
|
|
|
async def prepareTaskHandover(self, task_step, task_actions, task_result, workflow):
    """
    Assemble a handover payload describing a finished task.

    Accepts either a ReviewResult (detected by the presence of
    `met_criteria`) or a TaskResult for `task_result` and serializes it
    accordingly.

    Args:
        task_step: The completed task (id and objective are recorded).
        task_actions: TaskAction objects executed for the task; serialized
            via their to_dict().
        task_result: ReviewResult or TaskResult describing the outcome.
        workflow: Owning workflow; only workflow.id is recorded.

    Returns:
        dict: handover data, or {'error': ...} if anything goes wrong
        (including a user-initiated workflow stop caught by the broad
        handler).
    """
    try:
        # Check workflow status before preparing task handover
        self._checkWorkflowStopped()

        # Handle both TaskResult and ReviewResult objects
        if hasattr(task_result, 'met_criteria'):
            # This is a ReviewResult object - it knows how to serialize itself
            review_result = task_result.to_dict()
        else:
            # This is a TaskResult object - build a minimal summary dict
            review_result = {
                'status': task_result.status if task_result else 'unknown',
                'reason': task_result.error if task_result and hasattr(task_result, 'error') else None,
                'success': task_result.success if task_result else False
            }

        handover_data = {
            'task_id': task_step.id,
            'task_description': task_step.objective,
            'actions': [action.to_dict() for action in task_actions],
            'review_result': review_result,
            'workflow_id': workflow.id,
            'handover_time': get_utc_timestamp()
        }
        logger.info(f"Prepared handover for task {task_step.id} in workflow {workflow.id}")
        return handover_data
    except Exception as e:
        logger.error(f"Error in prepareTaskHandover: {str(e)}")
        return {'error': str(e)}
|
|
|
|
def createTaskAction(self, actionData: Dict[str, Any]) -> 'TaskAction':
    """
    Create a new TaskAction record in the database and return it as a model.

    Fills in defaults for missing fields (id, status, execParameters);
    execMethod and execAction are mandatory.

    Args:
        actionData: Raw action fields as produced by action planning.
            Mutated in place to apply defaults.

    Returns:
        TaskAction on success, None when a required field is missing or the
        database write fails.
    """
    try:
        # Ensure ID is present
        if "id" not in actionData or not actionData["id"]:
            actionData["id"] = f"action_{uuid.uuid4()}"

        # Default status for freshly created actions
        if "status" not in actionData:
            actionData["status"] = TaskStatus.PENDING

        # execMethod/execAction are mandatory - refuse to create without them
        if "execMethod" not in actionData:
            logger.error("execMethod is required for task action")
            return None

        if "execAction" not in actionData:
            logger.error("execAction is required for task action")
            return None

        if "execParameters" not in actionData:
            actionData["execParameters"] = {}

        # Use generic field separation based on TaskAction model; only the
        # simple fields are persisted here, so discard the object fields.
        simple_fields, _ = self.chatInterface._separate_object_fields(TaskAction, actionData)

        # Create action in database
        createdAction = self.chatInterface.db.recordCreate(TaskAction, simple_fields)

        # Convert the stored record back into a TaskAction model.
        # NOTE(review): float(...) assumes the stored/default timestamp is
        # numeric-convertible - confirm what get_utc_timestamp() returns.
        return TaskAction(
            id=createdAction["id"],
            execMethod=createdAction["execMethod"],
            execAction=createdAction["execAction"],
            execParameters=createdAction.get("execParameters", {}),
            execResultLabel=createdAction.get("execResultLabel"),
            expectedDocumentFormats=createdAction.get("expectedDocumentFormats"),
            status=createdAction.get("status", TaskStatus.PENDING),
            error=createdAction.get("error"),
            retryCount=createdAction.get("retryCount", 0),
            retryMax=createdAction.get("retryMax", 3),
            processingTime=createdAction.get("processingTime"),
            timestamp=float(createdAction.get("timestamp", get_utc_timestamp())),
            result=createdAction.get("result"),
            resultDocuments=createdAction.get("resultDocuments", []),
            userMessage=createdAction.get("userMessage")
        )

    except Exception as e:
        logger.error(f"Error creating task action: {str(e)}")
        return None
|
|
|
|
# --- Helper action handling methods ---
|
|
|
|
async def executeSingleAction(self, action, workflow, task_step, task_index=None, action_index=None, total_actions=None):
    """
    Execute a single action and return an ActionResult with enhanced document
    processing.

    Delegates execution to self.service.executeAction, then on success creates
    a workflow message first (so its id can be attached to generated
    documents), creates the documents, and links them back onto the message.
    On failure it still creates a user-visible error message plus a database
    log entry.

    Args:
        action: The TaskAction to run (execMethod/execAction/execParameters).
        workflow: Owning workflow (used for messages and logs).
        task_step: The task this action belongs to (passed to message creation).
        task_index: 1-based task number for logging, or None.
        action_index: 1-based action number for logging, or None.
        total_actions: Total actions in this task, used in the failure log.

    Returns:
        ActionResult: mirrors the service result's success/documents/error,
        always carrying the action's own execResultLabel. Exceptions are
        caught and converted into a failed ActionResult.
    """
    try:
        # Check workflow status before executing action
        self._checkWorkflowStopped()

        # Use passed indices or fallback to '?'
        task_num = task_index if task_index is not None else '?'
        action_num = action_index if action_index is not None else '?'

        logger.info(f"=== TASK {task_num} ACTION {action_num}: {action.execMethod}.{action.execAction} ===")

        # Log input parameters
        input_docs = action.execParameters.get('documentList', [])
        input_connections = action.execParameters.get('connections', [])
        logger.info(f"Input documents: {input_docs} (type: {type(input_docs)})")
        if input_connections:
            logger.info(f"Input connections: {input_connections}")

        # Log all action parameters for debugging
        logger.info(f"All action parameters: {action.execParameters}")

        # Copy so the stored action parameters are not mutated
        enhanced_parameters = action.execParameters.copy()
        if action.expectedDocumentFormats:
            enhanced_parameters['expectedDocumentFormats'] = action.expectedDocumentFormats
            logger.info(f"Expected formats: {action.expectedDocumentFormats}")

        # Check workflow status before executing the action
        self._checkWorkflowStopped()

        result = await self.service.executeAction(
            methodName=action.execMethod,
            actionName=action.execAction,
            parameters=enhanced_parameters
        )
        result_label = action.execResultLabel

        # Process documents from the action result
        created_documents = []
        if result.success:
            action.setSuccess()
            # Extract result text from documents if available, otherwise use empty string
            action.result = ""
            if result.documents and len(result.documents) > 0:
                # Try to get text content from the first document
                first_doc = result.documents[0]
                if isinstance(first_doc.documentData, dict):
                    action.result = first_doc.documentData.get("result", "")
                elif isinstance(first_doc.documentData, str):
                    action.result = first_doc.documentData
            # Preserve the action's execResultLabel for document routing
            # Action methods should NOT return resultLabel - this is managed by the action handler
            if not action.execResultLabel:
                logger.warning(f"Action {action.execMethod}.{action.execAction} has no execResultLabel set")
            # Always use the action's execResultLabel for message creation to ensure proper document routing
            message_result_label = action.execResultLabel

            # Create message first to get messageId, then create documents with messageId
            message = await self.createActionMessage(action, result, workflow, message_result_label, [], task_step, task_index)
            if message:
                # Now create documents with the messageId
                created_documents = self.documentGenerator.createDocumentsFromActionResult(result, action, workflow, message.id)
                # Update the message with the created documents
                if created_documents:
                    message.documents = created_documents
                    # Update the message in the database
                    self.chatInterface.updateMessage(message.id, {"documents": [doc.to_dict() for doc in created_documents]})

            # Log action results
            logger.info(f"Action completed successfully")

            if created_documents:
                logger.info(f"Output documents ({len(created_documents)}):")
                for i, doc in enumerate(created_documents):
                    logger.info(f"  {i+1}. {doc.fileName}")

                # Log document details for debugging
                logger.info("Document details:")
                for i, doc in enumerate(created_documents):
                    logger.info(f"  Doc {i+1}: fileName={doc.fileName}, type={type(doc)}")
                    logger.info(f"    ID: {doc.id}")
                    logger.info(f"    File ID: {doc.fileId}")
            else:
                logger.info("Output: No documents created")
        else:
            action.setError(result.error or "Action execution failed")
            logger.error(f"Action failed: {result.error}")

            # ⚠️ IMPORTANT: Create error message for failed actions so user can see what went wrong
            message = await self.createActionMessage(action, result, workflow, result_label, [], task_step, task_index)

            # Create database log entry for action failure
            self.chatInterface.createLog({
                "workflowId": workflow.id,
                "message": f"❌ **Task {task_num}**\n\n❌ **Action {action_num}/{total_actions}** failed: {result.error}",
                "type": "error"
            })

        # Log action summary
        logger.info(f"=== TASK {task_num} ACTION {action_num} COMPLETED ===")

        # Preserve the original documents field from the method result
        # This ensures the standard document format is maintained
        original_documents = result.documents

        return ActionResult(
            success=result.success,
            documents=original_documents,  # Preserve original documents field from method result
            resultLabel=action.execResultLabel,  # Always use action's execResultLabel
            error=result.error or ""
        )
    except Exception as e:
        logger.error(f"Error executing single action: {str(e)}")
        action.setError(str(e))
        return ActionResult(
            success=False,
            documents=[],  # Empty documents for error case
            resultLabel=action.execResultLabel,
            error=str(e)
        )
|
|
|
|
async def createActionMessage(self, action, result, workflow, result_label=None, created_documents=None, task_step=None, task_index=None):
    """
    Create and store a workflow message describing an action's outcome.

    Builds a user-friendly success (✅) or failure (❌) message from the
    action result and current workflow context, persists it via
    chatInterface.createMessage and appends it to workflow.messages.

    Args:
        action: The executed TaskAction (id/method/name go into the message).
        result: The action's execution result (success flag and error text).
        workflow: Owning workflow; the message is appended to its messages.
        result_label: Documents label; defaults to action.execResultLabel.
        created_documents: Documents to attach to the message, or None.
        task_step: Task the action belongs to (objective used in the text).
        task_index: Unused here; kept for call-site compatibility.

    Returns:
        The created message object, or None on failure (errors are logged,
        not raised).
    """
    try:
        # Check workflow status before creating action message
        self._checkWorkflowStopped()

        if result_label is None:
            result_label = action.execResultLabel

        # Log delivered documents
        if created_documents:
            logger.info(f"Result label: {result_label} - {len(created_documents)} documents")
        else:
            logger.info(f"Result label: {result_label} - No documents")

        # Get current workflow context and stats
        workflow_context = self.service.getWorkflowContext()
        workflow_stats = self.service.getWorkflowStats()

        # Create a more meaningful message that includes task context
        task_objective = task_step.objective if task_step else 'Unknown task'

        # Add comprehensive workflow context
        current_round = workflow_context.get('currentRound', 0)
        current_task = workflow_context.get('currentTask', 0)
        current_action = workflow_context.get('currentAction', 0)
        total_actions = workflow_stats.get('totalActions', 0)

        # Build a user-friendly message based on success/failure
        if result.success:
            message_text = f"**Action {current_action}/{total_actions} ({action.execMethod}.{action.execAction})**\n\n"
            message_text += f"✅ {task_objective}\n\n"
        else:
            # ⚠️ FAILURE MESSAGE - Show error details to user
            error_details = result.error if result.error else "Unknown error occurred"
            message_text = f"**Action {current_action}/{total_actions} ({action.execMethod}.{action.execAction})**\n\n"
            message_text += f"❌ {task_objective}\n\n"
            message_text += f"{error_details}\n\n"

        message_data = {
            "workflowId": workflow.id,
            "role": "assistant",
            "message": message_text,
            "status": "step",
            "sequenceNr": len(workflow.messages) + 1,
            "publishedAt": get_utc_timestamp(),
            "actionId": action.id,
            "actionMethod": action.execMethod,
            "actionName": action.execAction,
            "documentsLabel": result_label,
            "documents": created_documents,
            # Add workflow context fields - extract from result_label to match document reference
            "roundNumber": current_round,
            "taskNumber": current_task,
            "actionNumber": current_action,
            "actionProgress": "success" if result.success else "fail"
        }

        # Add debugging for error messages
        if not result.success:
            logger.info(f"Creating ERROR message: {message_text}")
            logger.info(f"Message data: {message_data}")

        message = self.chatInterface.createMessage(message_data)
        if message:
            workflow.messages.append(message)
            logger.info(f"Message created: {action.execMethod}.{action.execAction}")
            return message
        else:
            logger.error(f"Failed to create workflow message for action {action.execMethod}.{action.execAction}")
            return None
    except Exception as e:
        logger.error(f"Error creating action message: {str(e)}")
        return None
|
|
|
|
# --- Helper validation methods ---
|
|
|
|
def _validateTaskPlan(self, task_plan: Dict[str, Any]) -> bool:
    """
    Validate the structure of an AI-generated task plan.

    Checks that the plan is a dict with a 'tasks' list; that each task is a
    dict carrying id/objective/success_criteria; that task ids are unique;
    and that every dependency refers to an existing task id (or the special
    'task_0' sentinel).

    Args:
        task_plan: Parsed task plan dictionary from the AI.

    Returns:
        bool: True when the plan is structurally valid, False otherwise
        (the specific problem is logged).
    """
    try:
        if not isinstance(task_plan, dict):
            logger.error("Task plan is not a dictionary")
            return False

        if 'tasks' not in task_plan or not isinstance(task_plan['tasks'], list):
            logger.error(f"Task plan missing 'tasks' field or not a list. Found: {type(task_plan.get('tasks', 'MISSING'))}")
            return False

        # First pass: collect all task IDs to validate dependencies.
        # Keep both a set (for fast dependency lookup) and a list (so
        # duplicates are not silently collapsed).
        task_ids = set()
        all_ids = []
        for task in task_plan['tasks']:
            if not isinstance(task, dict):
                logger.error(f"Task is not a dictionary: {type(task)}")
                return False
            if 'id' not in task:
                logger.error(f"Task missing 'id' field: {task}")
                return False
            task_ids.add(task['id'])
            all_ids.append(task['id'])

        # Second pass: validate each task
        for i, task in enumerate(task_plan['tasks']):

            if not isinstance(task, dict):
                logger.error(f"Task {i} is not a dictionary: {type(task)}")
                return False

            required_fields = ['id', 'objective', 'success_criteria']
            missing_fields = [field for field in required_fields if field not in task]
            if missing_fields:
                logger.error(f"Task {i} missing required fields: {missing_fields}")
                return False

            # Reject duplicate IDs. FIX: the previous check counted task
            # dicts equal to the id string, which was always 0, so duplicate
            # detection never fired; count the collected ids instead.
            if all_ids.count(task['id']) > 1:
                logger.error(f"Task {i} has duplicate ID: {task['id']}")
                return False

            dependencies = task.get('dependencies', [])
            if not isinstance(dependencies, list):
                logger.error(f"Task {i} dependencies is not a list: {type(dependencies)}")
                return False

            # 'task_0' is allowed as a virtual root dependency
            for dep in dependencies:
                if dep not in task_ids and dep != 'task_0':
                    logger.error(f"Task {i} has invalid dependency: {dep} (available: {list(task_ids) + ['task_0']})")
                    return False

        logger.info(f"Task plan validation successful with {len(task_ids)} tasks")
        return True

    except Exception as e:
        logger.error(f"Error validating task plan: {str(e)}")
        return False
|
|
|
|
def _extractActionNumberFromLabel(self, label: str) -> int:
|
|
"""Extract action number from a document label like 'round1_task1_action1_diagram_analysis'"""
|
|
try:
|
|
if not label or not isinstance(label, str):
|
|
return 0
|
|
|
|
# Parse label format: round{round}_task{task}_action{action}_{context}
|
|
if '_action' in label:
|
|
action_part = label.split('_action')[1]
|
|
if action_part and '_' in action_part:
|
|
action_number = action_part.split('_')[0]
|
|
return int(action_number)
|
|
|
|
return 0
|
|
except Exception as e:
|
|
logger.warning(f"Could not extract action number from label '{label}': {str(e)}")
|
|
return 0
|
|
|
|
def _validateActions(self, actions: List[Dict[str, Any]], context) -> bool:
    """
    Check that a generated action list is structurally valid.

    Each action must be a dict with non-empty method/action/parameters/
    resultLabel fields, a resultLabel starting with 'round', and a dict for
    parameters. Problems are logged and cause a False return.

    Args:
        actions: Candidate action dicts produced by action planning.
        context: Unused here; kept for call-site compatibility.

    Returns:
        bool: True when every action passes all checks.
    """
    try:
        if not isinstance(actions, list):
            logger.error("Actions must be a list")
            return False
        if len(actions) == 0:
            logger.warning("No actions generated")
            return False

        for i, action in enumerate(actions):
            if not isinstance(action, dict):
                logger.error(f"Action {i} must be a dictionary")
                return False

            # NOTE(review): a present-but-falsy value (e.g. an empty
            # parameters dict) is treated the same as a missing key here -
            # confirm that empty parameters are really invalid.
            missing_fields = [
                field
                for field in ('method', 'action', 'parameters', 'resultLabel')
                if not action.get(field)
            ]
            if missing_fields:
                logger.error(f"Action {i} missing required fields: {missing_fields}")
                return False

            result_label = action.get('resultLabel', '')
            if not result_label.startswith('round'):
                logger.error(f"Action {i} result label must start with 'round': {result_label}")
                return False

            parameters = action.get('parameters', {})
            if not isinstance(parameters, dict):
                logger.error(f"Action {i} parameters must be a dictionary")
                return False

        logger.info(f"Successfully validated {len(actions)} actions")
        return True
    except Exception as e:
        logger.error(f"Error validating actions: {str(e)}")
        return False
|
|
|
|
def _extractResultText(self, result: ActionResult) -> str:
    """Extract result text from ActionResult documents"""
    # Nothing to extract from a failed result or one without documents.
    if not (result.success and result.documents):
        return ""

    # Only the first document's payload is consulted.
    payload = result.documents[0].documentData
    if isinstance(payload, dict):
        return payload.get("result", "")
    if isinstance(payload, str):
        return payload
    return ""
|
|
|
|
# PHASE 4: Workflow Object Update Rules Implementation
|
|
|
|
def updateWorkflowAfterTaskPlanCreated(self, total_tasks: int):
    """
    Update workflow object after task plan is created.
    Rule: Set currentTask=1, currentAction=0, totalTasks=extracted_total_tasks, totalActions=0
    """
    try:
        update_data = {
            "currentTask": 1,
            "currentAction": 0,
            "totalTasks": total_tasks,
            "totalActions": 0
        }

        # Mirror the change on the in-memory workflow object first...
        for attr, value in update_data.items():
            setattr(self.workflow, attr, value)

        # ...then persist the same values to the database.
        self.chatInterface.updateWorkflow(self.workflow.id, update_data)
        logger.info(f"Updated workflow {self.workflow.id} after task plan created: {update_data}")

    except Exception as e:
        logger.error(f"Error updating workflow after task plan created: {str(e)}")
|
|
|
|
def updateWorkflowBeforeExecutingTask(self, task_number: int):
    """
    Update workflow object before executing a task.
    Rule: Set currentTask=task_number, currentAction=0, totalActions=0
    """
    try:
        update_data = {
            "currentTask": task_number,
            "currentAction": 0,
            "totalActions": 0
        }

        # Mirror the change on the in-memory workflow object first...
        for attr, value in update_data.items():
            setattr(self.workflow, attr, value)

        # ...then persist the same values to the database.
        self.chatInterface.updateWorkflow(self.workflow.id, update_data)
        logger.info(f"Updated workflow {self.workflow.id} before executing task {task_number}: {update_data}")

    except Exception as e:
        logger.error(f"Error updating workflow before executing task: {str(e)}")
|
|
|
|
def updateWorkflowAfterActionPlanning(self, total_actions: int):
    """
    Update workflow object after action planning for current task.
    Rule: Set totalActions=extracted_total_actions for THIS task
    """
    try:
        update_data = {"totalActions": total_actions}

        # Keep the in-memory object and the database record in sync.
        self.workflow.totalActions = total_actions
        self.chatInterface.updateWorkflow(self.workflow.id, update_data)
        logger.info(f"Updated workflow {self.workflow.id} after action planning: {update_data}")

    except Exception as e:
        logger.error(f"Error updating workflow after action planning: {str(e)}")
|
|
|
|
def updateWorkflowBeforeExecutingAction(self, action_number: int):
    """
    Update workflow object before executing an action.
    Rule: Set currentAction=action_number
    """
    try:
        update_data = {"currentAction": action_number}

        # Keep the in-memory object and the database record in sync.
        self.workflow.currentAction = action_number
        self.chatInterface.updateWorkflow(self.workflow.id, update_data)
        logger.info(f"Updated workflow {self.workflow.id} before executing action {action_number}: {update_data}")

    except Exception as e:
        logger.error(f"Error updating workflow before executing action: {str(e)}")
|
|
|
|
def setWorkflowTotals(self, total_tasks: int = None, total_actions: int = None):
    """Set total counts for workflow progress tracking and update database"""
    try:
        # Collect only the totals that were actually supplied.
        pending = {}
        if total_tasks is not None:
            pending["totalTasks"] = total_tasks
        if total_actions is not None:
            pending["totalActions"] = total_actions

        # Mirror onto the in-memory workflow object.
        for attr, value in pending.items():
            setattr(self.workflow, attr, value)

        # Persist to the database only when something changed.
        if pending:
            self.chatInterface.updateWorkflow(self.workflow.id, pending)
            logger.info(f"Updated workflow {self.workflow.id} totals in database: {pending}")

        logger.debug(f"Updated workflow totals: Tasks {self.workflow.totalTasks if hasattr(self.workflow, 'totalTasks') else 'N/A'}, Actions {self.workflow.totalActions if hasattr(self.workflow, 'totalActions') else 'N/A'}")
    except Exception as e:
        logger.error(f"Error setting workflow totals: {str(e)}")
|
|
|
|
def resetWorkflowForNewSession(self):
    """Reset workflow values for a new workflow session"""
    try:
        # Canonical initial state for a fresh session.
        initial_state = {
            "currentRound": 0,
            "currentTask": 0,
            "currentAction": 0,
            "totalTasks": 0,
            "totalActions": 0,
            "status": "ready"
        }

        # Apply to the in-memory workflow object...
        for attr, value in initial_state.items():
            setattr(self.workflow, attr, value)

        # ...and persist the same reset values to the database.
        self.chatInterface.updateWorkflow(self.workflow.id, initial_state)

        logger.info("Workflow reset for new session - all values set to initial state and updated in database")
    except Exception as e:
        logger.error(f"Error resetting workflow for new session: {str(e)}")