# (removed file-viewer extraction artifact: "831 lines" / "40 KiB" / "Python" metadata lines)
# modeActionplan.py
|
|
# Actionplan mode implementation for workflows
|
|
|
|
import json
|
|
import logging
|
|
import uuid
|
|
from typing import List, Dict, Any
|
|
from modules.datamodels.datamodelChat import (
|
|
TaskStep, TaskContext, TaskResult, ActionItem, TaskStatus,
|
|
ActionResult, ReviewResult, ReviewContext
|
|
)
|
|
from modules.datamodels.datamodelChat import ChatWorkflow
|
|
from modules.datamodels.datamodelAi import AiCallOptions, OperationType, ProcessingMode, Priority
|
|
from modules.workflows.processing.modes.modeBase import BaseMode
|
|
from modules.workflows.processing.shared.executionState import TaskExecutionState
|
|
from modules.workflows.processing.shared.promptGenerationActionsActionplan import (
|
|
generateActionDefinitionPrompt,
|
|
generateResultReviewPrompt
|
|
)
|
|
|
|
logger = logging.getLogger(__name__)
|
|
|
|
class ActionplanMode(BaseMode):
    """Actionplan mode implementation - batch planning and sequential execution.

    Plans a full batch of actions for each task step up front (one AI call),
    executes them sequentially, then reviews the combined result and retries
    the whole task with enriched context when the review asks for it.
    """

    def __init__(self, services, workflow):
        # All shared wiring (services container, workflow reference, and the
        # validator/messageCreator/actionExecutor helpers used by the methods
        # below — presumably provided by BaseMode, confirm) is delegated.
        super().__init__(services, workflow)
|
    async def generateActionItems(self, taskStep: TaskStep, workflow: ChatWorkflow,
                                  previousResults: List = None, enhancedContext: TaskContext = None) -> List[ActionItem]:
        """Generate actions for a given task step using batch planning approach.

        Builds a TaskContext (copying retry bookkeeping from ``enhancedContext``
        when present), asks the planning AI for a complete batch of actions,
        parses the JSON reply, validates it, and persists each action via
        ``_createActionItem``.

        Args:
            taskStep: The task step whose objective the actions must fulfil.
            workflow: The owning chat workflow (used for stop-checks and its id).
            previousResults: Results from earlier tasks; only used when no
                ``enhancedContext`` is supplied (or when its own list is empty).
            enhancedContext: Optional pre-built TaskContext carrying retry
                state (improvements, failure patterns, criteria progress).

        Returns:
            A list of persisted ActionItem objects; an empty list on any
            failure (the caller treats ``[]`` as "abort this task").
        """
        try:
            # Check workflow status before generating actions
            self._checkWorkflowStopped(workflow)

            retryInfo = f" (Retry #{enhancedContext.retry_count})" if enhancedContext and enhancedContext.retry_count > 0 else ""
            logger.info(f"Generating actions for task: {taskStep.objective}{retryInfo}")

            # Log criteria progress if this is a retry
            if enhancedContext and hasattr(enhancedContext, 'criteria_progress') and enhancedContext.criteria_progress is not None:
                progress = enhancedContext.criteria_progress
                logger.info(f"Retry attempt {enhancedContext.retry_count} - Criteria progress:")
                if progress.get('met_criteria'):
                    logger.info(f"  Met criteria: {', '.join(progress['met_criteria'])}")
                if progress.get('unmet_criteria'):
                    logger.warning(f"  Unmet criteria: {', '.join(progress['unmet_criteria'])}")

                # Show improvement trends across the two most recent attempts
                if progress.get('attempt_history'):
                    recentAttempts = progress['attempt_history'][-2:]  # Last 2 attempts
                    if len(recentAttempts) >= 2:
                        prevScore = recentAttempts[0].get('quality_score', 0)
                        currScore = recentAttempts[1].get('quality_score', 0)
                        if currScore > prevScore:
                            logger.info(f"  Quality improving: {prevScore} -> {currScore}")
                        elif currScore < prevScore:
                            logger.warning(f"  Quality declining: {prevScore} -> {currScore}")
                        else:
                            logger.info(f"  Quality stable: {currScore}")

            # Enhanced retry context logging
            if enhancedContext and enhancedContext.retry_count > 0:
                logger.info("=== RETRY CONTEXT FOR ACTION GENERATION ===")
                logger.info(f"Retry Count: {enhancedContext.retry_count}")
                logger.debug(f"Previous Improvements: {enhancedContext.improvements}")
                logger.debug(f"Previous Review Result: {enhancedContext.previous_review_result}")
                logger.debug(f"Failure Patterns: {enhancedContext.failure_patterns}")
                logger.debug(f"Failed Actions: {enhancedContext.failed_actions}")
                logger.debug(f"Successful Actions: {enhancedContext.successful_actions}")
                logger.info("=== END RETRY CONTEXT ===")

            # Log that we're starting action generation
            logger.info("=== STARTING ACTION GENERATION ===")

            # Create proper context object for action definition.
            if enhancedContext and isinstance(enhancedContext, TaskContext):
                # Copy the existing TaskContext (normalizing None collections
                # to empty defaults) so the prompt builder gets a stable shape.
                actionContext = TaskContext(
                    task_step=enhancedContext.task_step,
                    workflow=enhancedContext.workflow,
                    workflow_id=enhancedContext.workflow_id,
                    available_documents=enhancedContext.available_documents,
                    available_connections=enhancedContext.available_connections,
                    previous_results=enhancedContext.previous_results or previousResults or [],
                    previous_handover=enhancedContext.previous_handover,
                    improvements=enhancedContext.improvements or [],
                    retry_count=enhancedContext.retry_count or 0,
                    previous_action_results=enhancedContext.previous_action_results or [],
                    previous_review_result=enhancedContext.previous_review_result,
                    is_regeneration=enhancedContext.is_regeneration or False,
                    failure_patterns=enhancedContext.failure_patterns or [],
                    failed_actions=enhancedContext.failed_actions or [],
                    successful_actions=enhancedContext.successful_actions or [],
                    criteria_progress=enhancedContext.criteria_progress
                )
            else:
                # Create new context from scratch (first attempt, no retry state)
                actionContext = TaskContext(
                    task_step=taskStep,
                    workflow=workflow,
                    workflow_id=workflow.id,
                    available_documents=None,
                    available_connections=None,
                    previous_results=previousResults or [],
                    previous_handover=None,
                    improvements=[],
                    retry_count=0,
                    previous_action_results=[],
                    previous_review_result=None,
                    is_regeneration=False,
                    failure_patterns=[],
                    failed_actions=[],
                    successful_actions=[],
                    criteria_progress=None
                )

            # Check workflow status before calling AI service
            self._checkWorkflowStopped(workflow)

            # Build prompt bundle (template + placeholders)
            bundle = generateActionDefinitionPrompt(self.services, actionContext)
            actionPromptTemplate = bundle.prompt
            placeholders = bundle.placeholders

            # Trace action planning prompt
            self._writeTraceLog("Action Plan Prompt", actionPromptTemplate)
            self._writeTraceLog("Action Plan Placeholders", placeholders)

            # Centralized AI call: Action planning (quality, detailed) with placeholders
            options = AiCallOptions(
                operationType=OperationType.GENERATE_PLAN,
                priority=Priority.QUALITY,
                compressPrompt=False,
                compressContext=False,
                processingMode=ProcessingMode.DETAILED,
                maxCost=0.10,
                maxProcessingTime=30
            )

            # NOTE(review): variable name `prompt` actually holds the AI *response*.
            prompt = await self.services.ai.coreAi.callAiPlanning(prompt=actionPromptTemplate, placeholders=placeholders, options=options)

            # Check if AI response is valid
            if not prompt:
                raise ValueError("AI service returned no response")

            # Log action response received
            logger.info("=== ACTION PLAN AI RESPONSE RECEIVED ===")
            logger.info(f"Response length: {len(prompt) if prompt else 0}")
            # Trace action planning response
            self._writeTraceLog("Action Plan Response", prompt)

            # Parse action response: extract the outermost {...} span.
            jsonStart = prompt.find('{')
            jsonEnd = prompt.rfind('}') + 1
            if jsonStart == -1 or jsonEnd == 0:
                raise ValueError("No JSON found in response")
            jsonStr = prompt[jsonStart:jsonEnd]

            try:
                actionData = json.loads(jsonStr)
            except Exception as e:
                # NOTE(review): swallowing the parse error into {} means the
                # caller only ever sees "missing 'actions' field" below, which
                # masks the real JSON problem — consider re-raising instead.
                logger.error(f"Error parsing action response JSON: {str(e)}")
                actionData = {}

            if 'actions' not in actionData:
                raise ValueError("Action response missing 'actions' field")

            actions = actionData['actions']
            if not actions:
                raise ValueError("Action response contains empty actions list")

            if not isinstance(actions, list):
                raise ValueError(f"Action response 'actions' field is not a list: {type(actions)}")

            if not self.validator.validateAction(actions, actionContext):
                logger.error("Generated actions failed validation")
                raise Exception("AI-generated actions failed validation - AI is required for action generation")

            # Convert to ActionItem objects
            taskActions = []
            for i, a in enumerate(actions):
                if not isinstance(a, dict):
                    logger.warning(f"Skipping invalid action {i+1}: not a dictionary")
                    continue

                # Handle compound action format (new) or separate method/action format (old)
                action_name = a.get('action', 'unknown')
                if '.' in action_name:
                    # New compound action format: "method.action"
                    method_name, action_name = action_name.split('.', 1)
                else:
                    # Old separate format: method + action fields
                    method_name = a.get('method', 'unknown')

                taskAction = self._createActionItem({
                    "execMethod": method_name,
                    "execAction": action_name,
                    "execParameters": a.get('parameters', {}),
                    "execResultLabel": a.get('resultLabel', ''),
                    "expectedDocumentFormats": a.get('expectedDocumentFormats', None),
                    "status": TaskStatus.PENDING,
                    # Extract user-friendly message if available
                    "userMessage": a.get('userMessage', None)
                })

                if taskAction:
                    taskActions.append(taskAction)
                else:
                    logger.warning(f"Skipping invalid action {i+1}: failed to create ActionItem")

            # NOTE(review): redundant filter — only truthy items are ever
            # appended to taskActions above.
            validActions = [ta for ta in taskActions if ta]

            if not validActions:
                raise ValueError("No valid actions could be created from AI response")

            return validActions
        except Exception as e:
            # Broad catch by design: any planning failure degrades to [] and
            # the caller (executeTask) aborts the task.
            logger.error(f"Error in generateActionItems: {str(e)}")
            return []
|
async def executeTask(self, taskStep: TaskStep, workflow: ChatWorkflow, context: TaskContext,
|
|
taskIndex: int = None, totalTasks: int = None) -> TaskResult:
|
|
"""Execute all actions for a task step using Actionplan mode"""
|
|
logger.info(f"=== STARTING TASK {taskIndex or '?'}: {taskStep.objective} ===")
|
|
|
|
# Update workflow object before executing task
|
|
if taskIndex is not None:
|
|
self._updateWorkflowBeforeExecutingTask(taskIndex)
|
|
|
|
# Update workflow context for this task
|
|
if taskIndex is not None:
|
|
self.services.workflow.setWorkflowContext(task_number=taskIndex)
|
|
|
|
# Create task start message
|
|
await self.messageCreator.createTaskStartMessage(taskStep, workflow, taskIndex, totalTasks)
|
|
|
|
state = TaskExecutionState(taskStep)
|
|
retryContext = context
|
|
maxRetries = state.max_retries
|
|
|
|
for attempt in range(maxRetries):
|
|
logger.info(f"Task execution attempt {attempt+1}/{maxRetries}")
|
|
|
|
# Check workflow status before starting task execution
|
|
self._checkWorkflowStopped(workflow)
|
|
|
|
# Update retry context with current attempt information
|
|
if retryContext:
|
|
retryContext.retry_count = attempt + 1
|
|
|
|
actions = await self.generateActionItems(taskStep, workflow,
|
|
previousResults=retryContext.previous_results,
|
|
enhancedContext=retryContext)
|
|
|
|
# Log total actions count for this task
|
|
totalActions = len(actions) if actions else 0
|
|
logger.info(f"Task {taskIndex or '?'} has {totalActions} actions")
|
|
|
|
# Update workflow object after action planning
|
|
self._updateWorkflowAfterActionPlanning(totalActions)
|
|
self._setWorkflowTotals(totalActions=totalActions)
|
|
|
|
if not actions:
|
|
logger.error("No actions defined for task step, aborting task execution")
|
|
break
|
|
|
|
actionResults = []
|
|
for actionIdx, action in enumerate(actions):
|
|
# Check workflow status before each action execution
|
|
self._checkWorkflowStopped(workflow)
|
|
|
|
# Update workflow object before executing action
|
|
actionNumber = actionIdx + 1
|
|
self._updateWorkflowBeforeExecutingAction(actionNumber)
|
|
|
|
# Update workflow context for this action
|
|
self.services.workflow.setWorkflowContext(action_number=actionNumber)
|
|
|
|
# Log action start
|
|
logger.info(f"Task {taskIndex} - Starting action {actionNumber}/{totalActions}")
|
|
|
|
# Create action start message
|
|
actionStartMessage = {
|
|
"workflowId": workflow.id,
|
|
"role": "assistant",
|
|
"message": f"⚡ **Action {actionNumber}/{totalActions}** (Method {action.execMethod}.{action.execAction})",
|
|
"status": "step",
|
|
"sequenceNr": len(workflow.messages) + 1,
|
|
"publishedAt": self.services.utils.getUtcTimestamp(),
|
|
"documentsLabel": f"action_{actionNumber}_start",
|
|
"documents": [],
|
|
"actionProgress": "running",
|
|
"roundNumber": workflow.currentRound,
|
|
"taskNumber": taskIndex,
|
|
"actionNumber": actionNumber
|
|
}
|
|
|
|
# Add user-friendly message if available
|
|
if action.userMessage:
|
|
actionStartMessage["message"] += f"\n\n💬 {action.userMessage}"
|
|
|
|
self.services.workflow.storeMessageWithDocuments(workflow, actionStartMessage, [])
|
|
logger.info(f"Action start message created for action {actionNumber}")
|
|
|
|
# Execute single action
|
|
result = await self.actionExecutor.executeSingleAction(action, workflow, taskStep,
|
|
taskIndex, actionNumber, totalActions)
|
|
actionResults.append(result)
|
|
|
|
if result.success:
|
|
state.addSuccessfulAction(result)
|
|
else:
|
|
state.addFailedAction(result)
|
|
|
|
# Check workflow status before review
|
|
self._checkWorkflowStopped(workflow)
|
|
|
|
reviewResult = await self._reviewTaskCompletion(taskStep, actions, actionResults, workflow)
|
|
success = reviewResult.status == 'success'
|
|
feedback = reviewResult.reason
|
|
error = None if success else reviewResult.reason
|
|
|
|
if success:
|
|
logger.info(f"=== TASK {taskIndex or '?'} COMPLETED SUCCESSFULLY: {taskStep.objective} ===")
|
|
|
|
# Create task completion message
|
|
await self.messageCreator.createTaskCompletionMessage(taskStep, workflow, taskIndex, totalTasks, reviewResult)
|
|
|
|
return TaskResult(
|
|
taskId=taskStep.id,
|
|
status=TaskStatus.COMPLETED,
|
|
success=True,
|
|
feedback=feedback,
|
|
error=None
|
|
)
|
|
|
|
elif reviewResult.status == 'retry' and state.canRetry():
|
|
logger.warning(f"Task step '{taskStep.objective}' requires retry: {reviewResult.improvements}")
|
|
|
|
# Enhanced logging of criteria status
|
|
if reviewResult.met_criteria:
|
|
logger.info(f"Met criteria: {', '.join(reviewResult.met_criteria)}")
|
|
if reviewResult.unmet_criteria:
|
|
logger.warning(f"Unmet criteria: {', '.join(reviewResult.unmet_criteria)}")
|
|
|
|
state.incrementRetryCount()
|
|
|
|
# Update retry context with retry information and criteria tracking
|
|
if retryContext:
|
|
retryContext.retry_count = state.retry_count
|
|
retryContext.improvements = reviewResult.improvements
|
|
retryContext.previous_action_results = actionResults
|
|
retryContext.previous_review_result = reviewResult
|
|
retryContext.is_regeneration = True
|
|
retryContext.failure_patterns = state.getFailurePatterns()
|
|
retryContext.failed_actions = state.failed_actions
|
|
retryContext.successful_actions = state.successful_actions
|
|
|
|
# Track criteria progress across retries
|
|
if not hasattr(retryContext, 'criteria_progress'):
|
|
retryContext.criteria_progress = {
|
|
'met_criteria': set(),
|
|
'unmet_criteria': set(),
|
|
'attempt_history': []
|
|
}
|
|
|
|
# Update criteria progress
|
|
if reviewResult.met_criteria:
|
|
retryContext.criteria_progress['met_criteria'].update(reviewResult.met_criteria)
|
|
if reviewResult.unmet_criteria:
|
|
retryContext.criteria_progress['unmet_criteria'].update(reviewResult.unmet_criteria)
|
|
|
|
# Record this attempt's criteria status
|
|
attemptRecord = {
|
|
'attempt': state.retry_count,
|
|
'met_criteria': reviewResult.met_criteria or [],
|
|
'unmet_criteria': reviewResult.unmet_criteria or [],
|
|
'quality_score': reviewResult.quality_score,
|
|
'improvements': reviewResult.improvements or []
|
|
}
|
|
retryContext.criteria_progress['attempt_history'].append(attemptRecord)
|
|
|
|
# Create retry message
|
|
await self.messageCreator.createRetryMessage(taskStep, workflow, taskIndex, reviewResult)
|
|
|
|
continue
|
|
else:
|
|
logger.error(f"=== TASK {taskIndex or '?'} FAILED: {taskStep.objective} after {attempt+1} attempts ===")
|
|
|
|
# Create error message
|
|
await self.messageCreator.createErrorMessage(taskStep, workflow, taskIndex, reviewResult.reason)
|
|
|
|
return TaskResult(
|
|
taskId=taskStep.id,
|
|
status=TaskStatus.FAILED,
|
|
success=False,
|
|
feedback=feedback,
|
|
error=reviewResult.reason if reviewResult and hasattr(reviewResult, 'reason') else "Task failed after retry attempts"
|
|
)
|
|
|
|
logger.error(f"=== TASK {taskIndex or '?'} FAILED AFTER ALL RETRIES: {taskStep.objective} ===")
|
|
|
|
# Create final error message
|
|
await self.messageCreator.createErrorMessage(taskStep, workflow, taskIndex, "Task failed after all retries")
|
|
|
|
return TaskResult(
|
|
taskId=taskStep.id,
|
|
status=TaskStatus.FAILED,
|
|
success=False,
|
|
feedback="Task failed after all retries.",
|
|
error="Task failed after all retries."
|
|
)
|
|
|
|
    async def _reviewTaskCompletion(self, taskStep: TaskStep, taskActions: List[ActionItem],
                                    actionResults: List[ActionResult], workflow: ChatWorkflow) -> ReviewResult:
        """Review task completion and determine success/failure/retry.

        Summarizes the executed actions into a ReviewContext, asks the AI to
        judge the outcome, and parses the JSON verdict into a ReviewResult.

        Args:
            taskStep: The task step that was executed.
            taskActions: The planned ActionItem objects.
            actionResults: One ActionResult per executed action.
            workflow: The owning workflow (stop-checks and id).

        Returns:
            ReviewResult with status 'success' / 'retry' / other; on any
            exception a ReviewResult(status='failed') carrying the error.
        """
        try:
            # Check workflow status before reviewing task completion
            self._checkWorkflowStopped(workflow)

            logger.info(f"=== STARTING TASK COMPLETION REVIEW ===")
            logger.info(f"Task: {taskStep.objective}")
            logger.info(f"Actions executed: {len(taskActions) if taskActions else 0}")
            logger.info(f"Action results: {len(actionResults) if actionResults else 0}")

            # Create proper context object for result review.
            # step_result aggregates counts, texts, errors and documents so the
            # prompt builder does not have to walk ActionResult objects itself.
            reviewContext = ReviewContext(
                task_step=taskStep,
                task_actions=taskActions,
                action_results=actionResults,
                step_result={
                    'successful_actions': sum(1 for result in actionResults if result.success),
                    'total_actions': len(actionResults),
                    'results': [self._extractResultText(result) for result in actionResults if result.success],
                    'errors': [result.error for result in actionResults if not result.success],
                    'documents': [
                        {
                            'action_index': i,
                            'documents_count': len(result.documents) if result.documents else 0,
                            'documents': result.documents if result.documents else []
                        }
                        for i, result in enumerate(actionResults)
                    ]
                },
                workflow_id=workflow.id,
                previous_results=[]
            )

            # Check workflow status before calling AI service
            self._checkWorkflowStopped(workflow)

            # Build prompt bundle for result review
            bundle = generateResultReviewPrompt(reviewContext)
            promptTemplate = bundle.prompt
            placeholders = bundle.placeholders

            # Log result review prompt sent to AI
            logger.info("=== RESULT REVIEW PROMPT SENT TO AI ===")
            logger.info(f"Task: {taskStep.objective}")
            logger.info(f"Action Results Count: {len(reviewContext.action_results) if reviewContext.action_results else 0}")
            logger.info(f"Task Actions Count: {len(reviewContext.task_actions) if reviewContext.task_actions else 0}")
            # Trace result review prompt
            self._writeTraceLog("Result Review Prompt", promptTemplate)
            self._writeTraceLog("Result Review Placeholders", placeholders)

            # Centralized AI call: Result validation (balanced analysis) with placeholders
            options = AiCallOptions(
                operationType=OperationType.ANALYSE_CONTENT,
                priority=Priority.BALANCED,
                compressPrompt=True,
                compressContext=False,
                processingMode=ProcessingMode.ADVANCED,
                maxCost=0.05,
                maxProcessingTime=30
            )

            response = await self.services.ai.coreAi.callAiPlanning(prompt=promptTemplate, placeholders=placeholders, options=options)

            # Log result review response received
            logger.info("=== RESULT REVIEW AI RESPONSE RECEIVED ===")
            logger.info(f"Response length: {len(response) if response else 0}")
            # Trace result review response
            self._writeTraceLog("Result Review Response", response)

            # Parse review response: extract the outermost {...} span.
            jsonStart = response.find('{')
            jsonEnd = response.rfind('}') + 1
            if jsonStart == -1 or jsonEnd == 0:
                raise ValueError("No JSON found in review response")
            jsonStr = response[jsonStart:jsonEnd]

            try:
                review = json.loads(jsonStr)
            except Exception as e:
                logger.error(f"Error parsing review response JSON: {str(e)}")
                review = {}
            if 'status' not in review:
                raise ValueError("Review response missing 'status' field")
            # NOTE(review): this setdefault is dead — 'status' is guaranteed
            # present by the raise above; the reason/quality_score defaults
            # below are live, however.
            review.setdefault('status', 'unknown')
            review.setdefault('reason', 'No reason provided')
            review.setdefault('quality_score', 5)

            # Ensure improvements is a list
            improvements = review.get('improvements', [])
            if isinstance(improvements, str):
                # Split string into list if it's a single improvement
                improvements = [improvements.strip()] if improvements.strip() else []
            elif not isinstance(improvements, list):
                improvements = []

            # Ensure all list fields are properly typed
            metCriteria = review.get('met_criteria', [])
            if not isinstance(metCriteria, list):
                metCriteria = []

            unmetCriteria = review.get('unmet_criteria', [])
            if not isinstance(unmetCriteria, list):
                unmetCriteria = []

            reviewResult = ReviewResult(
                status=review.get('status', 'unknown'),
                reason=review.get('reason', 'No reason provided'),
                improvements=improvements,
                quality_score=review.get('quality_score', 5),
                missing_outputs=[],
                met_criteria=metCriteria,
                unmet_criteria=unmetCriteria,
                confidence=review.get('confidence', 0.5),
                # Extract user-friendly message if available
                userMessage=review.get('userMessage', None)
            )

            # Enhanced validation logging
            logger.info(f"VALIDATION RESULT - Task: '{taskStep.objective}' - Status: {reviewResult.status.upper()}, Quality: {reviewResult.quality_score}/10")
            if reviewResult.status == 'success':
                logger.info(f"VALIDATION SUCCESS - Task completed successfully")
                if reviewResult.met_criteria:
                    logger.info(f"Met criteria: {', '.join(reviewResult.met_criteria)}")
            elif reviewResult.status == 'retry':
                logger.warning(f"VALIDATION RETRY - Task requires retry: {reviewResult.improvements}")
                if reviewResult.unmet_criteria:
                    logger.warning(f"Unmet criteria: {', '.join(reviewResult.unmet_criteria)}")
            else:
                logger.error(f"VALIDATION FAILED - Task failed: {reviewResult.reason}")

            logger.info(f"=== TASK COMPLETION REVIEW FINISHED ===")
            logger.info(f"Final Status: {reviewResult.status}")
            logger.info(f"Quality Score: {reviewResult.quality_score}/10")
            logger.info(f"Improvements: {reviewResult.improvements}")
            logger.info("=== END REVIEW ===")

            return reviewResult
        except Exception as e:
            # Any failure (stop, AI error, parse error) is reported as a
            # failed review; executeTask then ends the task.
            logger.error(f"Error in reviewTaskCompletion: {str(e)}")
            return ReviewResult(
                status='failed',
                reason=str(e),
                quality_score=0
            )
|
def _createActionItem(self, actionData: Dict[str, Any]) -> ActionItem:
|
|
"""Creates a new task action"""
|
|
try:
|
|
# Ensure ID is present
|
|
if "id" not in actionData or not actionData["id"]:
|
|
actionData["id"] = f"action_{uuid.uuid4()}"
|
|
|
|
# Ensure required fields
|
|
if "status" not in actionData:
|
|
actionData["status"] = TaskStatus.PENDING
|
|
|
|
if "execMethod" not in actionData:
|
|
logger.error("execMethod is required for task action")
|
|
return None
|
|
|
|
if "execAction" not in actionData:
|
|
logger.error("execAction is required for task action")
|
|
return None
|
|
|
|
if "execParameters" not in actionData:
|
|
actionData["execParameters"] = {}
|
|
|
|
# Use generic field separation based on ActionItem model
|
|
simpleFields, objectFields = self.services.interfaceDbChat._separate_object_fields(ActionItem, actionData)
|
|
|
|
# Create action in database
|
|
createdAction = self.services.interfaceDbChat.db.recordCreate(ActionItem, simpleFields)
|
|
|
|
# Convert to ActionItem model
|
|
return ActionItem(
|
|
id=createdAction["id"],
|
|
execMethod=createdAction["execMethod"],
|
|
execAction=createdAction["execAction"],
|
|
execParameters=createdAction.get("execParameters", {}),
|
|
execResultLabel=createdAction.get("execResultLabel"),
|
|
expectedDocumentFormats=createdAction.get("expectedDocumentFormats"),
|
|
status=createdAction.get("status", TaskStatus.PENDING),
|
|
error=createdAction.get("error"),
|
|
retryCount=createdAction.get("retryCount", 0),
|
|
retryMax=createdAction.get("retryMax", 3),
|
|
processingTime=createdAction.get("processingTime"),
|
|
timestamp=float(createdAction.get("timestamp", self.services.utils.getUtcTimestamp())),
|
|
result=createdAction.get("result"),
|
|
resultDocuments=createdAction.get("resultDocuments", []),
|
|
userMessage=createdAction.get("userMessage")
|
|
)
|
|
|
|
except Exception as e:
|
|
logger.error(f"Error creating task action: {str(e)}")
|
|
return None
|
|
|
|
def _extractResultText(self, result: ActionResult) -> str:
|
|
"""Extract result text from ActionResult documents"""
|
|
if not result.success or not result.documents:
|
|
return ""
|
|
|
|
# Extract text directly from ActionDocument objects
|
|
resultParts = []
|
|
for doc in result.documents:
|
|
if hasattr(doc, 'documentData') and doc.documentData:
|
|
resultParts.append(str(doc.documentData))
|
|
|
|
# Join all document results with separators
|
|
return "\n\n---\n\n".join(resultParts) if resultParts else ""
|
|
|
|
def _updateWorkflowBeforeExecutingTask(self, taskNumber: int):
|
|
"""Update workflow object before executing a task"""
|
|
try:
|
|
updateData = {
|
|
"currentTask": taskNumber,
|
|
"currentAction": 0,
|
|
"totalActions": 0
|
|
}
|
|
|
|
# Update workflow object
|
|
self.workflow.currentTask = taskNumber
|
|
self.workflow.currentAction = 0
|
|
self.workflow.totalActions = 0
|
|
|
|
# Update in database
|
|
self.services.interfaceDbChat.updateWorkflow(self.workflow.id, updateData)
|
|
logger.info(f"Updated workflow {self.workflow.id} before executing task {taskNumber}: {updateData}")
|
|
|
|
except Exception as e:
|
|
logger.error(f"Error updating workflow before executing task: {str(e)}")
|
|
|
|
def _updateWorkflowAfterActionPlanning(self, totalActions: int):
|
|
"""Update workflow object after action planning for current task"""
|
|
try:
|
|
updateData = {
|
|
"totalActions": totalActions
|
|
}
|
|
|
|
# Update workflow object
|
|
self.workflow.totalActions = totalActions
|
|
|
|
# Update in database
|
|
self.services.interfaceDbChat.updateWorkflow(self.workflow.id, updateData)
|
|
logger.info(f"Updated workflow {self.workflow.id} after action planning: {updateData}")
|
|
|
|
except Exception as e:
|
|
logger.error(f"Error updating workflow after action planning: {str(e)}")
|
|
|
|
def _updateWorkflowBeforeExecutingAction(self, actionNumber: int):
|
|
"""Update workflow object before executing an action"""
|
|
try:
|
|
updateData = {
|
|
"currentAction": actionNumber
|
|
}
|
|
|
|
# Update workflow object
|
|
self.workflow.currentAction = actionNumber
|
|
|
|
# Update in database
|
|
self.services.interfaceDbChat.updateWorkflow(self.workflow.id, updateData)
|
|
logger.info(f"Updated workflow {self.workflow.id} before executing action {actionNumber}: {updateData}")
|
|
|
|
except Exception as e:
|
|
logger.error(f"Error updating workflow before executing action: {str(e)}")
|
|
|
|
def _setWorkflowTotals(self, totalTasks: int = None, totalActions: int = None):
|
|
"""Set total counts for workflow progress tracking and update database"""
|
|
try:
|
|
updateData = {}
|
|
|
|
if totalTasks is not None:
|
|
self.workflow.totalTasks = totalTasks
|
|
updateData["totalTasks"] = totalTasks
|
|
|
|
if totalActions is not None:
|
|
self.workflow.totalActions = totalActions
|
|
updateData["totalActions"] = totalActions
|
|
|
|
# Update workflow object in database if we have changes
|
|
if updateData:
|
|
self.services.interfaceDbChat.updateWorkflow(self.workflow.id, updateData)
|
|
logger.info(f"Updated workflow {self.workflow.id} totals in database: {updateData}")
|
|
|
|
logger.debug(f"Updated workflow totals: Tasks {self.workflow.totalTasks if hasattr(self.workflow, 'totalTasks') else 'N/A'}, Actions {self.workflow.totalActions if hasattr(self.workflow, 'totalActions') else 'N/A'}")
|
|
except Exception as e:
|
|
logger.error(f"Error setting workflow totals: {str(e)}")
|
|
|
|
def _createActionItem(self, actionData: Dict[str, Any]) -> ActionItem:
|
|
"""Creates a new task action"""
|
|
try:
|
|
import uuid
|
|
|
|
# Ensure ID is present
|
|
if "id" not in actionData or not actionData["id"]:
|
|
actionData["id"] = f"action_{uuid.uuid4()}"
|
|
|
|
# Ensure required fields
|
|
if "status" not in actionData:
|
|
actionData["status"] = TaskStatus.PENDING
|
|
|
|
if "execMethod" not in actionData:
|
|
logger.error("execMethod is required for task action")
|
|
return None
|
|
|
|
if "execAction" not in actionData:
|
|
logger.error("execAction is required for task action")
|
|
return None
|
|
|
|
if "execParameters" not in actionData:
|
|
actionData["execParameters"] = {}
|
|
|
|
# Use generic field separation based on ActionItem model
|
|
simpleFields, objectFields = self.services.interfaceDbChat._separate_object_fields(ActionItem, actionData)
|
|
|
|
# Create action in database
|
|
createdAction = self.services.interfaceDbChat.db.recordCreate(ActionItem, simpleFields)
|
|
|
|
# Convert to ActionItem model
|
|
return ActionItem(
|
|
id=createdAction["id"],
|
|
execMethod=createdAction["execMethod"],
|
|
execAction=createdAction["execAction"],
|
|
execParameters=createdAction.get("execParameters", {}),
|
|
execResultLabel=createdAction.get("execResultLabel"),
|
|
expectedDocumentFormats=createdAction.get("expectedDocumentFormats"),
|
|
status=createdAction.get("status", TaskStatus.PENDING),
|
|
error=createdAction.get("error"),
|
|
retryCount=createdAction.get("retryCount", 0),
|
|
retryMax=createdAction.get("retryMax", 3),
|
|
processingTime=createdAction.get("processingTime"),
|
|
timestamp=float(createdAction.get("timestamp", self.services.utils.getUtcTimestamp())),
|
|
result=createdAction.get("result"),
|
|
resultDocuments=createdAction.get("resultDocuments", []),
|
|
userMessage=createdAction.get("userMessage")
|
|
)
|
|
|
|
except Exception as e:
|
|
logger.error(f"Error creating task action: {str(e)}")
|
|
return None
|
|
|
|
def _writeTraceLog(self, contextText: str, data: Any) -> None:
|
|
"""Write trace data to configured trace file if in debug mode with improved JSON formatting"""
|
|
try:
|
|
import os
|
|
import json
|
|
from datetime import datetime, UTC
|
|
|
|
# Only write if logger is in debug mode
|
|
if logger.level > logging.DEBUG:
|
|
return
|
|
|
|
# Get log directory from configuration
|
|
logDir = self.services.utils.configGet("APP_LOGGING_LOG_DIR", "./")
|
|
if not os.path.isabs(logDir):
|
|
# If relative path, make it relative to the gateway directory
|
|
gatewayDir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
|
|
logDir = os.path.join(gatewayDir, logDir)
|
|
|
|
# Ensure log directory exists
|
|
os.makedirs(logDir, exist_ok=True)
|
|
|
|
# Create trace file path
|
|
traceFile = os.path.join(logDir, "log_trace.log")
|
|
|
|
# Format the trace entry with better structure
|
|
timestamp = datetime.fromtimestamp(self.services.utils.getUtcTimestamp(), UTC).strftime("%Y-%m-%d %H:%M:%S.%f")[:-3]
|
|
|
|
# Create a structured trace entry
|
|
traceEntry = f"[{timestamp}] {contextText}\n"
|
|
traceEntry += "=" * 80 + "\n"
|
|
|
|
# Add data if provided with improved formatting
|
|
if data is not None:
|
|
try:
|
|
if isinstance(data, (dict, list)):
|
|
# Format as pretty JSON with better settings
|
|
jsonStr = json.dumps(data, indent=2, default=str, ensure_ascii=False, sort_keys=False)
|
|
traceEntry += f"JSON Data:\n{jsonStr}\n"
|
|
elif isinstance(data, str):
|
|
# For string data, try to parse as JSON first, then fall back to plain text
|
|
try:
|
|
parsed = json.loads(data)
|
|
jsonStr = json.dumps(parsed, indent=2, default=str, ensure_ascii=False, sort_keys=False)
|
|
traceEntry += f"JSON Data (parsed from string):\n{jsonStr}\n"
|
|
except (json.JSONDecodeError, TypeError):
|
|
# Not valid JSON, show as plain text with proper line breaks
|
|
formatted_data = data.replace('\\n', '\n')
|
|
traceEntry += f"Text Data:\n{formatted_data}\n"
|
|
else:
|
|
# For other types, convert to string and try to parse as JSON
|
|
dataStr = str(data)
|
|
try:
|
|
parsed = json.loads(dataStr)
|
|
jsonStr = json.dumps(parsed, indent=2, default=str, ensure_ascii=False, sort_keys=False)
|
|
traceEntry += f"JSON Data (parsed from object):\n{jsonStr}\n"
|
|
except (json.JSONDecodeError, TypeError):
|
|
# Not valid JSON, show as plain text with proper line breaks
|
|
formatted_data = dataStr.replace('\\n', '\n')
|
|
traceEntry += f"Object Data:\n{formatted_data}\n"
|
|
except Exception as e:
|
|
# Fallback to simple string representation
|
|
traceEntry += f"Data (fallback): {str(data)}\n"
|
|
else:
|
|
traceEntry += "No data provided\n"
|
|
|
|
traceEntry += "=" * 80 + "\n\n"
|
|
|
|
# Write to trace file
|
|
with open(traceFile, "a", encoding="utf-8") as f:
|
|
f.write(traceEntry)
|
|
|
|
except Exception as e:
|
|
# Don't log trace errors to avoid recursion
|
|
pass
|