# gateway/modules/workflow/managerChat.py
# Snapshot metadata: 2025-07-04 15:10:26 +02:00
# 1311 lines, 54 KiB, Python, no trailing newline at EOF

import asyncio
import logging
import uuid
import json
from typing import Dict, Any, Optional, List, Union
from datetime import datetime, UTC
from modules.interfaces.interfaceAppModel import User
from modules.interfaces.interfaceChatModel import (
TaskStatus, ChatDocument, TaskItem, TaskAction, TaskResult, ChatStat, ChatLog, ChatMessage, ChatWorkflow
)
from modules.workflow.serviceContainer import ServiceContainer
from modules.interfaces.interfaceChatObjects import ChatObjects
logger = logging.getLogger(__name__)
class ChatManager:
"""Chat manager with improved AI integration and method handling"""
def __init__(self, currentUser: User, chatInterface: ChatObjects):
    """Store the acting user and chat interface; workflow binding happens later.

    `service` and `workflow` stay None until `initialize()` attaches a
    ChatWorkflow and builds the ServiceContainer.
    """
    self.currentUser = currentUser
    self.chatInterface = chatInterface
    # Populated by initialize() once a workflow is attached.
    self.service: ServiceContainer = None
    self.workflow: ChatWorkflow = None
    # Circuit-breaker bookkeeping for AI calls.
    self.ai_failure_count = 0
    self.ai_last_failure_time = None
    self.ai_circuit_breaker_threshold = 5  # consecutive failures before the breaker opens
    self.ai_circuit_breaker_timeout = 300  # seconds the breaker stays open (5 minutes)
    # Timeouts, in seconds.
    self.ai_call_timeout = 120  # single AI call (2 minutes)
    self.task_execution_timeout = 600  # whole task execution (10 minutes)
# ===== Initialization and Setup =====
async def initialize(self, workflow: ChatWorkflow) -> None:
    """Attach the workflow and build the per-workflow service container."""
    self.workflow = workflow
    self.service = ServiceContainer(self.currentUser, workflow)
def _extractJsonFromResponse(self, response: str) -> Optional[Dict[str, Any]]:
"""Extract JSON from verbose AI response that may contain explanatory text"""
try:
# First try direct JSON parsing
return json.loads(response)
except json.JSONDecodeError:
# Try to find JSON in the response
import re
# Look for JSON object patterns with more flexible matching
json_patterns = [
r'\{[^{}]*(?:\{[^{}]*\}[^{}]*)*\}', # Nested JSON objects
r'\{.*?\}', # Simple JSON object (non-greedy)
r'\[\{.*?\}\]', # JSON array of objects
]
for pattern in json_patterns:
matches = re.findall(pattern, response, re.DOTALL)
for match in matches:
try:
# Clean up the match
cleaned_match = match.strip()
# Remove any markdown code blocks
if cleaned_match.startswith('```json'):
cleaned_match = cleaned_match[7:]
if cleaned_match.endswith('```'):
cleaned_match = cleaned_match[:-3]
cleaned_match = cleaned_match.strip()
parsed_json = json.loads(cleaned_match)
logger.info(f"Successfully extracted JSON from response using pattern: {pattern[:20]}...")
return parsed_json
except json.JSONDecodeError:
continue
# If no JSON found, log the full response for debugging
logger.error(f"Could not extract JSON from response: {response[:500]}...")
return None
# ===== Task Creation and Management =====
async def createInitialTask(self, workflow: ChatWorkflow, initialMessage: ChatMessage) -> Optional[TaskItem]:
    """Create the initial task from the first message.

    Asks the AI for a task definition (a JSON object with "status",
    "feedback" and "actions"), validates it, converts each action into a
    TaskAction via the chat interface, and persists the task.

    :param workflow: workflow the task belongs to
    :param initialMessage: the user's first message; its text seeds the prompt
    :return: the created TaskItem, or None on any validation/creation failure

    NOTE(review): near-duplicate of createNextTask below — consider
    extracting the shared definition-to-task conversion.
    """
    try:
        logger.info(f"Creating initial task for workflow {workflow.id}")
        # Create task definition prompt
        prompt = await self._createTaskDefinitionPrompt(initialMessage.message, workflow)
        # Get AI response
        response = await self.service.callAiTextAdvanced(prompt)
        # Parse response (tolerant of verbose / embedded JSON)
        taskDef = self._extractJsonFromResponse(response)
        # Validate task definition: must be a dict with the required keys.
        if not taskDef:
            logger.error("Could not extract valid JSON from AI response")
            return None
        if not isinstance(taskDef, dict):
            logger.error("Task definition must be a JSON object")
            return None
        requiredFields = ["status", "feedback", "actions"]
        for field in requiredFields:
            if field not in taskDef:
                logger.error(f"Missing required field: {field}")
                return None
        if not isinstance(taskDef["actions"], list):
            logger.error("Actions must be a list")
            return None
        logger.info(f"Task definition validated: {len(taskDef['actions'])} actions")
        # Create task using interface
        taskData = {
            "workflowId": workflow.id,
            "userInput": initialMessage.message,
            "status": taskDef["status"],
            "feedback": taskDef["feedback"],
            "actionList": []
        }
        # Add actions; malformed entries are skipped silently.
        for actionDef in taskDef["actions"]:
            if not isinstance(actionDef, dict):
                continue
            requiredFields = ["method", "action", "parameters"]
            if not all(field in actionDef for field in requiredFields):
                continue
            # Create action using interface
            actionData = {
                "execMethod": actionDef["method"],
                "execAction": actionDef["action"],
                "execParameters": actionDef["parameters"],
                "execResultLabel": actionDef.get("resultLabel")
            }
            action = self.chatInterface.createTaskAction(actionData)
            if action:
                # Convert TaskAction object to dictionary for database storage
                actionDict = {
                    "id": action.id,
                    "execMethod": action.execMethod,
                    "execAction": action.execAction,
                    "execParameters": action.execParameters,
                    "execResultLabel": action.execResultLabel,
                    "status": action.status,
                    "error": action.error,
                    "retryCount": action.retryCount,
                    "retryMax": action.retryMax,
                    "processingTime": action.processingTime,
                    "timestamp": action.timestamp.isoformat() if action.timestamp else None,
                    "result": action.result,
                    "resultDocuments": action.resultDocuments
                }
                taskData["actionList"].append(actionDict)
        # Create task using interface
        task = self.chatInterface.createTask(taskData)
        if task:
            logger.info(f"Task created successfully: {task.id}")
        else:
            logger.error("Failed to create task")
        return task
    except Exception as e:
        logger.error(f"Error creating initial task: {str(e)}")
        return None
async def createNextTask(self, workflow: ChatWorkflow, previousResult: TaskResult) -> Optional[TaskItem]:
    """Create the next task based on the previous task's result.

    Bails out when the previous result was unsuccessful; otherwise uses the
    previous feedback as the new "user input", asks the AI for a task
    definition, validates it, and persists the task via the chat interface.

    :param workflow: workflow the task belongs to
    :param previousResult: outcome of the last task; its feedback drives the prompt
    :return: the created TaskItem, or None on failure

    NOTE(review): this mirrors createInitialTask almost line for line —
    consider extracting the shared definition-to-task conversion.
    """
    try:
        logger.info(f"Creating next task for workflow {workflow.id}")
        # Check if previous result was successful
        if not previousResult.success:
            logger.error(f"Previous task failed: {previousResult.error}")
            return None
        # Create task definition prompt from the previous feedback
        prompt = await self._createTaskDefinitionPrompt(previousResult.feedback, workflow)
        # Get AI response
        response = await self.service.callAiTextAdvanced(prompt)
        # Parse response
        taskDef = self._extractJsonFromResponse(response)
        # Validate task definition: must be a dict with the required keys.
        if not taskDef:
            logger.error("Could not extract valid JSON from AI response")
            return None
        if not isinstance(taskDef, dict):
            logger.error("Task definition must be a JSON object")
            return None
        requiredFields = ["status", "feedback", "actions"]
        for field in requiredFields:
            if field not in taskDef:
                logger.error(f"Missing required field: {field}")
                return None
        if not isinstance(taskDef["actions"], list):
            logger.error("Actions must be a list")
            return None
        logger.info(f"Next task definition validated: {len(taskDef['actions'])} actions")
        # Create task using interface
        taskData = {
            "workflowId": workflow.id,
            "userInput": previousResult.feedback,
            "status": taskDef["status"],
            "feedback": taskDef["feedback"],
            "actionList": []
        }
        # Add actions; malformed entries are skipped silently.
        for actionDef in taskDef["actions"]:
            if not isinstance(actionDef, dict):
                continue
            requiredFields = ["method", "action", "parameters"]
            if not all(field in actionDef for field in requiredFields):
                continue
            # Create action using interface
            actionData = {
                "execMethod": actionDef["method"],
                "execAction": actionDef["action"],
                "execParameters": actionDef["parameters"],
                "execResultLabel": actionDef.get("resultLabel")
            }
            action = self.chatInterface.createTaskAction(actionData)
            if action:
                # Convert TaskAction object to dictionary for database storage
                actionDict = {
                    "id": action.id,
                    "execMethod": action.execMethod,
                    "execAction": action.execAction,
                    "execParameters": action.execParameters,
                    "execResultLabel": action.execResultLabel,
                    "status": action.status,
                    "error": action.error,
                    "retryCount": action.retryCount,
                    "retryMax": action.retryMax,
                    "processingTime": action.processingTime,
                    "timestamp": action.timestamp.isoformat() if action.timestamp else None,
                    "result": action.result,
                    "resultDocuments": action.resultDocuments
                }
                taskData["actionList"].append(actionDict)
        # Create task using interface
        task = self.chatInterface.createTask(taskData)
        if task:
            logger.info(f"Next task created successfully: {task.id}")
        else:
            logger.error("Failed to create next task")
        return task
    except Exception as e:
        logger.error(f"Error creating next task: {str(e)}")
        return None
async def executeTask(self, task: TaskItem) -> TaskItem:
    """Execute a task's actions sequentially via AI calls.

    For each action: prompts the AI to "execute" it, parses the JSON result,
    updates the action's status/result/error, resolves any document
    references into ChatDocument objects, and appends a "step" message to
    the workflow. Execution stops at the first failed action.

    :param task: task whose actionList is executed in order
    :return: the same task with per-action results and a final status of
             "completed" (all actions completed) or "failed"

    NOTE(review): relies on self.workflow being set via initialize() —
    sequence numbers and messages come from self.workflow.messages.
    """
    try:
        # Execute each action
        for action in task.actionList:
            # Create action prompt
            prompt = f"""Execute the following action:
Action: {action.execMethod}.{action.execAction}
Parameters: {json.dumps(action.execParameters)}
Please provide a JSON response with:
1. result: The result of the action
2. resultLabel: A label for the result (format: documentList_<uuid>_<label>)
3. documents: List of document references (format: document_<id>_<filename>)
4. error: Error message if the action failed
Example format:
{{
"result": "string",
"resultLabel": "documentList_<uuid>_<label>",
"documents": [
"document_<id>_<filename>"
],
"error": "string"
}}"""
            # Get AI response
            response = await self.service.callAiTextBasic(prompt)
            # Parse response
            result = self._extractJsonFromResponse(response)
            if not result:
                logger.error(f"Invalid JSON in action result: {response}")
                action.status = "failed"
                action.error = "Invalid result format"
                continue
            # Update action: any non-empty "error" marks it failed.
            action.status = "completed" if not result.get("error") else "failed"
            action.result = result.get("result", "")
            action.error = result.get("error", "")
            action.execResultLabel = result.get("resultLabel", "")
            # Process documents from AI response
            documents = []
            if result.get("documents") and isinstance(result["documents"], list):
                for docRef in result["documents"]:
                    try:
                        # Parse document reference: document_<id>_<filename>
                        if docRef.startswith("document_"):
                            parts = docRef.split("_", 2)
                            if len(parts) >= 3:
                                docId = parts[1]
                                filename = parts[2]
                                # Create ChatDocument using interface
                                documentData = {
                                    "id": docId,
                                    "filename": filename,
                                    "fileSize": 0,  # Will be updated if file exists
                                    "mimeType": "application/octet-stream"
                                }
                                document = self.chatInterface.createChatDocument(documentData)
                                if document:
                                    documents.append(document)
                                    logger.info(f"Created document reference: {docRef}")
                    except Exception as e:
                        # A single bad reference must not abort the action.
                        logger.warning(f"Error processing document reference {docRef}: {str(e)}")
            # Create message for action result using interface
            messageData = {
                "workflowId": task.workflowId,
                "role": "assistant",
                "message": action.result,
                "status": "step",
                "sequenceNr": len(self.workflow.messages) + 1,
                "publishedAt": datetime.now(UTC).isoformat(),
                "actionId": action.id,
                "actionMethod": action.execMethod,
                "actionName": action.execAction,
                "documentsLabel": action.execResultLabel,
                "documents": documents  # resolved ChatDocument objects for this action
            }
            message = self.chatInterface.createWorkflowMessage(messageData)
            if message:
                self.workflow.messages.append(message)
                logger.info(f"Action execution logged: {action.execMethod}.{action.execAction} - {action.status} - Documents: {len(documents)}")
            else:
                logger.error(f"Failed to create workflow message for action {action.execMethod}.{action.execAction}")
            # If action failed, stop execution
            if action.status == "failed":
                break
        # Update task status
        task.status = "completed" if all(a.status == "completed" for a in task.actionList) else "failed"
        return task
    except Exception as e:
        logger.error(f"Error executing task: {str(e)}")
        task.status = "failed"
        return task
async def parseTaskResult(self, workflow: ChatWorkflow, task: TaskItem) -> None:
    """Record a task's feedback as a step message and roll up processing time.

    :param workflow: workflow receiving the message and stats update
    :param task: finished task whose feedback/processingTime are recorded
    :raises Exception: re-raises anything that fails during recording
    """
    try:
        payload = {
            "workflowId": workflow.id,
            "role": "assistant",
            "message": task.feedback,
            "status": "step",
            "sequenceNr": len(workflow.messages) + 1,
            "publishedAt": datetime.now(UTC).isoformat(),
            "actionId": task.id
        }
        msg = self.chatInterface.createWorkflowMessage(payload)
        if msg:
            workflow.messages.append(msg)
        # Accumulate processing time into the workflow's stats.
        if task.processingTime:
            if not workflow.stats:
                workflow.stats = ChatStat()
            elapsed_so_far = workflow.stats.processingTime or 0
            workflow.stats.processingTime = elapsed_so_far + task.processingTime
    except Exception as e:
        logger.error(f"Error parsing task result: {str(e)}")
        raise
async def shouldContinue(self, workflow: ChatWorkflow) -> bool:
    """Determine if workflow should continue.

    Returns False for terminal workflow states ("completed", "failed",
    "stopped"), False when no task is pending, and True only when there is
    at least one pending task AND at least one running task.

    NOTE(review): pending-with-nothing-running returns False, which looks
    inverted (a pending task with no runner arguably means "continue") —
    confirm against the workflow driver before changing.
    """
    try:
        # Check if workflow is in a terminal state
        if workflow.status in ["completed", "failed", "stopped"]:
            return False
        # Get all tasks for the workflow
        tasks = self.service.tasks
        # Check if there are any pending tasks
        hasPendingTasks = any(t.status == "pending" for t in tasks)
        if not hasPendingTasks:
            return False
        # Check if any task is currently running
        hasRunningTasks = any(t.status == "running" for t in tasks)
        if hasRunningTasks:
            return True
        return False
    except Exception as e:
        # Fail closed: any error stops the loop rather than spinning.
        logger.error(f"Error checking workflow continuation: {str(e)}")
        return False
async def identifyNextTask(self, workflow: ChatWorkflow) -> TaskResult:
    """Ask the AI what the next task should be, based on the chat summary.

    :param workflow: workflow whose message history is summarized for the prompt
    :return: a TaskResult whose success flag and feedback come from the AI's
             JSON answer; a failed TaskResult on parse errors or exceptions
    """
    try:
        # Get workflow summary
        summary = await self.service.summarizeChat(workflow.messages)
        # Create prompt for next task identification
        prompt = f"""Based on the workflow history and current state, identify the next task:
Workflow History:
{summary}
Please provide a JSON response with:
1. feedback: Summary of current state and what needs to be done next
2. success: Whether the workflow can continue
3. error: Any error message if workflow cannot continue
Example format:
{{
"feedback": "string",
"success": true,
"error": "string"
}}"""
        # Get AI response
        response = await self.service.callAiTextBasic(prompt)
        # Parse response
        result = self._extractJsonFromResponse(response)
        if not result:
            logger.error(f"Invalid JSON in next task identification: {response}")
            # Create error result using interface
            errorResultData = {
                "status": "failed",
                "success": False,
                "error": "Invalid result format"
            }
            return self.chatInterface.createTaskResult(errorResultData)
        # Create result using interface; missing "success" counts as failure.
        resultData = {
            "status": "completed" if result.get("success", False) else "failed",
            "success": result.get("success", False),
            "feedback": result.get("feedback", ""),
            "error": result.get("error", "")
        }
        return self.chatInterface.createTaskResult(resultData)
    except Exception as e:
        logger.error(f"Error identifying next task: {str(e)}")
        # Create error result using interface
        errorResultData = {
            "status": "failed",
            "success": False,
            "error": str(e)
        }
        return self.chatInterface.createTaskResult(errorResultData)
async def generateWorkflowFeedback(self, workflow: ChatWorkflow) -> str:
    """Generate final, user-facing feedback text for a finished workflow.

    Builds a summary (status, counts, duration, chat summary) and asks the
    AI to phrase it in the user's language.

    :return: the AI-generated summary, or a fixed fallback string on error

    NOTE(review): assumes workflow.startedAt is an ISO-8601 string whose
    tz-awareness matches datetime.now(UTC); a naive timestamp makes the
    subtraction raise TypeError (caught below) — confirm the stored format.
    """
    try:
        # Get workflow summary
        workflowSummary = {
            "status": workflow.status,
            "totalMessages": len(workflow.messages),
            "totalDocuments": sum(len(msg.documents) for msg in workflow.messages),
            "duration": (datetime.now(UTC) - datetime.fromisoformat(workflow.startedAt)).total_seconds()
        }
        # Get chat summary using service
        chatSummary = await self.service.summarizeChat(workflow.messages)
        # Create detailed prompt
        prompt = f"""You are an AI assistant providing a summary of a completed workflow.
Please respond in '{self.service.user.language}' language.
Workflow Summary:
Status: {workflowSummary['status']}
Total Messages: {workflowSummary['totalMessages']}
Total Documents: {workflowSummary['totalDocuments']}
Duration: {workflowSummary['duration']:.1f} seconds
Chat Summary:
{chatSummary}
Instructions:
1. Summarize the workflow's activities, outcomes, and any important points
2. Be concise but informative
3. Use a professional but friendly tone
4. Focus on key achievements and next steps if any
Please provide a comprehensive summary of this workflow."""
        # Generate feedback using AI
        feedback = await self.service.callAiTextBasic(prompt)
        return feedback
    except Exception as e:
        logger.error(f"Error generating workflow feedback: {str(e)}")
        return "Workflow completed successfully."
async def _createTaskDefinitionPrompt(self, userInput: str, workflow: ChatWorkflow) -> str:
    """Build the task-definition prompt for the planning AI.

    Gathers available methods, a chat-history summary, and document /
    connection references from the service container, then embeds them in a
    prompt that demands a strict JSON task definition in return.

    :param userInput: the request text the plan must satisfy
    :param workflow: workflow whose messages are summarized as context
    :return: the fully rendered prompt string (also logged at DEBUG level)
    """
    # Get available methods
    methodList = self.service.getMethodsList()
    # Get workflow history
    messageSummary = await self.service.summarizeChat(workflow.messages)
    # Get available documents and connections
    docRefs = self.service.getDocumentReferenceList()
    connRefs = self.service.getConnectionReferenceList()
    # chr(10) is '\n' — f-strings could not contain backslashes before 3.12.
    prompt = f"""You are a task planning AI that creates structured task definitions in JSON format.
TASK REQUEST: {userInput}
CONTEXT:
Chat History: {messageSummary}
AVAILABLE RESOURCES:
Methods: {chr(10).join(f"- {method}" for method in methodList)}
Connections: {chr(10).join(f"- {conn['connectionReference']} ({conn['authority']})" for conn in connRefs)}
Documents: {chr(10).join(f"- {doc['documentReference']} ({doc['actionMethod']}.{doc['actionName']} - {doc['documentCount']} docs) - {doc['datetime']}" for doc in docRefs.get('chat', []))}
INSTRUCTIONS:
1. Analyze the task request and available resources
2. Create a sequence of actions to accomplish the task
3. Use ONLY the provided methods, documents, and connections
4. Return a VALID JSON object with the exact structure shown below
REQUIRED JSON STRUCTURE:
{{
"status": "pending",
"feedback": "Clear explanation of what will be done",
"actions": [
{{
"method": "method_name",
"action": "action_name",
"parameters": {{
"param1": "value1",
"param2": "value2"
}},
"resultLabel": "documentList_uuid_descriptive_label"
}}
]
}}
JSON FIELD REQUIREMENTS:
- "status": Must be "pending", "running", "completed", or "failed"
- "feedback": Human-readable explanation of the task plan
- "actions": Array of action objects (can be empty if no actions needed)
- "method": Must be one of the available methods listed above
- "action": Must be a valid action for that method
- "parameters": Object with method-specific parameters
- "resultLabel": CRITICAL - Must follow format: "documentList_uuid_descriptive_label"
RESULT LABEL REQUIREMENTS (CRITICAL):
- You MUST set a resultLabel for each action
- Format: "documentList_uuid_descriptive_label"
- uuid: Generate a unique identifier (e.g., abc123, def456)
- descriptive_label: Clear description of what the action produces (e.g., sales_documents, analysis_results, quarterly_report)
- Examples: "documentList_abc123_sales_documents", "documentList_def456_analysis_results"
- This label will be used to reference the results in subsequent actions
PARAMETER RULES:
- Use only document references from "Documents" section above
- Use only connection references from "Connections" section above
- Use result labels from previous actions in the sequence
- All parameter values must be strings
- Document references show: method.action - document count - timestamp
EXAMPLE VALID JSON:
{{
"status": "pending",
"feedback": "I will search SharePoint for sales documents and then analyze the quarterly data to create a business intelligence report.",
"actions": [
{{
"method": "sharepoint",
"action": "search",
"parameters": {{
"query": "sales quarterly report",
"site": "connection_123_msft_testuser@example.com"
}},
"resultLabel": "documentList_abc123_sales_documents"
}},
{{
"method": "excel",
"action": "analyze",
"parameters": {{
"document": "documentList_abc123_sales_documents"
}},
"resultLabel": "documentList_def456_analysis_results"
}}
]
}}
CRITICAL: Respond with ONLY the JSON object. Do not include any explanatory text, markdown formatting, or additional content outside the JSON structure."""
    # Log the generated prompt for debugging
    logger.debug("=" * 80)
    logger.debug("TASK DEFINITION PROMPT:")
    logger.debug("=" * 80)
    logger.debug(prompt)
    logger.debug("=" * 80)
    return prompt
# ===== Utility Methods =====
async def processFileIds(self, fileIds: List[str]) -> List[ChatDocument]:
    """Resolve file IDs into ChatDocument objects via the service layer.

    IDs that cannot be resolved (no service, no file info, creation failure,
    or an exception) are logged and skipped rather than aborting the batch.

    :param fileIds: file identifiers to look up
    :return: ChatDocument objects for every ID that resolved successfully
    """
    resolved = []
    for fileId in fileIds:
        try:
            # The service container must exist before we can look anything up.
            if not getattr(self, 'service', None):
                logger.error(f"Service not initialized for file ID {fileId}")
                continue
            fileInfo = self.service.getFileInfo(fileId)
            if not fileInfo:
                logger.warning(f"No file info found for file ID {fileId}")
                continue
            document = self.chatInterface.createChatDocument({
                "fileId": fileId,
                "filename": fileInfo.get("filename", "unknown"),
                "fileSize": fileInfo.get("size", 0),
                "mimeType": fileInfo.get("mimeType", "application/octet-stream"),
            })
            if document:
                resolved.append(document)
                logger.info(f"Processed file ID {fileId} -> {document.filename}")
        except Exception as e:
            logger.error(f"Error processing file ID {fileId}: {str(e)}")
    return resolved
def setUserLanguage(self, language: str) -> None:
    """Propagate the UI language to the service's user, if a service exists."""
    service = getattr(self, 'service', None)
    if service:
        service.user.language = language
# ===== Enhanced Task Planning Methods =====
async def _callAIWithCircuitBreaker(self, prompt: str, context: str) -> str:
    """Call the AI with a timeout, tracking failures through a circuit breaker.

    A successful call resets the failure counter. Timeouts, breaker-open
    refusals, and any other error are all recorded as failures and re-raised.
    """
    try:
        # Refuse fast while the breaker is open; this deliberately counts
        # as another failure via the generic handler below.
        if self._isCircuitBreakerOpen():
            raise Exception("AI circuit breaker is open - too many recent failures")
        result = await asyncio.wait_for(
            self._callAI(prompt, context),
            timeout=self.ai_call_timeout
        )
    except asyncio.TimeoutError:
        self._recordAIFailure("Timeout")
        raise Exception(f"AI call timed out after {self.ai_call_timeout} seconds")
    except Exception as e:
        self._recordAIFailure(str(e))
        raise
    # Healthy call: reset the breaker.
    self.ai_failure_count = 0
    return result
def _isCircuitBreakerOpen(self) -> bool:
"""Check if circuit breaker is open"""
if self.ai_failure_count >= self.ai_circuit_breaker_threshold:
if self.ai_last_failure_time:
time_since_failure = (datetime.now(UTC) - self.ai_last_failure_time).total_seconds()
if time_since_failure < self.ai_circuit_breaker_timeout:
return True
else:
# Reset circuit breaker after timeout
self.ai_failure_count = 0
self.ai_last_failure_time = None
return False
def _recordAIFailure(self, error: str):
    """Bump the failure counter and timestamp consumed by the circuit breaker."""
    self.ai_last_failure_time = datetime.now(UTC)
    self.ai_failure_count += 1
    logger.warning(f"AI failure recorded ({self.ai_failure_count}/{self.ai_circuit_breaker_threshold}): {error}")
async def generateTaskPlan(self, context: Dict[str, Any]) -> Dict[str, Any]:
    """Ask the AI for a structured task plan; fall back to a static plan on failure.

    Any error (prompt build, AI call, parse, or validation) is logged and
    answered with the fallback plan instead of propagating.
    """
    try:
        planning_prompt = self._createTaskPlanningPrompt(context)
        raw_response = await self._callAIWithCircuitBreaker(planning_prompt, "task_planning")
        plan = self._parseTaskPlanResponse(raw_response)
        if not self._validateTaskPlan(plan):
            raise Exception("Generated task plan failed validation")
        logger.info(f"Generated task plan with {len(plan.get('tasks', []))} tasks")
        return plan
    except Exception as e:
        logger.error(f"Error generating task plan: {str(e)}")
        return self._createFallbackTaskPlan(context)
def _validateTaskPlan(self, task_plan: Dict[str, Any]) -> bool:
"""Validate task plan structure and dependencies"""
try:
if not isinstance(task_plan, dict):
return False
if 'tasks' not in task_plan or not isinstance(task_plan['tasks'], list):
return False
# Check each task
task_ids = set()
for task in task_plan['tasks']:
if not isinstance(task, dict):
return False
required_fields = ['id', 'description', 'expected_outputs', 'success_criteria']
if not all(field in task for field in required_fields):
return False
# Check for duplicate task IDs
if task['id'] in task_ids:
return False
task_ids.add(task['id'])
# Validate dependencies
dependencies = task.get('dependencies', [])
if not isinstance(dependencies, list):
return False
# Check that dependencies reference existing tasks
for dep in dependencies:
if dep not in task_ids and dep != 'task_0': # Allow task_0 as special case
return False
return True
except Exception as e:
logger.error(f"Error validating task plan: {str(e)}")
return False
def _createFallbackTaskPlan(self, context: Dict[str, Any]) -> Dict[str, Any]:
    """Return a minimal two-step plan used when AI plan generation fails."""
    logger.warning("Creating fallback task plan due to AI generation failure")
    analyze_step = {
        "id": "task_1",
        "description": "Analyze all provided documents",
        "dependencies": [],
        "expected_outputs": ["document_analysis"],
        "success_criteria": ["All documents processed"],
        "required_documents": context.get('available_documents', []),
        "estimated_complexity": "medium"
    }
    output_step = {
        "id": "task_2",
        "description": "Generate basic output based on analysis",
        "dependencies": ["task_1"],
        "expected_outputs": ["basic_output"],
        "success_criteria": ["Output generated"],
        "required_documents": ["document_analysis"],
        "estimated_complexity": "low"
    }
    return {
        "overview": "Fallback task plan - basic document analysis and processing",
        "tasks": [analyze_step, output_step]
    }
async def generateActionsForTask(self, task_step: Dict[str, Any], workflow: ChatWorkflow, task: TaskItem, improvements: str = None) -> List[Dict[str, Any]]:
    """Generate concrete actions for one task step, with validation and fallback.

    Builds an action-generation context, asks the AI for actions, validates
    them, and substitutes fallback actions when generation or validation
    fails.

    :param task_step: plan step (id/description/expected_outputs/...)
    :param workflow: owning workflow (used to gather available documents)
    :param task: owning task (used to gather previous results)
    :param improvements: optional reviewer feedback to fold into the prompt
    :return: list of action dicts (possibly fallback actions, possibly empty)
    """
    # Seed the context BEFORE the try block: the previous version built it
    # inside the try, so a failure in the gather helpers left `context`
    # unbound and the except handler itself crashed with NameError.
    context: Dict[str, Any] = {
        'task_step': task_step,
        'available_documents': [],
        'previous_results': [],
        'improvements': improvements
    }
    try:
        context['workflow_id'] = workflow.id
        context['task_id'] = task.id
        context['available_documents'] = self._getAvailableDocuments(workflow)
        context['previous_results'] = self._getPreviousResults(task)
        # Prepare prompt for action generation
        prompt = self._createActionGenerationPrompt(context)
        # Call AI with circuit breaker
        response = await self._callAIWithCircuitBreaker(prompt, "action_generation")
        # Parse and validate actions
        actions = self._parseActionResponse(response)
        if not self._validateActions(actions, context):
            logger.warning("Generated actions failed validation, using fallback actions")
            actions = self._createFallbackActions(task_step, context)
        logger.info(f"Generated {len(actions)} actions for task step: {task_step.get('description', 'Unknown')}")
        return actions
    except Exception as e:
        logger.error(f"Error generating actions for task: {str(e)}")
        return self._createFallbackActions(task_step, context)
def _validateActions(self, actions: List[Dict[str, Any]], context: Dict[str, Any]) -> bool:
    """Check that every generated action is a well-formed action dict.

    Each action needs non-empty 'method', 'action', 'parameters' and
    'resultLabel' entries, a dict for 'parameters', and a resultLabel
    starting with 'documentList_'. An empty list is rejected.
    """
    try:
        if not isinstance(actions, list):
            logger.error("Actions must be a list")
            return False
        if not actions:
            logger.warning("No actions generated")
            return False
        required_fields = ['method', 'action', 'parameters', 'resultLabel']
        for i, action in enumerate(actions):
            if not isinstance(action, dict):
                logger.error(f"Action {i} must be a dictionary")
                return False
            # Missing or falsy values both count as "missing".
            missing_fields = [field for field in required_fields if not action.get(field)]
            if missing_fields:
                logger.error(f"Action {i} missing required fields: {missing_fields}")
                return False
            result_label = action.get('resultLabel', '')
            if not result_label.startswith('documentList_'):
                logger.error(f"Action {i} result label must start with 'documentList_': {result_label}")
                return False
            if not isinstance(action.get('parameters', {}), dict):
                logger.error(f"Action {i} parameters must be a dictionary")
                return False
        logger.info(f"Successfully validated {len(actions)} actions")
        return True
    except Exception as e:
        logger.error(f"Error validating actions: {str(e)}")
        return False
def _createFallbackActions(self, task_step: Dict[str, Any], context: Dict[str, Any]) -> List[Dict[str, Any]]:
    """Build generic document-analysis actions for when AI generation fails.

    Emits one 'document.analyze' action per available document; returns an
    empty list when the context carries no documents.
    """
    logger.warning("Creating fallback actions due to AI generation failure")
    available_docs = context.get('available_documents', [])
    if not available_docs:
        logger.warning("No available documents for fallback actions")
        return []
    step_id = task_step.get('id', 'unknown')
    fallback_actions = [
        {
            "method": "document",
            "action": "analyze",
            "parameters": {
                "fileId": doc,
                "analysis": ["entities", "topics", "sentiment"]
            },
            "resultLabel": f"documentList_fallback_{step_id}_{i}_analysis",
            "description": f"Fallback document analysis for {doc}"
        }
        for i, doc in enumerate(available_docs)
    ]
    logger.info(f"Created {len(fallback_actions)} fallback actions")
    return fallback_actions
async def reviewTaskStepResults(self, review_context: Dict[str, Any]) -> Dict[str, Any]:
    """Have the AI review a task step's results; degrade gracefully on failure.

    Missing fields in the AI's answer get neutral defaults. If the review
    itself fails, a 'success' verdict is returned so a broken reviewer does
    not stall the workflow.
    """
    try:
        review_prompt = self._createResultReviewPrompt(review_context)
        raw_response = await self._callAIWithCircuitBreaker(review_prompt, "result_review")
        review = self._parseReviewResponse(raw_response)
        # Fill in neutral defaults for anything the AI omitted.
        for key, fallback in (('status', 'unknown'),
                              ('reason', 'No reason provided'),
                              ('quality_score', 5)):
            review.setdefault(key, fallback)
        logger.info(f"Review result: {review.get('status', 'unknown')}")
        return review
    except Exception as e:
        logger.error(f"Error reviewing task step results: {str(e)}")
        return {
            'status': 'success',  # Default to success to avoid blocking workflow
            'reason': f'Review failed: {str(e)}',
            'quality_score': 5,
            'confidence': 0.5
        }
# ===== Prompt Creation Methods =====
def _createTaskPlanningPrompt(self, context: Dict[str, Any]) -> str:
    """Render the task-planning prompt from the generation context.

    :param context: must contain 'user_request' (str) and
        'available_documents' (list of str) — both are indexed directly, so
        a missing key raises KeyError in the caller's try block
    :return: the fully rendered prompt string
    """
    return f"""You are a task planning AI that analyzes user requests and creates structured task plans.
USER REQUEST: {context['user_request']}
AVAILABLE DOCUMENTS: {', '.join(context['available_documents'])}
INSTRUCTIONS:
1. Analyze the user request and available documents
2. Break down the request into logical task steps
3. Ensure all documents are properly utilized
4. Create a sequence that ensures proper handover between tasks
5. Return a JSON object with the exact structure shown below
REQUIRED JSON STRUCTURE:
{{
"overview": "Brief description of the overall plan",
"tasks": [
{{
"id": "task_1",
"description": "Clear description of what this task does",
"dependencies": ["task_0"], // IDs of tasks that must complete first
"expected_outputs": ["output1", "output2"],
"success_criteria": ["criteria1", "criteria2"],
"required_documents": ["doc1", "doc2"],
"estimated_complexity": "low|medium|high"
}}
]
}}
EXAMPLE FOR CANDIDATE EVALUATION:
{{
"overview": "Analyze candidate profiles, create evaluation matrix, generate presentation, and store results",
"tasks": [
{{
"id": "task_1",
"description": "Extract and analyze all candidate profiles and position criteria",
"dependencies": [],
"expected_outputs": ["candidate_analysis", "criteria_analysis"],
"success_criteria": ["All 3 candidates analyzed", "Position criteria understood"],
"required_documents": ["candidate_1_profile.txt", "candidate_2_profile.txt", "candidate_3_profile.txt", "product_designer_criteria.txt"],
"estimated_complexity": "medium"
}},
{{
"id": "task_2",
"description": "Create comprehensive evaluation matrix based on criteria",
"dependencies": ["task_1"],
"expected_outputs": ["evaluation_matrix"],
"success_criteria": ["Matrix covers all evaluation criteria", "Scoring system defined"],
"required_documents": ["criteria_analysis"],
"estimated_complexity": "medium"
}},
{{
"id": "task_3",
"description": "Rate all candidates against the evaluation matrix",
"dependencies": ["task_1", "task_2"],
"expected_outputs": ["candidate_ratings", "comparison_analysis"],
"success_criteria": ["All candidates rated", "Clear ranking established"],
"required_documents": ["candidate_analysis", "evaluation_matrix"],
"estimated_complexity": "high"
}},
{{
"id": "task_4",
"description": "Generate professional PowerPoint presentation for management",
"dependencies": ["task_3"],
"expected_outputs": ["presentation_file"],
"success_criteria": ["Executive-ready presentation", "Clear recommendations included"],
"required_documents": ["candidate_ratings", "comparison_analysis"],
"estimated_complexity": "high"
}},
{{
"id": "task_5",
"description": "Store presentation in SharePoint for p.motsch valueon account",
"dependencies": ["task_4"],
"expected_outputs": ["sharepoint_storage_confirmation"],
"success_criteria": ["File uploaded successfully", "Proper access permissions set"],
"required_documents": ["presentation_file"],
"estimated_complexity": "low"
}}
]
}}
CRITICAL: Respond with ONLY the JSON object. Do not include any explanatory text."""
def _createActionGenerationPrompt(self, context: Dict[str, Any]) -> str:
    """Build the prompt that asks the AI to turn one task step into concrete actions.

    Args:
        context: Dict with 'task_step' (the plan step dict), 'available_documents'
            (list of filenames visible in the workflow), 'previous_results'
            (result labels produced by earlier actions) and, optionally,
            'improvements' (reviewer feedback when a step is being retried).

    Returns:
        A single prompt string instructing the model to answer with ONLY a
        JSON object of the form {"actions": [...]}.
    """
    task_step = context['task_step']
    available_docs = context['available_documents']
    previous_results = context['previous_results']
    # 'improvements' is only present on retries, after a review requested changes.
    improvements = context.get('improvements', '')
    # NOTE: prompt text is flush-left on purpose — leading whitespace would
    # become part of the runtime string sent to the model.
    return f"""You are an action generation AI that creates specific actions to accomplish a task step.
TASK STEP: {task_step.get('description', 'Unknown')}
TASK ID: {task_step.get('id', 'Unknown')}
EXPECTED OUTPUTS: {', '.join(task_step.get('expected_outputs', []))}
SUCCESS CRITERIA: {', '.join(task_step.get('success_criteria', []))}
AVAILABLE DOCUMENTS: {', '.join(available_docs)}
PREVIOUS RESULTS: {', '.join(previous_results) if previous_results else 'None'}
IMPROVEMENTS NEEDED: {improvements if improvements else 'None'}
AVAILABLE METHODS:
{self._getAvailableMethodsDescription()}
INSTRUCTIONS:
1. Generate specific actions to accomplish this task step
2. Use available documents and previous results
3. Ensure proper result labels for handover
4. Follow the exact JSON structure below
5. ALL fields are REQUIRED: method, action, parameters, resultLabel, description
REQUIRED JSON STRUCTURE:
{{
"actions": [
{{
"method": "method_name",
"action": "action_name",
"parameters": {{
"param1": "value1",
"param2": "value2"
}},
"resultLabel": "documentList_uuid_descriptive_label",
"description": "What this action does"
}}
]
}}
FIELD REQUIREMENTS:
- "method": Must be one of the available methods (e.g., "document", "excel", "powerpoint")
- "action": Must be a valid action for that method (e.g., "analyze", "create", "write")
- "parameters": Object with method-specific parameters
- "resultLabel": MUST start with "documentList_" followed by unique identifier and descriptive label
- "description": Clear description of what the action accomplishes
EXAMPLE VALID ACTION:
{{
"method": "document",
"action": "analyze",
"parameters": {{
"fileId": "candidate_1_profile.txt",
"analysis": ["entities", "topics", "sentiment"]
}},
"resultLabel": "documentList_abc123_candidate_analysis",
"description": "Analyze candidate profile for key information extraction"
}}
CRITICAL: Respond with ONLY the JSON object. Do not include any explanatory text."""
def _createResultReviewPrompt(self, review_context: Dict[str, Any]) -> str:
    """Build the prompt that asks the AI to judge whether a task step succeeded.

    Args:
        review_context: Dict with 'task_step' (the plan step being evaluated)
            and 'step_result' (the raw execution result to review; must be
            JSON-serializable since it is embedded via json.dumps).

    Returns:
        A prompt string; the model is expected to reply with ONLY a JSON
        object whose 'status' is one of success/retry/failed, plus diagnostics
        such as quality_score and missing_outputs.
    """
    task_step = review_context['task_step']
    step_result = review_context['step_result']
    # Prompt text is flush-left on purpose — indentation would leak into the
    # runtime string sent to the model.
    return f"""You are a result review AI that evaluates task step completion and decides on next actions.
TASK STEP: {task_step.get('description', 'Unknown')}
EXPECTED OUTPUTS: {', '.join(task_step.get('expected_outputs', []))}
SUCCESS CRITERIA: {', '.join(task_step.get('success_criteria', []))}
STEP RESULT: {json.dumps(step_result, indent=2)}
INSTRUCTIONS:
1. Evaluate if the task step was completed successfully
2. Check if all expected outputs were produced
3. Verify if success criteria were met
4. Decide on next action: continue, retry, or fail
5. If retry, provide specific improvements needed
REQUIRED JSON STRUCTURE:
{{
"status": "success|retry|failed",
"reason": "Explanation of the decision",
"improvements": "Specific improvements for retry (if status is retry)",
"quality_score": 1-10,
"missing_outputs": ["output1", "output2"],
"met_criteria": ["criteria1", "criteria2"],
"unmet_criteria": ["criteria3", "criteria4"]
}}
CRITICAL: Respond with ONLY the JSON object. Do not include any explanatory text."""
# ===== Helper Methods =====
def _getAvailableDocuments(self, workflow: ChatWorkflow) -> List[str]:
"""Get list of available documents in the workflow"""
documents = []
for message in workflow.messages:
for doc in message.documents:
documents.append(doc.filename)
return documents
def _getPreviousResults(self, task: TaskItem) -> List[str]:
"""Get list of previous results from completed actions"""
results = []
for action in task.actionList:
if action.execResultLabel:
results.append(action.execResultLabel)
return results
def _getAvailableMethodsDescription(self) -> str:
"""Get description of available methods for action generation"""
try:
if hasattr(self, 'service') and self.service:
methods = self.service.getMethodsList()
return '\n'.join([f"- {method}" for method in methods])
else:
return """- document.analyze: Analyze document content
- document.extract: Extract content from document
- document.summarize: Summarize document content
- excel.create: Create new Excel file
- excel.write: Write data to Excel file
- powerpoint.createPresentation: Create new PowerPoint presentation
- powerpoint.addSlide: Add slide to presentation
- sharepoint.write: Write to SharePoint
- operator.aiCall: Call AI service with content"""
except Exception as e:
logger.error(f"Error getting available methods: {str(e)}")
return "- document.analyze: Analyze document content"
def _parseTaskPlanResponse(self, response: str) -> Dict[str, Any]:
"""Parse AI response into task plan structure"""
try:
# Extract JSON from response
json_start = response.find('{')
json_end = response.rfind('}') + 1
if json_start == -1 or json_end == 0:
raise ValueError("No JSON found in response")
json_str = response[json_start:json_end]
task_plan = json.loads(json_str)
# Validate structure
if 'tasks' not in task_plan:
raise ValueError("Task plan missing 'tasks' field")
return task_plan
except Exception as e:
logger.error(f"Error parsing task plan response: {str(e)}")
return {'tasks': []}
def _parseActionResponse(self, response: str) -> List[Dict[str, Any]]:
"""Parse AI response into action list"""
try:
# Extract JSON from response
json_start = response.find('{')
json_end = response.rfind('}') + 1
if json_start == -1 or json_end == 0:
raise ValueError("No JSON found in response")
json_str = response[json_start:json_end]
action_data = json.loads(json_str)
# Validate structure
if 'actions' not in action_data:
raise ValueError("Action response missing 'actions' field")
return action_data['actions']
except Exception as e:
logger.error(f"Error parsing action response: {str(e)}")
return []
def _parseReviewResponse(self, response: str) -> Dict[str, Any]:
"""Parse AI response into review result"""
try:
# Extract JSON from response
json_start = response.find('{')
json_end = response.rfind('}') + 1
if json_start == -1 or json_end == 0:
raise ValueError("No JSON found in response")
json_str = response[json_start:json_end]
review = json.loads(json_str)
# Validate structure
if 'status' not in review:
raise ValueError("Review response missing 'status' field")
return review
except Exception as e:
logger.error(f"Error parsing review response: {str(e)}")
return {'status': 'failed', 'reason': f'Parse error: {str(e)}'}
async def _callAI(self, prompt: str, context: str) -> str:
"""Call AI service with prompt"""
try:
# Use the existing AI call mechanism through service
if hasattr(self, 'service') and self.service:
# Ensure service is properly initialized
if hasattr(self.service, 'callAiTextBasic'):
response = await self.service.callAiTextBasic(prompt)
return response
else:
raise Exception("Service does not have callAiTextBasic method")
else:
raise Exception("No service available for AI calls")
except Exception as e:
logger.error(f"Error calling AI for {context}: {str(e)}")
raise
async def executeAction(self, action: Dict[str, Any], workflow: ChatWorkflow) -> Dict[str, Any]:
    """Execute a single planned action by delegating it to the AI backend.

    NOTE(review): the action is not dispatched to a concrete method
    implementation here — the AI is asked to perform/simulate it and report
    the outcome in a strict JSON envelope; confirm this is intentional.

    Args:
        action: Action dict with 'method', 'action', 'parameters' and
            'description' keys (all read defensively via .get).
        workflow: Current chat workflow (unused by this implementation —
            presumably kept for interface parity with other executors).

    Returns:
        Dict with 'status' ("completed"/"failed"), 'result', 'error',
        'resultLabel', 'documents' and the original 'action'. Never raises;
        all failures are reported through status/error.
    """
    try:
        # Prompt text is flush-left on purpose: leading whitespace would
        # become part of the runtime string sent to the model.
        prompt = f"""Execute the following action and return ONLY a JSON response.
Action: {action.get('method', 'unknown')}.{action.get('action', 'unknown')}
Parameters: {json.dumps(action.get('parameters', {}), indent=2)}
Description: {action.get('description', 'No description provided')}
CRITICAL: Respond with ONLY a JSON object in this exact format:
{{
"result": "Description of what was accomplished",
"resultLabel": "documentList_{action.get('method', 'unknown')}_{action.get('action', 'unknown')}_result",
"documents": [
"document_{action.get('method', 'unknown')}_{action.get('action', 'unknown')}_output.txt"
],
"error": ""
}}
DO NOT include any explanatory text, markdown formatting, or additional content outside the JSON structure.
DO NOT use code blocks or backticks.
Return ONLY the JSON object."""
        # Route through the circuit breaker so repeated AI failures trip fast.
        response = await self._callAIWithCircuitBreaker(prompt, "action_execution")
        # Tolerant extraction: the model may wrap the JSON object in prose.
        result = self._extractJsonFromResponse(response)
        if not result:
            logger.error(f"Invalid JSON in action result: {response}")
            return {
                "status": "failed",
                "error": "Invalid result format",
                "action": action
            }
        # A non-empty "error" field from the model marks the action as failed.
        return {
            "status": "completed" if not result.get("error") else "failed",
            "result": result.get("result", ""),
            "error": result.get("error", ""),
            "resultLabel": result.get("resultLabel", ""),
            "documents": result.get("documents", []),
            "action": action
        }
    except Exception as e:
        logger.error(f"Error executing action: {str(e)}")
        return {
            "status": "failed",
            "error": str(e),
            "action": action
        }