1194 lines
51 KiB
Python
1194 lines
51 KiB
Python
import asyncio
|
|
import logging
|
|
import uuid
|
|
import json
|
|
from typing import Dict, Any, Optional, List, Union
|
|
from datetime import datetime, UTC
|
|
|
|
from modules.interfaces.interfaceAppModel import User
|
|
from modules.interfaces.interfaceChatModel import (
|
|
TaskStatus, ChatDocument, TaskItem, TaskAction, TaskResult, ChatStat, ChatLog, ChatMessage, ChatWorkflow
|
|
)
|
|
from modules.workflow.serviceContainer import ServiceContainer
|
|
from modules.interfaces.interfaceChatObjects import ChatObjects
|
|
|
|
logger = logging.getLogger(__name__)
|
|
|
|
class ChatManager:
|
|
"""Chat manager with improved AI integration and method handling"""
|
|
|
|
def __init__(self, currentUser: User, chatInterface: ChatObjects):
    """Store collaborators and reset all runtime state.

    Args:
        currentUser: The user this manager acts on behalf of.
        chatInterface: Factory interface for chat model objects.
    """
    # Collaborators supplied by the caller.
    self.currentUser = currentUser
    self.chatInterface = chatInterface

    # Populated later by initialize().
    self.service: ServiceContainer = None
    self.workflow: ChatWorkflow = None

    # Circuit-breaker state for AI calls.
    self.ai_failure_count = 0
    self.ai_last_failure_time = None
    self.ai_circuit_breaker_threshold = 5
    self.ai_circuit_breaker_timeout = 300  # 5 minutes

    # Timeout settings (seconds).
    self.ai_call_timeout = 120  # 2 minutes
    self.task_execution_timeout = 600  # 10 minutes
|
|
|
|
# ===== Initialization and Setup =====
|
|
async def initialize(self, workflow: ChatWorkflow) -> None:
    """Bind the workflow to this manager and build its service container."""
    self.workflow = workflow
    self.service = ServiceContainer(self.currentUser, self.workflow)
|
|
|
|
# ===== WORKFLOW PHASES =====
|
|
|
|
# Phase 1: High-Level Task Planning
|
|
async def planHighLevelTasks(self, userInput: str, workflow: ChatWorkflow) -> Dict[str, Any]:
    """Phase 1: derive a validated high-level task plan from the user input.

    Falls back to a canned plan when the AI output fails validation or
    any step of the pipeline raises.
    """
    try:
        logger.info(f"Planning high-level tasks for workflow {workflow.id}")

        # Ask the AI for a plan built from the request and document context.
        planning_prompt = self._createTaskPlanningPrompt({
            'user_request': userInput,
            'available_documents': self._getAvailableDocuments(workflow),
            'workflow_id': workflow.id
        })
        ai_response = await self.service.callAiTextAdvanced(planning_prompt)

        plan = self._parseTaskPlanResponse(ai_response)

        # Unusable AI output is replaced by the deterministic fallback plan.
        if not self._validateTaskPlan(plan):
            logger.warning("Generated task plan failed validation, using fallback")
            plan = self._createFallbackTaskPlan({
                'user_request': userInput,
                'available_documents': self._getAvailableDocuments(workflow)
            })

        logger.info(f"High-level task planning completed: {len(plan.get('tasks', []))} tasks")
        return plan

    except Exception as e:
        logger.error(f"Error in high-level task planning: {str(e)}")
        return self._createFallbackTaskPlan({
            'user_request': userInput,
            'available_documents': self._getAvailableDocuments(workflow)
        })
|
|
|
|
# Phase 2: Task Definition and Action Generation
|
|
async def defineTaskActions(self, task_step: Dict[str, Any], workflow: ChatWorkflow, previous_results: List[str] = None) -> List[TaskAction]:
    """Phase 2: turn a task step into concrete TaskAction objects.

    Returns an empty list on any failure.
    """
    try:
        logger.info(f"Defining actions for task: {task_step.get('description', 'Unknown')}")

        # Context handed to the AI-backed action generator.
        generation_context = {
            'task_step': task_step,
            'workflow': workflow,
            'workflow_id': workflow.id,
            'available_documents': self._getAvailableDocuments(workflow),
            'previous_results': previous_results or [],
            'improvements': None
        }

        raw_actions = await self._generateActionsForTaskStep(generation_context)

        # Materialize each raw dict as a TaskAction via the chat interface.
        task_actions: List[TaskAction] = []
        for raw in raw_actions:
            created = self.chatInterface.createTaskAction({
                "execMethod": raw.get('method', 'unknown'),
                "execAction": raw.get('action', 'unknown'),
                "execParameters": raw.get('parameters', {}),
                "execResultLabel": raw.get('resultLabel', ''),
                "status": TaskStatus.PENDING
            })
            if created:
                task_actions.append(created)
                logger.info(f"Created task action: {created.execMethod}.{created.execAction}")

        logger.info(f"Task action definition completed: {len(task_actions)} actions")
        return task_actions

    except Exception as e:
        logger.error(f"Error defining task actions: {str(e)}")
        return []
|
|
|
|
# Phase 3: Action Execution
|
|
async def executeTaskActions(self, task_actions: List[TaskAction], workflow: ChatWorkflow) -> List[Dict[str, Any]]:
    """Phase 3: run every action in order, stopping at the first failure."""
    try:
        total = len(task_actions)
        logger.info(f"Executing {total} task actions")

        results: List[Dict[str, Any]] = []
        for position, action in enumerate(task_actions, start=1):
            logger.info(f"Executing action {position}/{total}: {action.execMethod}.{action.execAction}")

            outcome = await self._executeSingleAction(action, workflow)
            results.append(outcome)

            # Abort the remaining actions as soon as one fails.
            if outcome.get('status') == 'failed':
                logger.error(f"Action {position} failed, stopping task execution")
                break

        logger.info(f"Task action execution completed: {len(results)} results")
        return results

    except Exception as e:
        logger.error(f"Error executing task actions: {str(e)}")
        return []
|
|
|
|
# Phase 4: Task Review and Quality Assessment
|
|
async def reviewTaskCompletion(self, task_step: Dict[str, Any], task_actions: List[TaskAction],
                               action_results: List[Dict[str, Any]], workflow: ChatWorkflow) -> Dict[str, Any]:
    """Phase 4: assess the executed actions and decide how to proceed.

    Returns the AI review dict enriched with quality metrics; on error a
    'failed' review with zeroed metrics is returned.
    """
    try:
        logger.info(f"Reviewing task completion: {task_step.get('description', 'Unknown')}")

        # Partition outcomes once; both the summary and counts derive from it.
        completed = [r for r in action_results if r.get('status') == 'completed']
        failed = [r for r in action_results if r.get('status') == 'failed']

        step_result = {
            'task_step': task_step,
            'action_results': action_results,
            'successful_actions': len(completed),
            'total_actions': len(action_results),
            'results': [r.get('result', '') for r in completed],
            'errors': [r.get('error', '') for r in failed]
        }

        review_context = {
            'task_step': task_step,
            'task_actions': task_actions,
            'action_results': action_results,
            'step_result': step_result,
            'workflow_id': workflow.id,
            'previous_results': self._getPreviousResultsFromActions(task_actions)
        }

        # Delegate the judgement call to the AI reviewer, then attach metrics.
        review = await self._performTaskReview(review_context)
        review['quality_metrics'] = self._calculateTaskQualityMetrics(task_step, action_results)

        logger.info(f"Task review completed: {review.get('status', 'unknown')}")
        return review

    except Exception as e:
        logger.error(f"Error reviewing task completion: {str(e)}")
        return {
            'status': 'failed',
            'reason': f'Review failed: {str(e)}',
            'quality_metrics': {'score': 0, 'confidence': 0}
        }
|
|
|
|
# Phase 5: Task Handover and State Management
|
|
async def prepareTaskHandover(self, task_step: Dict[str, Any], task_actions: List[TaskAction],
                              review_result: Dict[str, Any], workflow: ChatWorkflow) -> Dict[str, Any]:
    """Phase 5: package review results for the next task (or completion)."""
    try:
        logger.info(f"Preparing task handover: {task_step.get('description', 'Unknown')}")

        succeeded = review_result.get('status') == 'success'
        final_status = TaskStatus.COMPLETED if succeeded else TaskStatus.FAILED

        # Resolve any still-pending actions to the overall review outcome.
        for action in task_actions:
            if action.status == TaskStatus.PENDING:
                action.status = final_status

        handover_data = {
            'task_step': task_step,
            'task_actions': task_actions,
            'review_result': review_result,
            'next_task_ready': succeeded,
            'available_results': self._getPreviousResultsFromActions(task_actions)
        }

        logger.info(f"Task handover prepared: next_task_ready={handover_data['next_task_ready']}")
        return handover_data

    except Exception as e:
        logger.error(f"Error preparing task handover: {str(e)}")
        return {
            'task_step': task_step,
            'task_actions': task_actions,
            'review_result': review_result,
            'next_task_ready': False,
            'available_results': []
        }
|
|
|
|
|
|
|
|
|
|
|
|
# ===== Utility Methods =====
|
|
|
|
async def processFileIds(self, fileIds: List[str]) -> List[ChatDocument]:
    """Resolve file IDs to ChatDocument objects, skipping unresolvable ones."""
    documents: List[ChatDocument] = []
    for fileId in fileIds:
        try:
            # Guard: the service container must exist before lookups.
            if not hasattr(self, 'service') or not self.service:
                logger.error(f"Service not initialized for file ID {fileId}")
                continue

            fileInfo = self.service.getFileInfo(fileId)
            if not fileInfo:
                logger.warning(f"No file info found for file ID {fileId}")
                continue

            document = self.chatInterface.createChatDocument({
                "fileId": fileId,
                "filename": fileInfo.get("filename", "unknown"),
                "fileSize": fileInfo.get("size", 0),
                "mimeType": fileInfo.get("mimeType", "application/octet-stream")
            })
            if document:
                documents.append(document)
                logger.info(f"Processed file ID {fileId} -> {document.filename}")
        except Exception as e:
            logger.error(f"Error processing file ID {fileId}: {str(e)}")
    return documents
|
|
|
|
def setUserLanguage(self, language: str) -> None:
|
|
"""Set user language for the chat manager"""
|
|
if hasattr(self, 'service') and self.service:
|
|
self.service.user.language = language
|
|
|
|
# ===== Enhanced Task Planning Methods =====
|
|
|
|
async def _callAIWithCircuitBreaker(self, prompt: str, context: str) -> str:
|
|
"""Call AI with circuit breaker pattern for fault tolerance"""
|
|
try:
|
|
# Check circuit breaker
|
|
if self._isCircuitBreakerOpen():
|
|
raise Exception("AI circuit breaker is open - too many recent failures")
|
|
|
|
# Call AI with timeout
|
|
logger.debug(f"ACTION GENERATION PROMPT: {prompt}")
|
|
response = await asyncio.wait_for(
|
|
self._callAI(prompt, context),
|
|
timeout=self.ai_call_timeout
|
|
)
|
|
|
|
# Reset failure count on success
|
|
self.ai_failure_count = 0
|
|
return response
|
|
|
|
except asyncio.TimeoutError:
|
|
self._recordAIFailure("Timeout")
|
|
raise Exception(f"AI call timed out after {self.ai_call_timeout} seconds")
|
|
except Exception as e:
|
|
self._recordAIFailure(str(e))
|
|
raise
|
|
|
|
def _isCircuitBreakerOpen(self) -> bool:
|
|
"""Check if circuit breaker is open"""
|
|
if self.ai_failure_count >= self.ai_circuit_breaker_threshold:
|
|
if self.ai_last_failure_time:
|
|
time_since_failure = (datetime.now(UTC) - self.ai_last_failure_time).total_seconds()
|
|
if time_since_failure < self.ai_circuit_breaker_timeout:
|
|
return True
|
|
else:
|
|
# Reset circuit breaker after timeout
|
|
self.ai_failure_count = 0
|
|
self.ai_last_failure_time = None
|
|
return False
|
|
|
|
def _recordAIFailure(self, error: str) -> None:
    """Record AI failure for circuit breaker.

    Increments the rolling failure counter and stamps the failure time;
    _isCircuitBreakerOpen() uses both to decide whether to reject calls.

    Args:
        error: Human-readable description of the failure (logged only).
    """
    self.ai_failure_count += 1
    self.ai_last_failure_time = datetime.now(UTC)
    # Warn with count vs. threshold so operators can see how close the
    # breaker is to tripping.
    logger.warning(f"AI failure recorded ({self.ai_failure_count}/{self.ai_circuit_breaker_threshold}): {error}")
|
|
|
|
def _validateTaskPlan(self, task_plan: Dict[str, Any]) -> bool:
|
|
"""Validate task plan structure and dependencies"""
|
|
try:
|
|
if not isinstance(task_plan, dict):
|
|
return False
|
|
|
|
if 'tasks' not in task_plan or not isinstance(task_plan['tasks'], list):
|
|
return False
|
|
|
|
# Check each task
|
|
task_ids = set()
|
|
for task in task_plan['tasks']:
|
|
if not isinstance(task, dict):
|
|
return False
|
|
|
|
required_fields = ['id', 'description', 'expected_outputs', 'success_criteria']
|
|
if not all(field in task for field in required_fields):
|
|
return False
|
|
|
|
# Check for duplicate task IDs
|
|
if task['id'] in task_ids:
|
|
return False
|
|
task_ids.add(task['id'])
|
|
|
|
# Validate dependencies
|
|
dependencies = task.get('dependencies', [])
|
|
if not isinstance(dependencies, list):
|
|
return False
|
|
|
|
# Check that dependencies reference existing tasks
|
|
for dep in dependencies:
|
|
if dep not in task_ids and dep != 'task_0': # Allow task_0 as special case
|
|
return False
|
|
|
|
return True
|
|
|
|
except Exception as e:
|
|
logger.error(f"Error validating task plan: {str(e)}")
|
|
return False
|
|
|
|
def _createFallbackTaskPlan(self, context: Dict[str, Any]) -> Dict[str, Any]:
|
|
"""Create a fallback task plan when AI generation fails"""
|
|
logger.warning("Creating fallback task plan due to AI generation failure")
|
|
|
|
return {
|
|
"overview": "Fallback task plan - basic document analysis and processing",
|
|
"tasks": [
|
|
{
|
|
"id": "task_1",
|
|
"description": "Analyze all provided documents",
|
|
"dependencies": [],
|
|
"expected_outputs": ["document_analysis"],
|
|
"success_criteria": ["All documents processed"],
|
|
"required_documents": context.get('available_documents', []),
|
|
"estimated_complexity": "medium"
|
|
},
|
|
{
|
|
"id": "task_2",
|
|
"description": "Generate basic output based on analysis",
|
|
"dependencies": ["task_1"],
|
|
"expected_outputs": ["basic_output"],
|
|
"success_criteria": ["Output generated"],
|
|
"required_documents": ["document_analysis"],
|
|
"estimated_complexity": "low"
|
|
}
|
|
]
|
|
}
|
|
|
|
def _validateActions(self, actions: List[Dict[str, Any]], context: Dict[str, Any]) -> bool:
|
|
"""Validate generated actions"""
|
|
try:
|
|
if not isinstance(actions, list):
|
|
logger.error("Actions must be a list")
|
|
return False
|
|
|
|
if len(actions) == 0:
|
|
logger.warning("No actions generated")
|
|
return False
|
|
|
|
for i, action in enumerate(actions):
|
|
if not isinstance(action, dict):
|
|
logger.error(f"Action {i} must be a dictionary")
|
|
return False
|
|
|
|
# Check required fields
|
|
required_fields = ['method', 'action', 'parameters', 'resultLabel']
|
|
missing_fields = []
|
|
for field in required_fields:
|
|
if field not in action or not action[field]:
|
|
missing_fields.append(field)
|
|
|
|
if missing_fields:
|
|
logger.error(f"Action {i} missing required fields: {missing_fields}")
|
|
return False
|
|
|
|
# Validate result label format
|
|
result_label = action.get('resultLabel', '')
|
|
if not result_label.startswith('mdoc:'):
|
|
logger.error(f"Action {i} result label must start with 'mdoc:': {result_label}")
|
|
return False
|
|
|
|
# Validate parameters
|
|
parameters = action.get('parameters', {})
|
|
if not isinstance(parameters, dict):
|
|
logger.error(f"Action {i} parameters must be a dictionary")
|
|
return False
|
|
|
|
logger.info(f"Successfully validated {len(actions)} actions")
|
|
return True
|
|
|
|
except Exception as e:
|
|
logger.error(f"Error validating actions: {str(e)}")
|
|
return False
|
|
|
|
def _createFallbackActions(self, task_step: Dict[str, Any], context: Dict[str, Any]) -> List[Dict[str, Any]]:
|
|
"""Create fallback actions when AI generation fails"""
|
|
logger.warning("Creating fallback actions due to AI generation failure")
|
|
|
|
# Get available documents
|
|
available_docs = context.get('available_documents', [])
|
|
if not available_docs:
|
|
logger.warning("No available documents for fallback actions")
|
|
return []
|
|
|
|
# Create fallback actions for document analysis
|
|
fallback_actions = []
|
|
for i, doc in enumerate(available_docs):
|
|
fallback_actions.append({
|
|
"method": "document",
|
|
"action": "analyze",
|
|
"parameters": {
|
|
"fileId": doc,
|
|
"analysis": ["entities", "topics", "sentiment"]
|
|
},
|
|
"resultLabel": f"mdoc:fallback:{task_step.get('id', 'unknown')}:{i}:analysis",
|
|
"description": f"Fallback document analysis for {doc}"
|
|
})
|
|
|
|
logger.info(f"Created {len(fallback_actions)} fallback actions")
|
|
return fallback_actions
|
|
|
|
# ===== Prompt Creation Methods =====
|
|
|
|
def _createTaskPlanningPrompt(self, context: Dict[str, Any]) -> str:
    """Build the Phase-1 planning prompt sent to the AI.

    Args:
        context: Must contain 'user_request' (str) and
            'available_documents' (list of document names/ids). A
            'workflow_id' key may also be present but is not interpolated
            into the prompt text.

    Returns:
        A prompt instructing the AI to respond with ONLY a JSON task plan
        matching the structure parsed by _parseTaskPlanResponse /
        _validateTaskPlan.
    """
    # NOTE: the doubled braces ({{ }}) render as literal braces in the
    # f-string so the JSON example survives interpolation.
    return f"""You are a task planning AI that analyzes user requests and creates structured task plans.

USER REQUEST: {context['user_request']}

AVAILABLE DOCUMENTS: {', '.join(context['available_documents'])}

INSTRUCTIONS:
1. Analyze the user request and available documents
2. Break down the request into logical task steps
3. Ensure all documents are properly utilized
4. Create a sequence that ensures proper handover between tasks
5. Return a JSON object with the exact structure shown below

REQUIRED JSON STRUCTURE:
{{
    "overview": "Brief description of the overall plan",
    "tasks": [
        {{
            "id": "task_1",
            "description": "Clear description of what this task does",
            "dependencies": ["task_0"], // IDs of tasks that must complete first
            "expected_outputs": ["output1", "output2"],
            "success_criteria": ["criteria1", "criteria2"],
            "required_documents": ["doc1", "doc2"],
            "estimated_complexity": "low|medium|high"
        }}
    ]
}}

NOTE: Respond with ONLY the JSON object. Do not include any explanatory text."""
|
|
|
|
async def _createActionDefinitionPrompt(self, context: Dict[str, Any]) -> str:
|
|
"""Create prompt for action generation"""
|
|
task_step = context['task_step']
|
|
workflow = context.get('workflow')
|
|
available_docs = context['available_documents']
|
|
previous_results = context['previous_results']
|
|
improvements = context.get('improvements', '')
|
|
|
|
# Get available methods
|
|
methodList = self.service.getMethodsList()
|
|
|
|
# Get workflow history
|
|
messageSummary = await self.service.summarizeChat(workflow.messages)
|
|
|
|
# Get available documents and connections
|
|
docRefs = self.service.getDocumentReferenceList()
|
|
connRefs = self.service.getConnectionReferenceList()
|
|
|
|
return f"""You are an action generation AI that creates specific actions to accomplish a task step.
|
|
|
|
TASK STEP: {task_step.get('description', 'Unknown')}
|
|
TASK ID: {task_step.get('id', 'Unknown')}
|
|
|
|
EXPECTED OUTPUTS:
|
|
{', '.join(task_step.get('expected_outputs', []))}
|
|
|
|
SUCCESS CRITERIA:
|
|
{', '.join(task_step.get('success_criteria', []))}
|
|
|
|
CONTEXT - Chat History:
|
|
{messageSummary}
|
|
|
|
AVAILABLE METHODS
|
|
{chr(10).join(f"- {method}" for method in methodList)}
|
|
|
|
AVAILABLE CONNECTIONS
|
|
{chr(10).join(f"- {conn}" for conn in connRefs)}
|
|
|
|
AVAILABLE DOCUMENTS
|
|
{chr(10).join(f"- {doc['documentReference']} ({doc['actionMethod']}.{doc['actionName']}, {doc['documentCount']} documents, {doc['datetime']})" for doc in docRefs.get('chat', []))}
|
|
|
|
PREVIOUS RESULTS:
|
|
{', '.join(previous_results) if previous_results else 'None'}
|
|
|
|
IMPROVEMENTS NEEDED:
|
|
{improvements if improvements else 'None'}
|
|
|
|
INSTRUCTIONS:
|
|
1. Generate specific actions to accomplish this task step
|
|
2. Use available documents, connections, and previous results
|
|
3. Ensure proper result labels for handover
|
|
4. Follow the exact JSON structure below
|
|
5. ALL fields are REQUIRED: method, action, parameters, resultLabel, description
|
|
|
|
REQUIRED JSON STRUCTURE:
|
|
{{
|
|
"actions": [
|
|
{{
|
|
"method": "method_name",
|
|
"action": "action_name",
|
|
"parameters": {{
|
|
"param1": "value1",
|
|
"param2": "value2",
|
|
}},
|
|
"resultLabel": "mdoc:uuid:descriptiveLabel",
|
|
"description": "What this action does"
|
|
}}
|
|
]
|
|
}}
|
|
|
|
FIELD REQUIREMENTS:
|
|
- "method": Must be one of the available methods listed above
|
|
- "action": Must be a valid action for that method
|
|
- "parameters": Object with method-specific parameters
|
|
- "resultLabel": MUST start with "mdoc:" followed by unique identifier and descriptive label
|
|
- "description": Clear description of what the action accomplishes
|
|
|
|
MANDATORY PARAMETER AND RETURN VALUE RULES:
|
|
|
|
1. CONNECTION PARAMETERS:
|
|
- Parameter name: "connectionReference" (NOT "connection", "site", "connectionId", etc.)
|
|
- Value: Must be a connection reference from "Connections" section above
|
|
- Format: "connection:authority:user:connectionId"
|
|
- Example: "connection:msft:testuser@example.com:1234"
|
|
|
|
2. DOCUMENT PARAMETERS:
|
|
- Parameter name: "documentReference" (NOT "document", "fileId", "documents", etc.)
|
|
- Value: Must be a document reference from "Documents" section or previous results
|
|
- Format: "mdoc:uuid:descriptiveLabel"
|
|
- Document references represent a LIST of documents, not single documents
|
|
- All document inputs expect documentList references
|
|
|
|
3. RETURN VALUES:
|
|
- ALL actions must return documentList references in resultLabel
|
|
- Result labels must start with "mdoc:"
|
|
- Each action creates a unique documentList for handover
|
|
- Document lists can contain 0, 1, or multiple documents
|
|
- No actions return single documents - always documentLists
|
|
|
|
4. PARAMETER VALIDATION:
|
|
- Use only document references from "Documents" section above
|
|
- Use only connection references from "Connections" section above
|
|
- Use result labels from previous results in the sequence
|
|
- All parameter values must be strings
|
|
- Document references show: method.action - document count - timestamp
|
|
|
|
5. RESULT USAGE RULES:
|
|
- Previous results can be referenced as: "mdoc:uuid:label"
|
|
- Use result labels from previous actions in the sequence
|
|
- Example: If previous action created "mdoc:abc123:salesData",
|
|
reference it as "mdoc:abc123:salesData" in parameters
|
|
- Results are available in the PREVIOUS RESULTS section above
|
|
- Each action should create a unique resultLabel for handover to next actions
|
|
- Result labels should be descriptive and indicate the content type
|
|
|
|
METHOD-SPECIFIC PARAMETER REQUIREMENTS:
|
|
|
|
- coder: Uses "code" (string), "language" (string), "requirements" (string)
|
|
- document: Uses "documentReference" (documentList), "fileId" (string for single files)
|
|
- excel: Uses "connectionReference" (connection), "fileId" (string)
|
|
- operator: Uses "items" (array), "prompt" (string), "documents" (array of documentReferences)
|
|
- outlook: Uses "connectionReference" (connection)
|
|
- powerpoint: Uses "connectionReference" (connection), "fileId" (string)
|
|
- sharepoint: Uses "connectionReference" (connection)
|
|
- web: Uses "query" (string), "url" (string)
|
|
|
|
EXAMPLE VALID ACTIONS:
|
|
|
|
1. SharePoint Search:
|
|
{{
|
|
"method": "sharepoint",
|
|
"action": "search",
|
|
"parameters": {{
|
|
"connectionReference": "connection:msft:testuser@example.com:1234",
|
|
"query": "sales quarterly report"
|
|
}},
|
|
"resultLabel": "mdoc:abc123:salesDocuments",
|
|
"description": "Search SharePoint for sales documents"
|
|
}}
|
|
|
|
2. Document Analysis using previous results:
|
|
{{
|
|
"method": "document",
|
|
"action": "analyze",
|
|
"parameters": {{
|
|
"documentReference": "mdoc:def456:customerData",
|
|
"analysis": ["entities", "topics", "sentiment"]
|
|
}},
|
|
"resultLabel": "mdoc:ghi789:customerAnalysis",
|
|
"description": "Analyze customer data for insights"
|
|
}}
|
|
|
|
3. Excel Read:
|
|
{{
|
|
"method": "excel",
|
|
"action": "read",
|
|
"parameters": {{
|
|
"connectionReference": "connection:msft:testuser@example.com:1234",
|
|
"fileId": "excel_file_123",
|
|
"sheetName": "Sheet1"
|
|
}},
|
|
"resultLabel": "mdoc:jkl012:excelData",
|
|
"description": "Read data from Excel file"
|
|
}}
|
|
|
|
NOTE: Respond with ONLY the JSON object. Do not include any explanatory text."""
|
|
|
|
|
|
def _createResultReviewPrompt(self, review_context: Dict[str, Any]) -> str:
|
|
"""Create prompt for result review"""
|
|
task_step = review_context['task_step']
|
|
step_result = review_context['step_result']
|
|
|
|
return f"""You are a result review AI that evaluates task step completion and decides on next actions.
|
|
|
|
TASK STEP: {task_step.get('description', 'Unknown')}
|
|
EXPECTED OUTPUTS: {', '.join(task_step.get('expected_outputs', []))}
|
|
SUCCESS CRITERIA: {', '.join(task_step.get('success_criteria', []))}
|
|
|
|
STEP RESULT: {json.dumps(step_result, indent=2)}
|
|
|
|
INSTRUCTIONS:
|
|
1. Evaluate if the task step was completed successfully
|
|
2. Check if all expected outputs were produced
|
|
3. Verify if success criteria were met
|
|
4. Decide on next action: continue, retry, or fail
|
|
5. If retry, provide specific improvements needed
|
|
|
|
REQUIRED JSON STRUCTURE:
|
|
{{
|
|
"status": "success|retry|failed",
|
|
"reason": "Explanation of the decision",
|
|
"improvements": "Specific improvements for retry (if status is retry)",
|
|
"quality_score": 1-10,
|
|
"missing_outputs": ["output1", "output2"],
|
|
"met_criteria": ["criteria1", "criteria2"],
|
|
"unmet_criteria": ["criteria3", "criteria4"]
|
|
}}
|
|
|
|
NOTE: Respond with ONLY the JSON object. Do not include any explanatory text."""
|
|
|
|
# ===== HELPER METHODS FOR WORKFLOW PHASES =====
|
|
|
|
async def _generateActionsForTaskStep(self, context: Dict[str, Any]) -> List[Dict[str, Any]]:
    """Ask the AI for actions for one task step, falling back on failure."""
    try:
        prompt = await self._createActionDefinitionPrompt(context)
        response = await self._callAIWithCircuitBreaker(prompt, "action_generation")
        actions = self._parseActionResponse(response)

        # Unusable AI output is replaced by deterministic fallback actions.
        if not self._validateActions(actions, context):
            logger.warning("Generated actions failed validation, using fallback actions")
            actions = self._createFallbackActions(context['task_step'], context)

        logger.info(f"Generated {len(actions)} actions for task step")
        return actions

    except Exception as e:
        logger.error(f"Error generating actions for task step: {str(e)}")
        return self._createFallbackActions(context['task_step'], context)
|
|
|
|
async def _executeSingleAction(self, action: TaskAction, workflow: ChatWorkflow) -> Dict[str, Any]:
|
|
"""Execute a single action and return result"""
|
|
try:
|
|
# Execute the actual method action using the service container
|
|
result = await self.service.executeAction(
|
|
methodName=action.execMethod,
|
|
actionName=action.execAction,
|
|
parameters=action.execParameters
|
|
)
|
|
|
|
# Update action based on result
|
|
if result.success:
|
|
action.setSuccess()
|
|
action.result = result.data.get("result", "")
|
|
action.execResultLabel = result.data.get("resultLabel", "")
|
|
|
|
# Create and store message in workflow for successful action
|
|
await self._createActionMessage(action, result, workflow)
|
|
|
|
else:
|
|
action.setError(result.error or "Action execution failed")
|
|
|
|
return {
|
|
"status": "completed" if result.success else "failed",
|
|
"result": result.data.get("result", ""),
|
|
"error": result.error or "",
|
|
"resultLabel": result.data.get("resultLabel", ""),
|
|
"documents": result.data.get("documents", []),
|
|
"action": action
|
|
}
|
|
|
|
except Exception as e:
|
|
logger.error(f"Error executing single action: {str(e)}")
|
|
action.setError(str(e))
|
|
return {
|
|
"status": "failed",
|
|
"error": str(e),
|
|
"action": action
|
|
}
|
|
|
|
async def _createActionMessage(self, action: TaskAction, result: Any, workflow: ChatWorkflow) -> None:
    """Create and store a message for the action result in the workflow.

    Builds an assistant 'step' message describing the executed action,
    materializes any documents the action produced, and appends the
    message to workflow.messages. All failures are logged and swallowed
    so message bookkeeping never breaks action execution.

    Args:
        action: The executed action (supplies method/action names and id).
        result: Execution result; its .data may carry 'resultLabel' and
            'documents'.
        workflow: Workflow the message is appended to.
    """
    try:
        result_data = result.data if hasattr(result, 'data') else {}
        result_label = result_data.get("resultLabel", "")
        documents_data = result_data.get("documents", [])

        message_data = {
            "workflowId": workflow.id,
            "role": "assistant",
            "message": f"Executed {action.execMethod}.{action.execAction} successfully",
            "status": "step",
            "sequenceNr": len(workflow.messages) + 1,
            "publishedAt": datetime.now(UTC).isoformat(),
            "actionId": action.id,
            "actionMethod": action.execMethod,
            "actionName": action.execAction,
            "documentsLabel": result_label,  # resultLabel doubles as documentsLabel
            "documents": []
        }

        # Persist any produced documents and attach them to the message.
        if documents_data:
            message_data["documents"] = self._materializeActionDocuments(action, documents_data)

        message = self.chatInterface.createWorkflowMessage(message_data)
        if message:
            workflow.messages.append(message)
            logger.info(f"Created action message for {action.execMethod}.{action.execAction} with {len(message_data.get('documents', []))} documents")
        else:
            logger.error(f"Failed to create workflow message for action {action.execMethod}.{action.execAction}")

    except Exception as e:
        logger.error(f"Error creating action message: {str(e)}")

def _materializeActionDocuments(self, action: TaskAction, documents_data: List[Dict[str, Any]]) -> List[Any]:
    """Persist raw document payloads and return the created ChatDocument objects.

    Each payload is written to the file store and wrapped as a document;
    a failed payload is logged and skipped so the rest still materialize.
    """
    processed_documents = []
    for doc_data in documents_data:
        try:
            # Default name encodes the producing action and a timestamp.
            document_name = doc_data.get(
                "documentName",
                f"{action.execMethod}_{action.execAction}_{datetime.now(UTC).strftime('%Y%m%d_%H%M%S')}"
            )
            document_data = doc_data.get("documentData", {})

            file_extension = self._getFileExtension(document_name)
            mime_type = self._getMimeType(file_extension)
            content = self._convertDocumentDataToString(document_data, file_extension)

            # Persist the file, then build the in-memory document object.
            file_id = self.service.createFile(
                fileName=document_name,
                mimeType=mime_type,
                content=content,
                base64encoded=False
            )
            document = self.service.createDocument(
                fileName=document_name,
                mimeType=mime_type,
                content=content,
                base64encoded=False
            )

            if document:
                processed_documents.append(document)
                logger.info(f"Created document: {document_name} with file ID: {file_id}")

        except Exception as e:
            logger.error(f"Error processing document {doc_data.get('documentName', 'unknown')}: {str(e)}")
            continue
    return processed_documents
|
|
|
|
def _getFileExtension(self, filename: str) -> str:
|
|
"""Extract file extension from filename"""
|
|
if '.' in filename:
|
|
return filename.split('.')[-1].lower()
|
|
return "txt" # Default to text
|
|
|
|
def _getMimeType(self, extension: str) -> str:
|
|
"""Get MIME type based on file extension"""
|
|
mime_types = {
|
|
'txt': 'text/plain',
|
|
'json': 'application/json',
|
|
'xml': 'application/xml',
|
|
'csv': 'text/csv',
|
|
'html': 'text/html',
|
|
'md': 'text/markdown',
|
|
'py': 'text/x-python',
|
|
'js': 'application/javascript',
|
|
'css': 'text/css',
|
|
'pdf': 'application/pdf',
|
|
'doc': 'application/msword',
|
|
'docx': 'application/vnd.openxmlformats-officedocument.wordprocessingml.document',
|
|
'xls': 'application/vnd.ms-excel',
|
|
'xlsx': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
|
|
'ppt': 'application/vnd.ms-powerpoint',
|
|
'pptx': 'application/vnd.openxmlformats-officedocument.presentationml.presentation'
|
|
}
|
|
return mime_types.get(extension, 'application/octet-stream')
|
|
|
|
def _convertDocumentDataToString(self, document_data: Dict[str, Any], file_extension: str) -> str:
|
|
"""Convert document data to string content based on file type"""
|
|
try:
|
|
if file_extension == 'json':
|
|
return json.dumps(document_data, indent=2, ensure_ascii=False)
|
|
elif file_extension in ['txt', 'md', 'html', 'css', 'js', 'py']:
|
|
# For text files, try to extract text content
|
|
if isinstance(document_data, dict):
|
|
# Look for common text content fields
|
|
text_fields = ['content', 'text', 'data', 'result', 'summary']
|
|
for field in text_fields:
|
|
if field in document_data:
|
|
content = document_data[field]
|
|
if isinstance(content, str):
|
|
return content
|
|
elif isinstance(content, (dict, list)):
|
|
return json.dumps(content, indent=2, ensure_ascii=False)
|
|
|
|
# If no text field found, convert entire dict to JSON
|
|
return json.dumps(document_data, indent=2, ensure_ascii=False)
|
|
elif isinstance(document_data, str):
|
|
return document_data
|
|
else:
|
|
return str(document_data)
|
|
else:
|
|
# For other file types, convert to JSON
|
|
return json.dumps(document_data, indent=2, ensure_ascii=False)
|
|
|
|
except Exception as e:
|
|
logger.error(f"Error converting document data to string: {str(e)}")
|
|
return str(document_data)
|
|
|
|
async def _performTaskReview(self, review_context: Dict[str, Any]) -> Dict[str, Any]:
|
|
"""Perform AI-based task review"""
|
|
try:
|
|
# Prepare prompt for result review
|
|
prompt = self._createResultReviewPrompt(review_context)
|
|
|
|
# Call AI with circuit breaker
|
|
response = await self._callAIWithCircuitBreaker(prompt, "result_review")
|
|
|
|
# Parse review result
|
|
review = self._parseReviewResponse(response)
|
|
|
|
# Add default values for missing fields
|
|
review.setdefault('status', 'unknown')
|
|
review.setdefault('reason', 'No reason provided')
|
|
review.setdefault('quality_score', 5)
|
|
|
|
return review
|
|
|
|
except Exception as e:
|
|
logger.error(f"Error performing task review: {str(e)}")
|
|
return {
|
|
'status': 'success', # Default to success to avoid blocking workflow
|
|
'reason': f'Review failed: {str(e)}',
|
|
'quality_score': 5,
|
|
'confidence': 0.5
|
|
}
|
|
|
|
def _getPreviousResultsFromActions(self, task_actions: List[TaskAction]) -> List[str]:
    """Collect result labels from successful actions plus labeled step messages."""
    # Labels carried by the action objects themselves (successful runs only).
    labels = [
        action.execResultLabel
        for action in task_actions
        if action.execResultLabel and action.isSuccessful()
    ]

    # Merge in labels attached to already-emitted assistant "step" messages,
    # skipping anything we have seen already.
    workflow = getattr(self, 'workflow', None)
    if workflow and workflow.messages:
        for msg in workflow.messages:
            if (msg.role == 'assistant'
                    and msg.status == 'step'
                    and msg.documentsLabel
                    and msg.documentsLabel not in labels):
                labels.append(msg.documentsLabel)

    return labels
|
|
|
|
def _calculateTaskQualityMetrics(self, task_step: Dict[str, Any], action_results: List[Dict[str, Any]]) -> Dict[str, Any]:
|
|
"""Calculate quality metrics for task step results"""
|
|
try:
|
|
quality_score = 0
|
|
confidence = 0
|
|
|
|
# Count successful actions
|
|
successful_actions = sum(1 for result in action_results if result.get('status') == 'completed')
|
|
total_actions = len(action_results)
|
|
|
|
if total_actions > 0:
|
|
success_rate = successful_actions / total_actions
|
|
quality_score = int(success_rate * 10) # Scale to 0-10
|
|
confidence = min(success_rate, 1.0)
|
|
|
|
return {
|
|
'score': quality_score,
|
|
'confidence': confidence,
|
|
'successful_actions': successful_actions,
|
|
'total_actions': total_actions
|
|
}
|
|
|
|
except Exception as e:
|
|
logger.error(f"Error calculating task quality metrics: {str(e)}")
|
|
return {'score': 0, 'confidence': 0, 'successful_actions': 0, 'total_actions': 0}
|
|
|
|
# ===== BASIC HELPER METHODS =====
|
|
|
|
def _getAvailableDocuments(self, workflow: ChatWorkflow) -> List[str]:
    """List the filename of every document attached to the workflow's messages."""
    return [
        doc.filename
        for message in workflow.messages
        for doc in message.documents
    ]
|
|
|
|
def _getPreviousResults(self, task: TaskItem) -> List[str]:
    """Get list of previous results from completed actions.

    Only actions that both carry a result label and report success
    contribute, matching the sibling _getPreviousResultsFromActions and
    the "completed actions" contract — failed actions no longer leak
    their labels into subsequent planning.
    """
    return [
        action.execResultLabel
        for action in task.actionList
        if action.execResultLabel and action.isSuccessful()
    ]
|
|
|
|
def _parseTaskPlanResponse(self, response: str) -> Dict[str, Any]:
|
|
"""Parse AI response into task plan structure"""
|
|
try:
|
|
# Extract JSON from response
|
|
json_start = response.find('{')
|
|
json_end = response.rfind('}') + 1
|
|
if json_start == -1 or json_end == 0:
|
|
raise ValueError("No JSON found in response")
|
|
|
|
json_str = response[json_start:json_end]
|
|
task_plan = json.loads(json_str)
|
|
|
|
# Validate structure
|
|
if 'tasks' not in task_plan:
|
|
raise ValueError("Task plan missing 'tasks' field")
|
|
|
|
return task_plan
|
|
|
|
except Exception as e:
|
|
logger.error(f"Error parsing task plan response: {str(e)}")
|
|
return {'tasks': []}
|
|
|
|
def _parseActionResponse(self, response: str) -> List[Dict[str, Any]]:
|
|
"""Parse AI response into action list"""
|
|
try:
|
|
# Extract JSON from response
|
|
json_start = response.find('{')
|
|
json_end = response.rfind('}') + 1
|
|
if json_start == -1 or json_end == 0:
|
|
raise ValueError("No JSON found in response")
|
|
|
|
json_str = response[json_start:json_end]
|
|
action_data = json.loads(json_str)
|
|
|
|
# Validate structure
|
|
if 'actions' not in action_data:
|
|
raise ValueError("Action response missing 'actions' field")
|
|
|
|
return action_data['actions']
|
|
|
|
except Exception as e:
|
|
logger.error(f"Error parsing action response: {str(e)}")
|
|
return []
|
|
|
|
def _parseReviewResponse(self, response: str) -> Dict[str, Any]:
|
|
"""Parse AI response into review result"""
|
|
try:
|
|
# Extract JSON from response
|
|
json_start = response.find('{')
|
|
json_end = response.rfind('}') + 1
|
|
if json_start == -1 or json_end == 0:
|
|
raise ValueError("No JSON found in response")
|
|
|
|
json_str = response[json_start:json_end]
|
|
review = json.loads(json_str)
|
|
|
|
# Validate structure
|
|
if 'status' not in review:
|
|
raise ValueError("Review response missing 'status' field")
|
|
|
|
return review
|
|
|
|
except Exception as e:
|
|
logger.error(f"Error parsing review response: {str(e)}")
|
|
return {'status': 'failed', 'reason': f'Parse error: {str(e)}'}
|
|
|
|
async def _callAI(self, prompt: str, context: str) -> str:
|
|
"""Call AI service with prompt"""
|
|
try:
|
|
# Use the existing AI call mechanism through service
|
|
if hasattr(self, 'service') and self.service:
|
|
# Ensure service is properly initialized
|
|
if hasattr(self.service, 'callAiTextBasic'):
|
|
response = await self.service.callAiTextBasic(prompt)
|
|
return response
|
|
else:
|
|
raise Exception("Service does not have callAiTextBasic method")
|
|
else:
|
|
raise Exception("No service available for AI calls")
|
|
|
|
except Exception as e:
|
|
logger.error(f"Error calling AI for {context}: {str(e)}")
|
|
raise
|
|
|
|
# ===== WORKFLOW FEEDBACK GENERATION =====
|
|
|
|
async def generateWorkflowFeedback(self, workflow: ChatWorkflow) -> str:
    """Build a human-readable completion summary for *workflow*."""
    try:
        # Tally messages per role for the summary line.
        user_count = sum(1 for msg in workflow.messages if msg.role == 'user')
        assistant_count = sum(1 for msg in workflow.messages if msg.role == 'assistant')

        # Status-specific closing sentence.
        closing = {
            "completed": "All tasks completed successfully.",
            "partial": "Some tasks completed with partial success.",
        }.get(workflow.status, f"Workflow status: {workflow.status}")

        return (
            f"Workflow completed successfully.\n\n"
            f"Processed {user_count} user inputs and generated {assistant_count} responses.\n"
            + closing
        )

    except Exception as e:
        logger.error(f"Error generating workflow feedback: {str(e)}")
        return "Workflow processing completed."
|
|
|
|
# ===== UNIFIED WORKFLOW EXECUTION =====
|
|
|
|
async def executeUnifiedWorkflow(self, userInput: str, workflow: ChatWorkflow) -> Dict[str, Any]:
    """Execute workflow using the new unified phases.

    Runs Phase 1 (high-level planning) once, then for every planned task
    drives Phase 2 (define actions) -> Phase 3 (execute) -> Phase 4
    (review) -> Phase 5 (handover), threading each task's handover
    results into the next task's action definition.

    Args:
        userInput: The raw user request the task plan is built from.
        workflow: The workflow whose state/messages the phases operate on.

    Returns:
        A summary dict: 'status' ('completed' | 'partial'), task counts,
        per-task results, and 'final_results' (the last handover output).
        On any failure, a dict with 'status': 'failed', 'error', and the
        'phase' ('planning' or 'execution') where it occurred.
    """
    try:
        logger.info(f"Starting unified workflow execution for workflow {workflow.id}")

        # Phase 1: High-Level Task Planning
        logger.info("=== PHASE 1: HIGH-LEVEL TASK PLANNING ===")
        task_plan = await self.planHighLevelTasks(userInput, workflow)
        if not task_plan or not task_plan.get('tasks'):
            # Without a plan there is nothing to execute; abort early.
            logger.error("Failed to create task plan")
            return {
                'status': 'failed',
                'error': 'Failed to create task plan',
                'phase': 'planning'
            }

        # Log task plan details
        logger.debug(f"TASK PLAN CREATED: {json.dumps(task_plan, indent=2, ensure_ascii=False)}")

        # Execute each task step
        workflow_results = []
        # Handover output of the previous task, fed into the next task's
        # action definition; starts empty for the first task.
        previous_results = []

        for i, task_step in enumerate(task_plan['tasks']):
            logger.info(f"=== PROCESSING TASK {i+1}/{len(task_plan['tasks'])}: {task_step.get('description', 'Unknown')} ===")

            # Phase 2: Define Task Actions
            logger.info(f"--- PHASE 2: DEFINING ACTIONS FOR TASK {i+1} ---")
            task_actions = await self.defineTaskActions(task_step, workflow, previous_results)
            if not task_actions:
                # A task with no actions is skipped, not treated as fatal.
                logger.warning(f"No actions defined for task {i+1}, skipping")
                continue

            # Log task actions
            logger.debug(f"TASK {i+1} ACTIONS CREATED: {json.dumps(task_actions, indent=2, ensure_ascii=False)}")

            # Phase 3: Execute Task Actions
            logger.info(f"--- PHASE 3: EXECUTING ACTIONS FOR TASK {i+1} ---")
            action_results = await self.executeTaskActions(task_actions, workflow)

            # Log action results
            logger.debug(f"TASK {i+1} ACTION RESULTS: {json.dumps(action_results, indent=2, ensure_ascii=False)}")

            # Phase 4: Review Task Completion
            logger.info(f"--- PHASE 4: REVIEWING TASK {i+1} COMPLETION ---")
            review_result = await self.reviewTaskCompletion(task_step, task_actions, action_results, workflow)

            # Log review result
            logger.debug(f"TASK {i+1} REVIEW RESULT: {json.dumps(review_result, indent=2, ensure_ascii=False)}")

            # Phase 5: Prepare Task Handover
            logger.info(f"--- PHASE 5: PREPARING TASK {i+1} HANDOVER ---")
            handover_data = await self.prepareTaskHandover(task_step, task_actions, review_result, workflow)

            # Log handover data
            logger.debug(f"TASK {i+1} HANDOVER DATA: {json.dumps(handover_data, indent=2, ensure_ascii=False)}")

            # Collect results for next iteration
            workflow_results.append({
                'task_step': task_step,
                'task_actions': task_actions,
                'action_results': action_results,
                'review_result': review_result,
                'handover_data': handover_data
            })

            # Update previous results for next task
            previous_results = handover_data.get('available_results', [])

            # Check if we should continue
            if review_result.get('status') == 'failed':
                # A failed review aborts the remaining tasks; results so far are kept.
                logger.error(f"Task {i+1} failed, stopping workflow")
                break

        # Final workflow summary
        successful_tasks = sum(1 for result in workflow_results if result['review_result'].get('status') == 'success')
        total_tasks = len(workflow_results)

        # NOTE(review): when every task was skipped (total_tasks == 0) this
        # reports 'completed' since 0 == 0 — confirm that is intended.
        workflow_summary = {
            'status': 'completed' if successful_tasks == total_tasks else 'partial',
            'successful_tasks': successful_tasks,
            'total_tasks': total_tasks,
            'workflow_results': workflow_results,
            'final_results': previous_results
        }

        logger.info(f"=== UNIFIED WORKFLOW COMPLETED: {successful_tasks}/{total_tasks} tasks successful ===")
        logger.debug(f"FINAL WORKFLOW SUMMARY: {json.dumps(workflow_summary, indent=2, ensure_ascii=False)}")
        return workflow_summary

    except Exception as e:
        logger.error(f"Error in unified workflow execution: {str(e)}")
        return {
            'status': 'failed',
            'error': str(e),
            'phase': 'execution'
        }
|