final check agents

This commit is contained in:
ValueOn AG 2025-07-04 15:10:26 +02:00
parent f86b3a9e2e
commit 1ba107b4fd
17 changed files with 2329 additions and 192 deletions

View file

@ -0,0 +1,277 @@
# Enhanced AI Agent System Recommendations
## Overview
This document provides comprehensive recommendations for building a stable, robust, and resilient AI agent system with clear handovers and reliable user request processing.
## 1. **Enhanced Error Recovery & Resilience**
### ✅ **Implemented Features:**
- **Circuit Breaker Pattern**: Prevents cascading failures when AI services are down
- **Exponential Backoff Retry**: Intelligent retry with increasing delays
- **Timeout Handling**: Prevents hanging operations
- **Fallback Mechanisms**: Graceful degradation when AI fails
- **Alternative Approach Generation**: Tries different methods when original fails
### 🔄 **Additional Recommendations:**
#### A. **State Persistence & Recovery**
```python
# Add checkpoint system for long-running workflows
class WorkflowCheckpoint:
    def save_checkpoint(self, workflow_id: str, task_step: int, state: Dict):
        # Save current state to database
        pass

    def restore_checkpoint(self, workflow_id: str) -> Dict:
        # Restore from last checkpoint
        pass
```
#### B. **Graceful Degradation**
```python
# Implement multiple AI providers with fallback
class MultiProviderAIService:
    def __init__(self):
        self.providers = [
            OpenAIService(),
            AnthropicService(),
            LocalLLMService()  # Fallback
        ]

    async def call_with_fallback(self, prompt: str) -> str:
        for provider in self.providers:
            try:
                return await provider.call(prompt)
            except Exception:
                continue
        raise Exception("All AI providers failed")
```
## 2. **Intelligent Task Planning & Execution**
### ✅ **Current Implementation:**
- **Task Planning**: AI analyzes request and creates logical task steps
- **Handover Review**: Validates each step before proceeding
- **Dynamic Action Generation**: Creates actions based on current context
### 🔄 **Enhanced Recommendations:**
#### A. **Dependency Graph Management**
```python
class TaskDependencyGraph:
def __init__(self):
self.nodes = {} # task_id -> task_info
self.edges = {} # task_id -> [dependencies]
def add_task(self, task_id: str, dependencies: List[str]):
self.nodes[task_id] = {"status": "pending"}
self.edges[task_id] = dependencies
def get_ready_tasks(self) -> List[str]:
# Return tasks with all dependencies completed
pass
def detect_cycles(self) -> bool:
# Detect circular dependencies
pass
```
#### B. **Parallel Task Execution**
```python
async def execute_parallel_tasks(self, independent_tasks: List[Dict]) -> List[Dict]:
    """Execute independent tasks in parallel for better performance"""
    tasks = []
    for task_step in independent_tasks:
        task = asyncio.create_task(self._executeTaskStep(task_step))
        tasks.append(task)
    results = await asyncio.gather(*tasks, return_exceptions=True)
    return results
```
## 3. **Advanced Quality Assurance**
### 🔄 **Quality Metrics & Validation:**
#### A. **Multi-Dimensional Quality Assessment**
```python
class QualityAssessor:
def assess_quality(self, result: Dict, criteria: Dict) -> QualityScore:
return QualityScore(
completeness=self._assess_completeness(result, criteria),
accuracy=self._assess_accuracy(result, criteria),
relevance=self._assess_relevance(result, criteria),
coherence=self._assess_coherence(result, criteria)
)
```
#### B. **Continuous Learning & Improvement**
```python
class LearningSystem:
def record_execution(self, task: Dict, result: Dict, quality_score: float):
"""Record execution for learning"""
pass
def suggest_improvements(self, task_type: str) -> List[str]:
"""Suggest improvements based on historical data"""
pass
```
## 4. **Enhanced Document & Context Management**
### 🔄 **Smart Document Processing:**
#### A. **Document Understanding & Classification**
```python
class DocumentProcessor:
def classify_document(self, content: str) -> DocumentType:
"""Classify document type for better processing"""
pass
def extract_key_information(self, document: Document) -> Dict:
"""Extract key information for context"""
pass
```
#### B. **Context-Aware Processing**
```python
class ContextManager:
def __init__(self):
self.context_stack = []
self.document_cache = {}
def add_context(self, context: Dict):
"""Add context for current processing"""
self.context_stack.append(context)
def get_relevant_context(self, task: Dict) -> Dict:
"""Get relevant context for specific task"""
pass
```
## 5. **Advanced Handover Mechanisms**
### 🔄 **Intelligent Handover System:**
#### A. **Handover Validation Engine**
```python
class HandoverValidator:
def validate_handover(self, from_task: Dict, to_task: Dict, data: Dict) -> ValidationResult:
"""Validate data handover between tasks"""
return ValidationResult(
is_valid=self._check_data_completeness(data, to_task),
missing_data=self._identify_missing_data(data, to_task),
quality_issues=self._identify_quality_issues(data),
suggestions=self._generate_handover_suggestions(data, to_task)
)
```
## 6. **Monitoring & Observability**
### 🔄 **Comprehensive Monitoring:**
#### A. **Real-Time Metrics**
```python
class MetricsCollector:
def __init__(self):
self.metrics = {
'task_execution_time': [],
'ai_call_latency': [],
'success_rate': [],
'error_rate': [],
'quality_scores': []
}
def record_metric(self, metric_name: str, value: float):
"""Record metric for monitoring"""
pass
def get_health_score(self) -> float:
"""Calculate overall system health score"""
pass
```
## 7. **Security & Privacy**
### 🔄 **Enhanced Security Measures:**
#### A. **Data Sanitization**
```python
class DataSanitizer:
def sanitize_input(self, user_input: str) -> str:
"""Sanitize user input for security"""
pass
def validate_documents(self, documents: List[Document]) -> bool:
"""Validate documents for security risks"""
pass
```
## 8. **Performance Optimization**
### 🔄 **Performance Enhancements:**
#### A. **Caching Strategy**
```python
class CacheManager:
def __init__(self):
self.document_cache = {}
self.ai_response_cache = {}
self.task_result_cache = {}
def get_cached_result(self, key: str) -> Optional[Dict]:
"""Get cached result if available"""
pass
def cache_result(self, key: str, result: Dict, ttl: int = 3600):
"""Cache result with TTL"""
pass
```
## 9. **Testing & Validation**
### 🔄 **Comprehensive Testing:**
#### A. **Automated Testing Framework**
```python
class TestFramework:
def test_task_planning(self, scenarios: List[Dict]):
"""Test task planning with various scenarios"""
pass
def test_handover_validation(self, test_cases: List[Dict]):
"""Test handover validation logic"""
pass
```
## 10. **Implementation Priority**
### **Phase 1 (Critical - Implement First):**
1. ✅ Circuit Breaker Pattern
2. ✅ Retry Mechanisms
3. ✅ Fallback Systems
4. 🔄 Enhanced Error Handling
### **Phase 2 (Important - Implement Next):**
1. 🔄 Parallel Task Execution
2. 🔄 Advanced Quality Assessment
3. 🔄 Smart Document Processing
4. 🔄 Comprehensive Monitoring
### **Phase 3 (Enhancement - Future):**
1. 🔄 Learning & Optimization
2. 🔄 Advanced Security
3. 🔄 Performance Optimization
4. 🔄 Advanced Testing
## Conclusion
The enhanced AI agent system provides:
- **Robustness**: Multiple layers of error recovery and fallback mechanisms
- **Intelligence**: Smart task planning and dynamic action generation
- **Quality**: Comprehensive validation and quality assessment
- **Observability**: Full monitoring and alerting capabilities
- **Scalability**: Resource management and performance optimization
- **Security**: Data protection and access control
This system processes user requests reliably, with clear handovers, comprehensive error handling, and continuous improvement capabilities.

View file

@ -298,6 +298,18 @@ class ChatObjects:
if "agentName" not in messageData: if "agentName" not in messageData:
messageData["agentName"] = "" messageData["agentName"] = ""
# Convert ChatDocument objects to dictionaries for database storage
if "documents" in messageData and messageData["documents"]:
documents_for_db = []
for doc in messageData["documents"]:
if isinstance(doc, ChatDocument):
# Convert ChatDocument to dictionary
documents_for_db.append(doc.dict())
else:
# Already a dictionary
documents_for_db.append(doc)
messageData["documents"] = documents_for_db
# Create message in database # Create message in database
createdMessage = self.db.recordCreate("workflowMessages", messageData) createdMessage = self.db.recordCreate("workflowMessages", messageData)
@ -1132,6 +1144,40 @@ class ChatObjects:
logger.error(f"Error creating task action: {str(e)}") logger.error(f"Error creating task action: {str(e)}")
return None return None
def createChatDocument(self, documentData: Dict[str, Any]) -> Optional[ChatDocument]:
    """Create a new ChatDocument from a raw data dict.

    Auto-generates an id when absent and backfills optional fields
    with neutral defaults.

    Parameters:
        documentData: Raw document fields; must contain "fileId".

    Returns:
        A ChatDocument instance, or None when "fileId" is missing or
        construction fails (None-on-error matches the convention of
        sibling helpers in this class, e.g. task-action creation).
    """
    try:
        # "fileId" is the only hard requirement — fail fast before
        # mutating documentData with generated defaults.
        if "fileId" not in documentData:
            logger.error("fileId is required for ChatDocument")
            return None
        # Auto-generate an id when absent or falsy.
        if not documentData.get("id"):
            documentData["id"] = f"doc_{uuid.uuid4()}"
        # Backfill optional fields with neutral defaults.
        documentData.setdefault("filename", "unknown")
        documentData.setdefault("fileSize", 0)
        documentData.setdefault("mimeType", "application/octet-stream")
        return ChatDocument(
            id=documentData["id"],
            fileId=documentData["fileId"],
            filename=documentData["filename"],
            fileSize=documentData["fileSize"],
            mimeType=documentData["mimeType"],
        )
    except Exception as e:
        # Log and return None rather than raising, so callers can treat
        # a bad document payload as a soft failure.
        logger.error(f"Error creating ChatDocument: {str(e)}")
        return None
def getInterface(currentUser: Optional[User] = None) -> 'ChatObjects': def getInterface(currentUser: Optional[User] = None) -> 'ChatObjects':
""" """
Returns a ChatObjects instance for the current user. Returns a ChatObjects instance for the current user.

View file

@ -142,7 +142,14 @@ class MethodCoder(MethodBase):
@action @action
async def analyze(self, parameters: Dict[str, Any]) -> ActionResult: async def analyze(self, parameters: Dict[str, Any]) -> ActionResult:
"""Analyze code quality and structure""" """
Analyze code quality and structure
Parameters:
code (str): The code to analyze
language (str, optional): Programming language (default: "python")
checks (List[str], optional): Types of checks to perform (default: ["complexity", "style", "security"])
"""
try: try:
code = parameters.get("code") code = parameters.get("code")
language = parameters.get("language", "python") language = parameters.get("language", "python")
@ -177,7 +184,14 @@ class MethodCoder(MethodBase):
@action @action
async def generate(self, parameters: Dict[str, Any]) -> ActionResult: async def generate(self, parameters: Dict[str, Any]) -> ActionResult:
"""Generate code based on requirements""" """
Generate code based on requirements
Parameters:
requirements (str): Requirements for the code to generate
language (str, optional): Programming language (default: "python")
template (str, optional): Template or pattern to follow
"""
try: try:
requirements = parameters.get("requirements") requirements = parameters.get("requirements")
language = parameters.get("language", "python") language = parameters.get("language", "python")
@ -212,7 +226,14 @@ class MethodCoder(MethodBase):
@action @action
async def refactor(self, parameters: Dict[str, Any]) -> ActionResult: async def refactor(self, parameters: Dict[str, Any]) -> ActionResult:
"""Refactor code for better quality""" """
Refactor code for better quality
Parameters:
code (str): The code to refactor
language (str, optional): Programming language (default: "python")
improvements (List[str], optional): Types of improvements to make (default: ["style", "complexity"])
"""
try: try:
code = parameters.get("code") code = parameters.get("code")
language = parameters.get("language", "python") language = parameters.get("language", "python")
@ -226,7 +247,7 @@ class MethodCoder(MethodBase):
) )
# Refactor code # Refactor code
result = await self.coderService.refactorCode( results = await self.coderService.refactorCode(
code=code, code=code,
language=language, language=language,
improvements=improvements improvements=improvements
@ -234,7 +255,7 @@ class MethodCoder(MethodBase):
return self._createResult( return self._createResult(
success=True, success=True,
data=result data=results
) )
except Exception as e: except Exception as e:

View file

@ -183,7 +183,14 @@ class MethodDocument(MethodBase):
@action @action
async def extract(self, parameters: Dict[str, Any]) -> ActionResult: async def extract(self, parameters: Dict[str, Any]) -> ActionResult:
"""Extract content from document""" """
Extract content from document
Parameters:
fileId (str): The ID of the document to extract content from
format (str, optional): Output format (default: "text")
includeMetadata (bool, optional): Whether to include metadata (default: True)
"""
try: try:
fileId = parameters.get("fileId") fileId = parameters.get("fileId")
format = parameters.get("format", "text") format = parameters.get("format", "text")
@ -218,7 +225,13 @@ class MethodDocument(MethodBase):
@action @action
async def analyze(self, parameters: Dict[str, Any]) -> ActionResult: async def analyze(self, parameters: Dict[str, Any]) -> ActionResult:
"""Analyze document content""" """
Analyze document content
Parameters:
fileId (str): The ID of the document to analyze
analysis (List[str], optional): Types of analysis to perform (default: ["entities", "topics", "sentiment"])
"""
try: try:
fileId = parameters.get("fileId") fileId = parameters.get("fileId")
analysis = parameters.get("analysis", ["entities", "topics", "sentiment"]) analysis = parameters.get("analysis", ["entities", "topics", "sentiment"])
@ -251,7 +264,14 @@ class MethodDocument(MethodBase):
@action @action
async def summarize(self, parameters: Dict[str, Any]) -> ActionResult: async def summarize(self, parameters: Dict[str, Any]) -> ActionResult:
"""Summarize document content""" """
Summarize document content
Parameters:
fileId (str): The ID of the document to summarize
maxLength (int, optional): Maximum length of summary in words (default: 200)
format (str, optional): Output format (default: "text")
"""
try: try:
fileId = parameters.get("fileId") fileId = parameters.get("fileId")
maxLength = parameters.get("maxLength", 200) maxLength = parameters.get("maxLength", 200)

View file

@ -277,12 +277,22 @@ class MethodExcel(MethodBase):
@action @action
async def read(self, parameters: Dict[str, Any]) -> ActionResult: async def read(self, parameters: Dict[str, Any]) -> ActionResult:
"""Read data from Excel file""" """
Read data from Excel file
Parameters:
fileId (str): The ID of the Excel file to read
connectionReference (str): Reference to the Microsoft connection
sheetName (str, optional): Name of the sheet to read (default: "Sheet1")
range (str, optional): Excel range to read (e.g., "A1:D10")
includeHeaders (bool, optional): Whether to include column headers (default: True)
"""
try: try:
fileId = parameters.get("fileId") fileId = parameters.get("fileId")
connectionReference = parameters.get("connectionReference") connectionReference = parameters.get("connectionReference")
sheetName = parameters.get("sheetName", "Sheet1") sheetName = parameters.get("sheetName", "Sheet1")
range = parameters.get("range") range = parameters.get("range")
includeHeaders = parameters.get("includeHeaders", True)
if not fileId or not connectionReference: if not fileId or not connectionReference:
return self._createResult( return self._createResult(
@ -314,7 +324,16 @@ class MethodExcel(MethodBase):
@action @action
async def write(self, parameters: Dict[str, Any]) -> ActionResult: async def write(self, parameters: Dict[str, Any]) -> ActionResult:
"""Write data to Excel file""" """
Write data to Excel file
Parameters:
fileId (str): The ID of the Excel file to write to
connectionReference (str): Reference to the Microsoft connection
sheetName (str, optional): Name of the sheet to write to (default: "Sheet1")
data (Any): Data to write to the Excel file
range (str, optional): Excel range to write to (e.g., "A1:D10")
"""
try: try:
fileId = parameters.get("fileId") fileId = parameters.get("fileId")
connectionReference = parameters.get("connectionReference") connectionReference = parameters.get("connectionReference")
@ -353,7 +372,14 @@ class MethodExcel(MethodBase):
@action @action
async def create(self, parameters: Dict[str, Any]) -> ActionResult: async def create(self, parameters: Dict[str, Any]) -> ActionResult:
"""Create new Excel file""" """
Create new Excel file
Parameters:
fileName (str): Name of the new Excel file
connectionReference (str): Reference to the Microsoft connection
template (str, optional): Template to use for the new file
"""
try: try:
fileName = parameters.get("fileName") fileName = parameters.get("fileName")
connectionReference = parameters.get("connectionReference") connectionReference = parameters.get("connectionReference")
@ -388,7 +414,16 @@ class MethodExcel(MethodBase):
@action @action
async def format(self, parameters: Dict[str, Any]) -> ActionResult: async def format(self, parameters: Dict[str, Any]) -> ActionResult:
"""Format Excel cells""" """
Format Excel cells
Parameters:
fileId (str): The ID of the Excel file to format
connectionReference (str): Reference to the Microsoft connection
sheetName (str, optional): Name of the sheet to format (default: "Sheet1")
range (str): Excel range to format (e.g., "A1:D10")
format (Dict[str, Any]): Formatting options (e.g., {"font": {"bold": True}})
"""
try: try:
fileId = parameters.get("fileId") fileId = parameters.get("fileId")
connectionReference = parameters.get("connectionReference") connectionReference = parameters.get("connectionReference")

View file

@ -150,7 +150,13 @@ class MethodOperator(MethodBase):
@action @action
async def forEach(self, parameters: Dict[str, Any]) -> ActionResult: async def forEach(self, parameters: Dict[str, Any]) -> ActionResult:
"""Execute an action for each item in a list""" """
Execute an action for each item in a list
Parameters:
items (List[Any]): List of items to process
action (Dict[str, Any]): Action to execute for each item (contains method, action, parameters)
"""
try: try:
items = parameters.get("items", []) items = parameters.get("items", [])
action = parameters.get("action", {}) action = parameters.get("action", {})
@ -183,7 +189,14 @@ class MethodOperator(MethodBase):
@action @action
async def aiCall(self, parameters: Dict[str, Any]) -> ActionResult: async def aiCall(self, parameters: Dict[str, Any]) -> ActionResult:
"""Call AI service with document content""" """
Call AI service with document content
Parameters:
prompt (str): The prompt to send to the AI service
documents (List[Dict[str, Any]], optional): List of documents to include in context
Each document should have: documentReference (str), contentExtractionPrompt (str, optional)
"""
try: try:
prompt = parameters.get("prompt") prompt = parameters.get("prompt")
documents = parameters.get("documents", []) # List of {documentReference, contentExtractionPrompt} documents = parameters.get("documents", []) # List of {documentReference, contentExtractionPrompt}

View file

@ -248,13 +248,12 @@ class MethodOutlook(MethodBase):
""" """
Read emails from Outlook Read emails from Outlook
Args: Parameters:
parameters: connectionReference (str): Reference to the Microsoft connection
connectionReference: Connection reference folder (str, optional): Folder to read from (default: "inbox")
folder: Folder to read from (default: inbox) query (str, optional): Search query to filter emails
query: Search query maxResults (int, optional): Maximum number of results (default: 10)
maxResults: Maximum number of results includeAttachments (bool, optional): Whether to include attachments (default: False)
includeAttachments: Whether to include attachments
""" """
try: try:
connectionReference = parameters.get("connectionReference") connectionReference = parameters.get("connectionReference")
@ -297,13 +296,12 @@ class MethodOutlook(MethodBase):
""" """
Send email using Outlook Send email using Outlook
Args: Parameters:
parameters: connectionReference (str): Reference to the Microsoft connection
connectionReference: Connection reference to (List[str]): List of recipient email addresses
to: List of recipient email addresses subject (str): Email subject
subject: Email subject body (str): Email body
body: Email body attachments (List[str], optional): List of attachment file IDs
attachments: List of attachment file IDs
""" """
try: try:
connectionReference = parameters.get("connectionReference") connectionReference = parameters.get("connectionReference")
@ -353,11 +351,10 @@ class MethodOutlook(MethodBase):
""" """
Create folder in Outlook Create folder in Outlook
Args: Parameters:
parameters: connectionReference (str): Reference to the Microsoft connection
connectionReference: Connection reference name (str): Folder name
name: Folder name parentFolderId (str, optional): Parent folder ID
parentFolderId: Parent folder ID (optional)
""" """
try: try:
connectionReference = parameters.get("connectionReference") connectionReference = parameters.get("connectionReference")
@ -403,11 +400,10 @@ class MethodOutlook(MethodBase):
""" """
Move email to different folder Move email to different folder
Args: Parameters:
parameters: connectionReference (str): Reference to the Microsoft connection
connectionReference: Connection reference messageId (str): ID of the message to move
messageId: ID of the message to move targetFolderId (str): ID of the target folder
targetFolderId: ID of the target folder
""" """
try: try:
connectionReference = parameters.get("connectionReference") connectionReference = parameters.get("connectionReference")

View file

@ -382,7 +382,14 @@ class MethodPowerpoint(MethodBase):
@action @action
async def read(self, parameters: Dict[str, Any]) -> ActionResult: async def read(self, parameters: Dict[str, Any]) -> ActionResult:
"""Read PowerPoint presentation""" """
Read PowerPoint presentation
Parameters:
fileId (str): The ID of the PowerPoint file to read
connectionReference (str): Reference to the Microsoft connection
includeSlides (bool, optional): Whether to include slide content (default: True)
"""
try: try:
fileId = parameters.get("fileId") fileId = parameters.get("fileId")
connectionReference = parameters.get("connectionReference") connectionReference = parameters.get("connectionReference")
@ -417,7 +424,14 @@ class MethodPowerpoint(MethodBase):
@action @action
async def write(self, parameters: Dict[str, Any]) -> ActionResult: async def write(self, parameters: Dict[str, Any]) -> ActionResult:
"""Write to PowerPoint presentation""" """
Write to PowerPoint presentation
Parameters:
fileId (str): The ID of the PowerPoint file to write to
connectionReference (str): Reference to the Microsoft connection
slides (List[Dict[str, Any]]): List of slides to write
"""
try: try:
fileId = parameters.get("fileId") fileId = parameters.get("fileId")
connectionReference = parameters.get("connectionReference") connectionReference = parameters.get("connectionReference")
@ -452,7 +466,14 @@ class MethodPowerpoint(MethodBase):
@action @action
async def convert(self, parameters: Dict[str, Any]) -> ActionResult: async def convert(self, parameters: Dict[str, Any]) -> ActionResult:
"""Convert PowerPoint presentation to another format""" """
Convert PowerPoint presentation to another format
Parameters:
fileId (str): The ID of the PowerPoint file to convert
connectionReference (str): Reference to the Microsoft connection
format (str, optional): Target format (default: "pdf")
"""
try: try:
fileId = parameters.get("fileId") fileId = parameters.get("fileId")
connectionReference = parameters.get("connectionReference") connectionReference = parameters.get("connectionReference")
@ -487,7 +508,14 @@ class MethodPowerpoint(MethodBase):
@action @action
async def createPresentation(self, parameters: Dict[str, Any]) -> ActionResult: async def createPresentation(self, parameters: Dict[str, Any]) -> ActionResult:
"""Create new PowerPoint presentation""" """
Create new PowerPoint presentation
Parameters:
fileName (str): Name of the new presentation file
connectionReference (str): Reference to the Microsoft connection
template (str, optional): Template to use for the new presentation
"""
try: try:
fileName = parameters.get("fileName") fileName = parameters.get("fileName")
connectionReference = parameters.get("connectionReference") connectionReference = parameters.get("connectionReference")
@ -522,7 +550,15 @@ class MethodPowerpoint(MethodBase):
@action @action
async def addSlide(self, parameters: Dict[str, Any]) -> ActionResult: async def addSlide(self, parameters: Dict[str, Any]) -> ActionResult:
"""Add slide to presentation""" """
Add slide to presentation
Parameters:
fileId (str): The ID of the PowerPoint file
connectionReference (str): Reference to the Microsoft connection
layout (str, optional): Slide layout type (default: "title")
content (Dict[str, Any], optional): Content for the slide
"""
try: try:
fileId = parameters.get("fileId") fileId = parameters.get("fileId")
connectionReference = parameters.get("connectionReference") connectionReference = parameters.get("connectionReference")
@ -559,7 +595,15 @@ class MethodPowerpoint(MethodBase):
@action @action
async def addContent(self, parameters: Dict[str, Any]) -> ActionResult: async def addContent(self, parameters: Dict[str, Any]) -> ActionResult:
"""Add content to slide""" """
Add content to slide
Parameters:
fileId (str): The ID of the PowerPoint file
connectionReference (str): Reference to the Microsoft connection
slideId (str): ID of the slide to add content to
content (Dict[str, Any]): Content to add to the slide
"""
try: try:
fileId = parameters.get("fileId") fileId = parameters.get("fileId")
connectionReference = parameters.get("connectionReference") connectionReference = parameters.get("connectionReference")

View file

@ -354,7 +354,16 @@ class MethodSharepoint(MethodBase):
@action @action
async def search(self, parameters: Dict[str, Any]) -> ActionResult: async def search(self, parameters: Dict[str, Any]) -> ActionResult:
"""Search SharePoint content""" """
Search SharePoint content
Parameters:
connectionReference (str): Reference to the Microsoft connection
query (str): Search query
siteId (str, optional): SharePoint site ID
contentType (str, optional): Content type to filter by
maxResults (int, optional): Maximum number of results (default: 10)
"""
try: try:
connectionReference = parameters.get("connectionReference") connectionReference = parameters.get("connectionReference")
query = parameters.get("query") query = parameters.get("query")
@ -400,7 +409,15 @@ class MethodSharepoint(MethodBase):
@action @action
async def read(self, parameters: Dict[str, Any]) -> ActionResult: async def read(self, parameters: Dict[str, Any]) -> ActionResult:
"""Read SharePoint item""" """
Read SharePoint item
Parameters:
connectionReference (str): Reference to the Microsoft connection
itemId (str): ID of the item to read
siteId (str, optional): SharePoint site ID
listId (str, optional): SharePoint list ID
"""
try: try:
connectionReference = parameters.get("connectionReference") connectionReference = parameters.get("connectionReference")
itemId = parameters.get("itemId") itemId = parameters.get("itemId")
@ -444,7 +461,15 @@ class MethodSharepoint(MethodBase):
@action @action
async def write(self, parameters: Dict[str, Any]) -> ActionResult: async def write(self, parameters: Dict[str, Any]) -> ActionResult:
"""Write SharePoint item""" """
Write SharePoint item
Parameters:
connectionReference (str): Reference to the Microsoft connection
siteId (str): SharePoint site ID
listId (str): SharePoint list ID
item (Dict[str, Any]): Item data to write
"""
try: try:
connectionReference = parameters.get("connectionReference") connectionReference = parameters.get("connectionReference")
siteId = parameters.get("siteId") siteId = parameters.get("siteId")
@ -488,7 +513,16 @@ class MethodSharepoint(MethodBase):
@action @action
async def readList(self, parameters: Dict[str, Any]) -> ActionResult: async def readList(self, parameters: Dict[str, Any]) -> ActionResult:
"""Read SharePoint list""" """
Read SharePoint list
Parameters:
connectionReference (str): Reference to the Microsoft connection
listId (str): SharePoint list ID
siteId (str, optional): SharePoint site ID
query (str, optional): Query to filter items
maxResults (int, optional): Maximum number of results (default: 10)
"""
try: try:
connectionReference = parameters.get("connectionReference") connectionReference = parameters.get("connectionReference")
listId = parameters.get("listId") listId = parameters.get("listId")
@ -534,7 +568,15 @@ class MethodSharepoint(MethodBase):
@action @action
async def writeList(self, parameters: Dict[str, Any]) -> ActionResult: async def writeList(self, parameters: Dict[str, Any]) -> ActionResult:
"""Write multiple items to SharePoint list""" """
Write multiple items to SharePoint list
Parameters:
connectionReference (str): Reference to the Microsoft connection
siteId (str): SharePoint site ID
listId (str): SharePoint list ID
items (List[Dict[str, Any]]): List of items to write
"""
try: try:
connectionReference = parameters.get("connectionReference") connectionReference = parameters.get("connectionReference")
siteId = parameters.get("siteId") siteId = parameters.get("siteId")
@ -556,7 +598,7 @@ class MethodSharepoint(MethodBase):
) )
# Write items # Write items
result = await self.sharepointService.writeList( results = await self.sharepointService.writeList(
connectionReference=connectionReference, connectionReference=connectionReference,
siteId=siteId, siteId=siteId,
listId=listId, listId=listId,
@ -565,11 +607,11 @@ class MethodSharepoint(MethodBase):
return self._createResult( return self._createResult(
success=True, success=True,
data=result data=results
) )
except Exception as e: except Exception as e:
logger.error(f"Error writing to SharePoint list: {str(e)}") logger.error(f"Error writing SharePoint list: {str(e)}")
return self._createResult( return self._createResult(
success=False, success=False,
data={}, data={},
@ -578,7 +620,17 @@ class MethodSharepoint(MethodBase):
@action @action
async def createList(self, parameters: Dict[str, Any]) -> ActionResult: async def createList(self, parameters: Dict[str, Any]) -> ActionResult:
"""Create SharePoint list""" """
Create new SharePoint list
Parameters:
connectionReference (str): Reference to the Microsoft connection
siteId (str): SharePoint site ID
name (str): Name of the new list
description (str, optional): Description of the list
template (str, optional): List template (default: "genericList")
fields (List[Dict[str, Any]], optional): List of field definitions
"""
try: try:
connectionReference = parameters.get("connectionReference") connectionReference = parameters.get("connectionReference")
siteId = parameters.get("siteId") siteId = parameters.get("siteId")
@ -598,11 +650,11 @@ class MethodSharepoint(MethodBase):
return self._createResult( return self._createResult(
success=False, success=False,
data={}, data={},
error="Site ID and list name are required" error="Site ID and name are required"
) )
# Create list # Create list
list = await self.sharepointService.createList( list_info = await self.sharepointService.createList(
connectionReference=connectionReference, connectionReference=connectionReference,
siteId=siteId, siteId=siteId,
name=name, name=name,
@ -613,7 +665,7 @@ class MethodSharepoint(MethodBase):
return self._createResult( return self._createResult(
success=True, success=True,
data=list data=list_info
) )
except Exception as e: except Exception as e:

View file

@ -448,7 +448,13 @@ class MethodWeb(MethodBase):
@action @action
async def search(self, parameters: Dict[str, Any]) -> ActionResult: async def search(self, parameters: Dict[str, Any]) -> ActionResult:
"""Search web content""" """
Search web content
Parameters:
query (str): Search query
maxResults (int, optional): Maximum number of results (default: 10)
"""
try: try:
query = parameters.get("query") query = parameters.get("query")
maxResults = parameters.get("maxResults", 10) maxResults = parameters.get("maxResults", 10)
@ -481,7 +487,15 @@ class MethodWeb(MethodBase):
@action @action
async def crawl(self, parameters: Dict[str, Any]) -> ActionResult: async def crawl(self, parameters: Dict[str, Any]) -> ActionResult:
"""Crawl web page""" """
Crawl web page
Parameters:
url (str): URL to crawl
depth (int, optional): Crawl depth (default: 1)
followLinks (bool, optional): Whether to follow links (default: True)
extractContent (bool, optional): Whether to extract content (default: True)
"""
try: try:
url = parameters.get("url") url = parameters.get("url")
depth = parameters.get("depth", 1) depth = parameters.get("depth", 1)
@ -518,7 +532,14 @@ class MethodWeb(MethodBase):
@action @action
async def extract(self, parameters: Dict[str, Any]) -> ActionResult: async def extract(self, parameters: Dict[str, Any]) -> ActionResult:
"""Extract content from web page""" """
Extract content from web page
Parameters:
url (str): URL to extract content from
selectors (Dict[str, str], optional): CSS selectors for specific content
format (str, optional): Output format (default: "text")
"""
try: try:
url = parameters.get("url") url = parameters.get("url")
selectors = parameters.get("selectors", {}) selectors = parameters.get("selectors", {})
@ -553,7 +574,13 @@ class MethodWeb(MethodBase):
@action @action
async def validate(self, parameters: Dict[str, Any]) -> ActionResult: async def validate(self, parameters: Dict[str, Any]) -> ActionResult:
"""Validate web page""" """
Validate web page
Parameters:
url (str): URL to validate
checks (List[str], optional): Types of checks to perform (default: ["accessibility", "seo", "performance"])
"""
try: try:
url = parameters.get("url") url = parameters.get("url")
checks = parameters.get("checks", ["accessibility", "seo", "performance"]) checks = parameters.get("checks", ["accessibility", "seo", "performance"])

View file

@ -1,7 +1,9 @@
import asyncio
import logging import logging
import uuid
import json
from typing import Dict, Any, Optional, List, Union from typing import Dict, Any, Optional, List, Union
from datetime import datetime, UTC from datetime import datetime, UTC
import json
from modules.interfaces.interfaceAppModel import User from modules.interfaces.interfaceAppModel import User
from modules.interfaces.interfaceChatModel import ( from modules.interfaces.interfaceChatModel import (
@ -21,6 +23,16 @@ class ChatManager:
self.service: ServiceContainer = None self.service: ServiceContainer = None
self.workflow: ChatWorkflow = None self.workflow: ChatWorkflow = None
# Circuit breaker for AI calls
self.ai_failure_count = 0
self.ai_last_failure_time = None
self.ai_circuit_breaker_threshold = 5
self.ai_circuit_breaker_timeout = 300 # 5 minutes
# Timeout settings
self.ai_call_timeout = 120 # 2 minutes
self.task_execution_timeout = 600 # 10 minutes
# ===== Initialization and Setup ===== # ===== Initialization and Setup =====
async def initialize(self, workflow: ChatWorkflow) -> None: async def initialize(self, workflow: ChatWorkflow) -> None:
"""Initialize chat manager with workflow""" """Initialize chat manager with workflow"""
@ -36,17 +48,29 @@ class ChatManager:
# Try to find JSON in the response # Try to find JSON in the response
import re import re
# Look for JSON object patterns # Look for JSON object patterns with more flexible matching
json_patterns = [ json_patterns = [
r'\{.*\}', # Basic JSON object r'\{[^{}]*(?:\{[^{}]*\}[^{}]*)*\}', # Nested JSON objects
r'\[\{.*\}\]', # JSON array of objects r'\{.*?\}', # Simple JSON object (non-greedy)
r'\[\{.*?\}\]', # JSON array of objects
] ]
for pattern in json_patterns: for pattern in json_patterns:
matches = re.findall(pattern, response, re.DOTALL) matches = re.findall(pattern, response, re.DOTALL)
for match in matches: for match in matches:
try: try:
return json.loads(match) # Clean up the match
cleaned_match = match.strip()
# Remove any markdown code blocks
if cleaned_match.startswith('```json'):
cleaned_match = cleaned_match[7:]
if cleaned_match.endswith('```'):
cleaned_match = cleaned_match[:-3]
cleaned_match = cleaned_match.strip()
parsed_json = json.loads(cleaned_match)
logger.info(f"Successfully extracted JSON from response using pattern: {pattern[:20]}...")
return parsed_json
except json.JSONDecodeError: except json.JSONDecodeError:
continue continue
@ -290,6 +314,32 @@ Example format:
action.error = result.get("error", "") action.error = result.get("error", "")
action.execResultLabel = result.get("resultLabel", "") action.execResultLabel = result.get("resultLabel", "")
# Process documents from AI response
documents = []
if result.get("documents") and isinstance(result["documents"], list):
for docRef in result["documents"]:
try:
# Parse document reference: document_<id>_<filename>
if docRef.startswith("document_"):
parts = docRef.split("_", 2)
if len(parts) >= 3:
docId = parts[1]
filename = parts[2]
# Create ChatDocument using interface
documentData = {
"id": docId,
"filename": filename,
"fileSize": 0, # Will be updated if file exists
"mimeType": "application/octet-stream"
}
document = self.chatInterface.createChatDocument(documentData)
if document:
documents.append(document)
logger.info(f"Created document reference: {docRef}")
except Exception as e:
logger.warning(f"Error processing document reference {docRef}: {str(e)}")
# Create message for action result using interface # Create message for action result using interface
messageData = { messageData = {
"workflowId": task.workflowId, "workflowId": task.workflowId,
@ -301,13 +351,15 @@ Example format:
"actionId": action.id, "actionId": action.id,
"actionMethod": action.execMethod, "actionMethod": action.execMethod,
"actionName": action.execAction, "actionName": action.execAction,
"documentsLabel": action.execResultLabel "documentsLabel": action.execResultLabel,
"documents": documents # ✅ Now properly storing documents
} }
message = self.chatInterface.createWorkflowMessage(messageData) message = self.chatInterface.createWorkflowMessage(messageData)
if message: if message:
self.workflow.messages.append(message) self.workflow.messages.append(message)
logger.info(f"Action execution logged: {action.execMethod}.{action.execAction} - {action.status} - Documents: {len(documents)}")
logger.info(f"Action execution logged: {action.execMethod}.{action.execAction} - {action.status}") else:
logger.error(f"Failed to create workflow message for action {action.execMethod}.{action.execAction}")
# If action failed, stop execution # If action failed, stop execution
if action.status == "failed": if action.status == "failed":
@ -498,12 +550,13 @@ CONTEXT:
Chat History: {messageSummary} Chat History: {messageSummary}
AVAILABLE RESOURCES: AVAILABLE RESOURCES:
Methods: {chr(10).join(f"- {method}" for method in methodList)} Methods: {chr(10).join(f"- {method}" for method in methodList)}
Documents: {chr(10).join(f"- {doc['documentReference']} ({doc['datetime']})" for doc in docRefs.get('chat', []))}
Connections: {chr(10).join(f"- {conn['connectionReference']} ({conn['authority']})" for conn in connRefs)} Connections: {chr(10).join(f"- {conn['connectionReference']} ({conn['authority']})" for conn in connRefs)}
Documents: {chr(10).join(f"- {doc['documentReference']} ({doc['actionMethod']}.{doc['actionName']} - {doc['documentCount']} docs) - {doc['datetime']}" for doc in docRefs.get('chat', []))}
INSTRUCTIONS: INSTRUCTIONS:
1. Analyze the task request and available resources 1. Analyze the task request and available resources
2. Create a sequence of actions to accomplish the task 2. Create a sequence of actions to accomplish the task
@ -522,7 +575,7 @@ REQUIRED JSON STRUCTURE:
"param1": "value1", "param1": "value1",
"param2": "value2" "param2": "value2"
}}, }},
"resultLabel": "documentList_uuid_label" "resultLabel": "documentList_uuid_descriptive_label"
}} }}
] ]
}} }}
@ -534,13 +587,22 @@ JSON FIELD REQUIREMENTS:
- "method": Must be one of the available methods listed above - "method": Must be one of the available methods listed above
- "action": Must be a valid action for that method - "action": Must be a valid action for that method
- "parameters": Object with method-specific parameters - "parameters": Object with method-specific parameters
- "resultLabel": Format: "documentList_uuid_descriptive_label" - "resultLabel": CRITICAL - Must follow format: "documentList_uuid_descriptive_label"
RESULT LABEL REQUIREMENTS (CRITICAL):
- You MUST set a resultLabel for each action
- Format: "documentList_uuid_descriptive_label"
- uuid: Generate a unique identifier (e.g., abc123, def456)
- descriptive_label: Clear description of what the action produces (e.g., sales_documents, analysis_results, quarterly_report)
- Examples: "documentList_abc123_sales_documents", "documentList_def456_analysis_results"
- This label will be used to reference the results in subsequent actions
PARAMETER RULES: PARAMETER RULES:
- Use only document references from "Documents" section above - Use only document references from "Documents" section above
- Use only connection references from "Connections" section above - Use only connection references from "Connections" section above
- Use result labels from previous actions in the sequence - Use result labels from previous actions in the sequence
- All parameter values must be strings - All parameter values must be strings
- Document references show: method.action - document count - timestamp
EXAMPLE VALID JSON: EXAMPLE VALID JSON:
{{ {{
@ -584,6 +646,11 @@ CRITICAL: Respond with ONLY the JSON object. Do not include any explanatory text
documents = [] documents = []
for fileId in fileIds: for fileId in fileIds:
try: try:
# Ensure service is initialized
if not hasattr(self, 'service') or not self.service:
logger.error(f"Service not initialized for file ID {fileId}")
continue
# Get file info from service # Get file info from service
fileInfo = self.service.getFileInfo(fileId) fileInfo = self.service.getFileInfo(fileId)
if fileInfo: if fileInfo:
@ -597,6 +664,9 @@ CRITICAL: Respond with ONLY the JSON object. Do not include any explanatory text
document = self.chatInterface.createChatDocument(documentData) document = self.chatInterface.createChatDocument(documentData)
if document: if document:
documents.append(document) documents.append(document)
logger.info(f"Processed file ID {fileId} -> {document.filename}")
else:
logger.warning(f"No file info found for file ID {fileId}")
except Exception as e: except Exception as e:
logger.error(f"Error processing file ID {fileId}: {str(e)}") logger.error(f"Error processing file ID {fileId}: {str(e)}")
return documents return documents
@ -604,4 +674,638 @@ CRITICAL: Respond with ONLY the JSON object. Do not include any explanatory text
def setUserLanguage(self, language: str) -> None: def setUserLanguage(self, language: str) -> None:
"""Set user language for the chat manager""" """Set user language for the chat manager"""
if hasattr(self, 'service') and self.service: if hasattr(self, 'service') and self.service:
self.service.user.language = language self.service.user.language = language
# ===== Enhanced Task Planning Methods =====
async def _callAIWithCircuitBreaker(self, prompt: str, context: str) -> str:
"""Call AI with circuit breaker pattern for fault tolerance"""
try:
# Check circuit breaker
if self._isCircuitBreakerOpen():
raise Exception("AI circuit breaker is open - too many recent failures")
# Call AI with timeout
response = await asyncio.wait_for(
self._callAI(prompt, context),
timeout=self.ai_call_timeout
)
# Reset failure count on success
self.ai_failure_count = 0
return response
except asyncio.TimeoutError:
self._recordAIFailure("Timeout")
raise Exception(f"AI call timed out after {self.ai_call_timeout} seconds")
except Exception as e:
self._recordAIFailure(str(e))
raise
def _isCircuitBreakerOpen(self) -> bool:
"""Check if circuit breaker is open"""
if self.ai_failure_count >= self.ai_circuit_breaker_threshold:
if self.ai_last_failure_time:
time_since_failure = (datetime.now(UTC) - self.ai_last_failure_time).total_seconds()
if time_since_failure < self.ai_circuit_breaker_timeout:
return True
else:
# Reset circuit breaker after timeout
self.ai_failure_count = 0
self.ai_last_failure_time = None
return False
def _recordAIFailure(self, error: str):
    """Register one AI failure for the circuit breaker.

    Increments the rolling failure counter, stamps the failure time (UTC),
    and logs progress toward the breaker threshold.
    """
    self.ai_failure_count = self.ai_failure_count + 1
    self.ai_last_failure_time = datetime.now(UTC)
    logger.warning(
        f"AI failure recorded ({self.ai_failure_count}/{self.ai_circuit_breaker_threshold}): {error}"
    )
async def generateTaskPlan(self, context: Dict[str, Any]) -> Dict[str, Any]:
    """Ask the AI to turn the user request into a structured task plan.

    Args:
        context: Planning context; must contain the keys consumed by
            ``_createTaskPlanningPrompt`` (user request, available documents).

    Returns:
        A validated task-plan dict, or a hard-coded fallback plan when
        generation, parsing, or validation fails (never raises).
    """
    try:
        planning_prompt = self._createTaskPlanningPrompt(context)
        raw_response = await self._callAIWithCircuitBreaker(planning_prompt, "task_planning")
        plan = self._parseTaskPlanResponse(raw_response)
        # Reject structurally broken plans so downstream execution never
        # sees malformed tasks.
        if not self._validateTaskPlan(plan):
            raise Exception("Generated task plan failed validation")
        logger.info(f"Generated task plan with {len(plan.get('tasks', []))} tasks")
        return plan
    except Exception as e:
        logger.error(f"Error generating task plan: {str(e)}")
        # Degrade gracefully instead of aborting the workflow.
        return self._createFallbackTaskPlan(context)
def _validateTaskPlan(self, task_plan: Dict[str, Any]) -> bool:
"""Validate task plan structure and dependencies"""
try:
if not isinstance(task_plan, dict):
return False
if 'tasks' not in task_plan or not isinstance(task_plan['tasks'], list):
return False
# Check each task
task_ids = set()
for task in task_plan['tasks']:
if not isinstance(task, dict):
return False
required_fields = ['id', 'description', 'expected_outputs', 'success_criteria']
if not all(field in task for field in required_fields):
return False
# Check for duplicate task IDs
if task['id'] in task_ids:
return False
task_ids.add(task['id'])
# Validate dependencies
dependencies = task.get('dependencies', [])
if not isinstance(dependencies, list):
return False
# Check that dependencies reference existing tasks
for dep in dependencies:
if dep not in task_ids and dep != 'task_0': # Allow task_0 as special case
return False
return True
except Exception as e:
logger.error(f"Error validating task plan: {str(e)}")
return False
def _createFallbackTaskPlan(self, context: Dict[str, Any]) -> Dict[str, Any]:
    """Build the hard-coded two-step plan used when AI planning fails.

    Step 1 analyzes whatever documents are listed in
    ``context['available_documents']``; step 2 depends on step 1 and
    produces a basic output. Keeps the workflow moving without AI help.
    """
    logger.warning("Creating fallback task plan due to AI generation failure")
    analysis_step = {
        "id": "task_1",
        "description": "Analyze all provided documents",
        "dependencies": [],
        "expected_outputs": ["document_analysis"],
        "success_criteria": ["All documents processed"],
        "required_documents": context.get('available_documents', []),
        "estimated_complexity": "medium"
    }
    output_step = {
        "id": "task_2",
        "description": "Generate basic output based on analysis",
        "dependencies": ["task_1"],
        "expected_outputs": ["basic_output"],
        "success_criteria": ["Output generated"],
        "required_documents": ["document_analysis"],
        "estimated_complexity": "low"
    }
    return {
        "overview": "Fallback task plan - basic document analysis and processing",
        "tasks": [analysis_step, output_step]
    }
async def generateActionsForTask(self, task_step: Dict[str, Any], workflow: ChatWorkflow, task: TaskItem, improvements: str = None) -> List[Dict[str, Any]]:
    """Generate the concrete action list for one task step via the AI service.

    Args:
        task_step: Planned step (id, description, expected outputs, ...).
        workflow: Workflow whose messages provide the available documents.
        task: Task whose completed actions provide previous result labels.
        improvements: Optional reviewer feedback when retrying a step.

    Returns:
        A list of validated action dicts; falls back to generic document
        analysis actions when generation or validation fails (never raises
        for generation errors).
    """
    # Build the generation context BEFORE the main try-block so it is always
    # bound when the except handler runs. Previously a failure while building
    # it (e.g. in _getAvailableDocuments) left `context` undefined and the
    # fallback path died with a NameError instead of producing actions.
    try:
        context = {
            'task_step': task_step,
            'workflow_id': workflow.id,
            'task_id': task.id,
            'available_documents': self._getAvailableDocuments(workflow),
            'previous_results': self._getPreviousResults(task),
            'improvements': improvements
        }
    except Exception as e:
        logger.error(f"Error building action generation context: {str(e)}")
        # Minimal but well-formed context so fallback generation still works.
        context = {
            'task_step': task_step,
            'available_documents': [],
            'previous_results': [],
            'improvements': improvements
        }
    try:
        prompt = self._createActionGenerationPrompt(context)
        response = await self._callAIWithCircuitBreaker(prompt, "action_generation")
        actions = self._parseActionResponse(response)
        if not self._validateActions(actions, context):
            logger.warning("Generated actions failed validation, using fallback actions")
            actions = self._createFallbackActions(task_step, context)
        logger.info(f"Generated {len(actions)} actions for task step: {task_step.get('description', 'Unknown')}")
        return actions
    except Exception as e:
        logger.error(f"Error generating actions for task: {str(e)}")
        return self._createFallbackActions(task_step, context)
def _validateActions(self, actions: List[Dict[str, Any]], context: Dict[str, Any]) -> bool:
    """Check that a generated action list is structurally usable.

    Each action must be a dict with non-empty 'method', 'action',
    'parameters' and 'resultLabel'; the result label must start with
    'documentList_' and parameters must be a dict. The `context` argument
    is accepted for interface compatibility but not consulted here.

    Returns:
        True when every action passes, False otherwise (never raises).
    """
    try:
        if not isinstance(actions, list):
            logger.error("Actions must be a list")
            return False
        if not actions:
            logger.warning("No actions generated")
            return False
        required = ('method', 'action', 'parameters', 'resultLabel')
        for index, candidate in enumerate(actions):
            if not isinstance(candidate, dict):
                logger.error(f"Action {index} must be a dictionary")
                return False
            # Empty values count as missing, not just absent keys.
            missing = [field for field in required if not candidate.get(field)]
            if missing:
                logger.error(f"Action {index} missing required fields: {missing}")
                return False
            label = candidate.get('resultLabel', '')
            if not label.startswith('documentList_'):
                logger.error(f"Action {index} result label must start with 'documentList_': {label}")
                return False
            if not isinstance(candidate.get('parameters', {}), dict):
                logger.error(f"Action {index} parameters must be a dictionary")
                return False
        logger.info(f"Successfully validated {len(actions)} actions")
        return True
    except Exception as e:
        logger.error(f"Error validating actions: {str(e)}")
        return False
def _createFallbackActions(self, task_step: Dict[str, Any], context: Dict[str, Any]) -> List[Dict[str, Any]]:
    """Build generic document-analysis actions when AI generation fails.

    One 'document.analyze' action is emitted per entry in
    ``context['available_documents']``; returns an empty list when there are
    no documents to work with.
    """
    logger.warning("Creating fallback actions due to AI generation failure")
    available_docs = context.get('available_documents', [])
    if not available_docs:
        logger.warning("No available documents for fallback actions")
        return []
    step_id = task_step.get('id', 'unknown')
    fallback_actions = [
        {
            "method": "document",
            "action": "analyze",
            "parameters": {
                "fileId": doc,
                "analysis": ["entities", "topics", "sentiment"]
            },
            # Label format mirrors the AI-generated actions so handover works.
            "resultLabel": f"documentList_fallback_{step_id}_{position}_analysis",
            "description": f"Fallback document analysis for {doc}"
        }
        for position, doc in enumerate(available_docs)
    ]
    logger.info(f"Created {len(fallback_actions)} fallback actions")
    return fallback_actions
async def reviewTaskStepResults(self, review_context: Dict[str, Any]) -> Dict[str, Any]:
    """Have the AI review a completed task step and decide the next move.

    Args:
        review_context: Must contain the keys consumed by
            ``_createResultReviewPrompt`` ('task_step', 'step_result').

    Returns:
        A review dict with at least 'status', 'reason' and 'quality_score'.
        On any error it defaults to status 'success' so a broken reviewer
        never blocks the workflow.
    """
    try:
        prompt = self._createResultReviewPrompt(review_context)
        response = await self._callAIWithCircuitBreaker(prompt, "result_review")
        review = self._parseReviewResponse(response)
        # Guarantee the fields downstream consumers read.
        for key, fallback in (('status', 'unknown'),
                              ('reason', 'No reason provided'),
                              ('quality_score', 5)):
            review.setdefault(key, fallback)
        logger.info(f"Review result: {review.get('status', 'unknown')}")
        return review
    except Exception as e:
        logger.error(f"Error reviewing task step results: {str(e)}")
        return {
            'status': 'success',  # Default to success to avoid blocking workflow
            'reason': f'Review failed: {str(e)}',
            'quality_score': 5,
            'confidence': 0.5
        }
# ===== Prompt Creation Methods =====
def _createTaskPlanningPrompt(self, context: Dict[str, Any]) -> str:
    """Build the LLM prompt that turns a user request into a JSON task plan.

    Expects ``context['user_request']`` (str) and
    ``context['available_documents']`` (iterable of str); raises KeyError if
    either key is missing. The prompt demands a JSON-only answer whose shape
    matches what `_parseTaskPlanResponse` / `_validateTaskPlan` accept.
    """
    # NOTE: doubled braces ({{ }}) are literal braces in this f-string; only
    # the {context[...]} expressions are interpolated.
    return f"""You are a task planning AI that analyzes user requests and creates structured task plans.
USER REQUEST: {context['user_request']}
AVAILABLE DOCUMENTS: {', '.join(context['available_documents'])}
INSTRUCTIONS:
1. Analyze the user request and available documents
2. Break down the request into logical task steps
3. Ensure all documents are properly utilized
4. Create a sequence that ensures proper handover between tasks
5. Return a JSON object with the exact structure shown below
REQUIRED JSON STRUCTURE:
{{
"overview": "Brief description of the overall plan",
"tasks": [
{{
"id": "task_1",
"description": "Clear description of what this task does",
"dependencies": ["task_0"], // IDs of tasks that must complete first
"expected_outputs": ["output1", "output2"],
"success_criteria": ["criteria1", "criteria2"],
"required_documents": ["doc1", "doc2"],
"estimated_complexity": "low|medium|high"
}}
]
}}
EXAMPLE FOR CANDIDATE EVALUATION:
{{
"overview": "Analyze candidate profiles, create evaluation matrix, generate presentation, and store results",
"tasks": [
{{
"id": "task_1",
"description": "Extract and analyze all candidate profiles and position criteria",
"dependencies": [],
"expected_outputs": ["candidate_analysis", "criteria_analysis"],
"success_criteria": ["All 3 candidates analyzed", "Position criteria understood"],
"required_documents": ["candidate_1_profile.txt", "candidate_2_profile.txt", "candidate_3_profile.txt", "product_designer_criteria.txt"],
"estimated_complexity": "medium"
}},
{{
"id": "task_2",
"description": "Create comprehensive evaluation matrix based on criteria",
"dependencies": ["task_1"],
"expected_outputs": ["evaluation_matrix"],
"success_criteria": ["Matrix covers all evaluation criteria", "Scoring system defined"],
"required_documents": ["criteria_analysis"],
"estimated_complexity": "medium"
}},
{{
"id": "task_3",
"description": "Rate all candidates against the evaluation matrix",
"dependencies": ["task_1", "task_2"],
"expected_outputs": ["candidate_ratings", "comparison_analysis"],
"success_criteria": ["All candidates rated", "Clear ranking established"],
"required_documents": ["candidate_analysis", "evaluation_matrix"],
"estimated_complexity": "high"
}},
{{
"id": "task_4",
"description": "Generate professional PowerPoint presentation for management",
"dependencies": ["task_3"],
"expected_outputs": ["presentation_file"],
"success_criteria": ["Executive-ready presentation", "Clear recommendations included"],
"required_documents": ["candidate_ratings", "comparison_analysis"],
"estimated_complexity": "high"
}},
{{
"id": "task_5",
"description": "Store presentation in SharePoint for p.motsch valueon account",
"dependencies": ["task_4"],
"expected_outputs": ["sharepoint_storage_confirmation"],
"success_criteria": ["File uploaded successfully", "Proper access permissions set"],
"required_documents": ["presentation_file"],
"estimated_complexity": "low"
}}
]
}}
CRITICAL: Respond with ONLY the JSON object. Do not include any explanatory text."""
def _createActionGenerationPrompt(self, context: Dict[str, Any]) -> str:
    """Build the LLM prompt that produces the actions for one task step.

    Expects ``context`` keys 'task_step', 'available_documents',
    'previous_results' (raises KeyError when absent) and optional
    'improvements'. The prompt demands a JSON-only answer with an 'actions'
    list, matching what `_parseActionResponse` / `_validateActions` accept.
    """
    task_step = context['task_step']
    available_docs = context['available_documents']
    previous_results = context['previous_results']
    improvements = context.get('improvements', '')
    # NOTE: doubled braces ({{ }}) are literal braces in this f-string.
    return f"""You are an action generation AI that creates specific actions to accomplish a task step.
TASK STEP: {task_step.get('description', 'Unknown')}
TASK ID: {task_step.get('id', 'Unknown')}
EXPECTED OUTPUTS: {', '.join(task_step.get('expected_outputs', []))}
SUCCESS CRITERIA: {', '.join(task_step.get('success_criteria', []))}
AVAILABLE DOCUMENTS: {', '.join(available_docs)}
PREVIOUS RESULTS: {', '.join(previous_results) if previous_results else 'None'}
IMPROVEMENTS NEEDED: {improvements if improvements else 'None'}
AVAILABLE METHODS:
{self._getAvailableMethodsDescription()}
INSTRUCTIONS:
1. Generate specific actions to accomplish this task step
2. Use available documents and previous results
3. Ensure proper result labels for handover
4. Follow the exact JSON structure below
5. ALL fields are REQUIRED: method, action, parameters, resultLabel, description
REQUIRED JSON STRUCTURE:
{{
"actions": [
{{
"method": "method_name",
"action": "action_name",
"parameters": {{
"param1": "value1",
"param2": "value2"
}},
"resultLabel": "documentList_uuid_descriptive_label",
"description": "What this action does"
}}
]
}}
FIELD REQUIREMENTS:
- "method": Must be one of the available methods (e.g., "document", "excel", "powerpoint")
- "action": Must be a valid action for that method (e.g., "analyze", "create", "write")
- "parameters": Object with method-specific parameters
- "resultLabel": MUST start with "documentList_" followed by unique identifier and descriptive label
- "description": Clear description of what the action accomplishes
EXAMPLE VALID ACTION:
{{
"method": "document",
"action": "analyze",
"parameters": {{
"fileId": "candidate_1_profile.txt",
"analysis": ["entities", "topics", "sentiment"]
}},
"resultLabel": "documentList_abc123_candidate_analysis",
"description": "Analyze candidate profile for key information extraction"
}}
CRITICAL: Respond with ONLY the JSON object. Do not include any explanatory text."""
def _createResultReviewPrompt(self, review_context: Dict[str, Any]) -> str:
    """Build the LLM prompt that reviews a finished task step.

    Expects ``review_context['task_step']`` and
    ``review_context['step_result']`` (raises KeyError when absent). The
    prompt demands a JSON-only verdict with a 'status' of
    success/retry/failed, as consumed by `_parseReviewResponse`.
    """
    task_step = review_context['task_step']
    step_result = review_context['step_result']
    # NOTE: doubled braces ({{ }}) are literal braces in this f-string;
    # step_result must be JSON-serializable for json.dumps below.
    return f"""You are a result review AI that evaluates task step completion and decides on next actions.
TASK STEP: {task_step.get('description', 'Unknown')}
EXPECTED OUTPUTS: {', '.join(task_step.get('expected_outputs', []))}
SUCCESS CRITERIA: {', '.join(task_step.get('success_criteria', []))}
STEP RESULT: {json.dumps(step_result, indent=2)}
INSTRUCTIONS:
1. Evaluate if the task step was completed successfully
2. Check if all expected outputs were produced
3. Verify if success criteria were met
4. Decide on next action: continue, retry, or fail
5. If retry, provide specific improvements needed
REQUIRED JSON STRUCTURE:
{{
"status": "success|retry|failed",
"reason": "Explanation of the decision",
"improvements": "Specific improvements for retry (if status is retry)",
"quality_score": 1-10,
"missing_outputs": ["output1", "output2"],
"met_criteria": ["criteria1", "criteria2"],
"unmet_criteria": ["criteria3", "criteria4"]
}}
CRITICAL: Respond with ONLY the JSON object. Do not include any explanatory text."""
# ===== Helper Methods =====
def _getAvailableDocuments(self, workflow: ChatWorkflow) -> List[str]:
    """Collect the filenames of every document attached to any workflow message.

    Duplicates are preserved; order follows message order, then attachment
    order within each message.
    """
    return [
        attachment.filename
        for message in workflow.messages
        for attachment in message.documents
    ]
def _getPreviousResults(self, task: TaskItem) -> List[str]:
    """Return the result labels of this task's actions that produced output.

    Actions with an empty/falsy ``execResultLabel`` are skipped; order
    follows the task's action list.
    """
    return [
        step.execResultLabel
        for step in task.actionList
        if step.execResultLabel
    ]
def _getAvailableMethodsDescription(self) -> str:
"""Get description of available methods for action generation"""
try:
if hasattr(self, 'service') and self.service:
methods = self.service.getMethodsList()
return '\n'.join([f"- {method}" for method in methods])
else:
return """- document.analyze: Analyze document content
- document.extract: Extract content from document
- document.summarize: Summarize document content
- excel.create: Create new Excel file
- excel.write: Write data to Excel file
- powerpoint.createPresentation: Create new PowerPoint presentation
- powerpoint.addSlide: Add slide to presentation
- sharepoint.write: Write to SharePoint
- operator.aiCall: Call AI service with content"""
except Exception as e:
logger.error(f"Error getting available methods: {str(e)}")
return "- document.analyze: Analyze document content"
def _parseTaskPlanResponse(self, response: str) -> Dict[str, Any]:
"""Parse AI response into task plan structure"""
try:
# Extract JSON from response
json_start = response.find('{')
json_end = response.rfind('}') + 1
if json_start == -1 or json_end == 0:
raise ValueError("No JSON found in response")
json_str = response[json_start:json_end]
task_plan = json.loads(json_str)
# Validate structure
if 'tasks' not in task_plan:
raise ValueError("Task plan missing 'tasks' field")
return task_plan
except Exception as e:
logger.error(f"Error parsing task plan response: {str(e)}")
return {'tasks': []}
def _parseActionResponse(self, response: str) -> List[Dict[str, Any]]:
"""Parse AI response into action list"""
try:
# Extract JSON from response
json_start = response.find('{')
json_end = response.rfind('}') + 1
if json_start == -1 or json_end == 0:
raise ValueError("No JSON found in response")
json_str = response[json_start:json_end]
action_data = json.loads(json_str)
# Validate structure
if 'actions' not in action_data:
raise ValueError("Action response missing 'actions' field")
return action_data['actions']
except Exception as e:
logger.error(f"Error parsing action response: {str(e)}")
return []
def _parseReviewResponse(self, response: str) -> Dict[str, Any]:
"""Parse AI response into review result"""
try:
# Extract JSON from response
json_start = response.find('{')
json_end = response.rfind('}') + 1
if json_start == -1 or json_end == 0:
raise ValueError("No JSON found in response")
json_str = response[json_start:json_end]
review = json.loads(json_str)
# Validate structure
if 'status' not in review:
raise ValueError("Review response missing 'status' field")
return review
except Exception as e:
logger.error(f"Error parsing review response: {str(e)}")
return {'status': 'failed', 'reason': f'Parse error: {str(e)}'}
async def _callAI(self, prompt: str, context: str) -> str:
"""Call AI service with prompt"""
try:
# Use the existing AI call mechanism through service
if hasattr(self, 'service') and self.service:
# Ensure service is properly initialized
if hasattr(self.service, 'callAiTextBasic'):
response = await self.service.callAiTextBasic(prompt)
return response
else:
raise Exception("Service does not have callAiTextBasic method")
else:
raise Exception("No service available for AI calls")
except Exception as e:
logger.error(f"Error calling AI for {context}: {str(e)}")
raise
async def executeAction(self, action: Dict[str, Any], workflow: ChatWorkflow) -> Dict[str, Any]:
    """Execute one planned action by delegating it to the AI service.

    NOTE(review): this does not dispatch to the named method implementation —
    it asks the AI to "execute" the action and report a JSON result; confirm
    this simulated execution path is intended.

    Args:
        action: Action dict ('method', 'action', 'parameters', 'description').
        workflow: Current workflow (accepted but not read in this method).

    Returns:
        Dict with 'status' ('completed' or 'failed'), 'result', 'error',
        'resultLabel', 'documents', and the original 'action'. Never raises;
        all failures are folded into a 'failed' result.
    """
    try:
        # Create action prompt
        # (Doubled braces are literal braces in this f-string.)
        prompt = f"""Execute the following action and return ONLY a JSON response.
Action: {action.get('method', 'unknown')}.{action.get('action', 'unknown')}
Parameters: {json.dumps(action.get('parameters', {}), indent=2)}
Description: {action.get('description', 'No description provided')}
CRITICAL: Respond with ONLY a JSON object in this exact format:
{{
"result": "Description of what was accomplished",
"resultLabel": "documentList_{action.get('method', 'unknown')}_{action.get('action', 'unknown')}_result",
"documents": [
"document_{action.get('method', 'unknown')}_{action.get('action', 'unknown')}_output.txt"
],
"error": ""
}}
DO NOT include any explanatory text, markdown formatting, or additional content outside the JSON structure.
DO NOT use code blocks or backticks.
Return ONLY the JSON object."""
        # Get AI response
        response = await self._callAIWithCircuitBreaker(prompt, "action_execution")
        # Parse response
        result = self._extractJsonFromResponse(response)
        if not result:
            logger.error(f"Invalid JSON in action result: {response}")
            return {
                "status": "failed",
                "error": "Invalid result format",
                "action": action
            }
        # A non-empty 'error' field in the AI's report marks the action failed.
        return {
            "status": "completed" if not result.get("error") else "failed",
            "result": result.get("result", ""),
            "error": result.get("error", ""),
            "resultLabel": result.get("resultLabel", ""),
            "documents": result.get("documents", []),
            "action": action
        }
    except Exception as e:
        logger.error(f"Error executing action: {str(e)}")
        return {
            "status": "failed",
            "error": str(e),
            "action": action
        }

View file

@ -1,10 +1,12 @@
from typing import Dict, Any from typing import Dict, Any, List, Optional
import logging import logging
from datetime import datetime, UTC from datetime import datetime, UTC
import uuid
import asyncio
from modules.interfaces.interfaceAppObjects import User from modules.interfaces.interfaceAppObjects import User
from modules.interfaces.interfaceChatModel import (UserInputRequest, ChatMessage, ChatWorkflow, TaskItem) from modules.interfaces.interfaceChatModel import (UserInputRequest, ChatMessage, ChatWorkflow, TaskItem, TaskStatus)
from modules.interfaces.interfaceChatObjects import ChatObjects from modules.interfaces.interfaceChatObjects import ChatObjects
from modules.workflow.managerChat import ChatManager from modules.workflow.managerChat import ChatManager
@ -28,31 +30,379 @@ class WorkflowManager:
raise WorkflowStoppedException("Workflow was stopped by user") raise WorkflowStoppedException("Workflow was stopped by user")
async def workflowProcess(self, userInput: UserInputRequest, workflow: ChatWorkflow) -> TaskItem:
    """Enhanced workflow process with proper task planning and handover review.

    Three sequential phases: (1) persist the user request as the initial
    message, (2) let the AI derive a task plan, (3) execute the plan with
    handover reviews. Any phase failure is logged and re-raised.
    """
    try:
        logger.info(f"Processing workflow: {workflow.id}")
        # Phase 1: persist the user request and its documents.
        initial_message = await self._createInitialMessage(userInput, workflow)
        if not initial_message:
            raise Exception("Failed to create initial message")
        # Phase 2: derive the task plan via AI analysis.
        task_plan = await self._generateTaskPlan(userInput, workflow, initial_message)
        if not task_plan:
            raise Exception("Failed to generate task plan")
        # Phase 3: execute the plan with handover review between steps.
        return await self._executeTaskPlan(task_plan, workflow, userInput)
    except Exception as e:
        logger.error(f"Error in workflowProcess: {str(e)}")
        raise
async def _createInitialMessage(self, userInput: UserInputRequest, workflow: ChatWorkflow) -> Optional[ChatMessage]:
    """Create the initial workflow message holding the user request and its documents.

    Fix: return type is Optional[ChatMessage] — the except branch returns None,
    and workflowProcess treats a falsy result as a hard error.

    Args:
        userInput: The incoming request (prompt plus uploaded file ids).
        workflow: The workflow the message belongs to.

    Returns:
        The persisted ChatMessage, or None when initialization, document
        resolution, or persistence fails.
    """
    try:
        # Bind the chat manager to this workflow before resolving documents.
        await self.chatManager.initialize(workflow)
        # Resolve uploaded file ids into ChatDocument objects.
        documents = await self.chatManager.processFileIds(userInput.listFileId)
        message_data = {
            "id": f"msg_{uuid.uuid4()}",
            "workflowId": workflow.id,
            "role": "user",
            "agentName": "",
            "message": userInput.prompt,
            "documents": documents,
            "status": "step",
            "publishedAt": self._getCurrentTimestamp()
        }
        message = self.chatInterface.createWorkflowMessage(message_data)
        if not message:
            raise Exception("Failed to create workflow message")
        logger.info(f"Created initial message: {message.id} with {len(documents)} documents")
        return message
    except Exception as e:
        # Signal failure via None; the caller converts this into an error.
        logger.error(f"Error creating initial message: {str(e)}")
        return None
async def _generateTaskPlan(self, userInput: UserInputRequest, workflow: ChatWorkflow, initial_message: ChatMessage) -> Optional[Dict[str, Any]]:
    """Ask the AI to turn the user request into a structured task plan.

    Fix: return type is Optional[Dict[str, Any]] — the except branch returns
    None, and workflowProcess treats a falsy result as a hard error.

    Args:
        userInput: The original user request.
        workflow: The workflow being processed.
        initial_message: The persisted first message (source of the documents).

    Returns:
        The plan dict (expected to carry a 'tasks' list and optionally an
        'overview'), or None on failure.
    """
    try:
        # Only data visible at this point is handed to the AI planner.
        context = {
            "user_request": userInput.prompt,
            "available_documents": [doc.filename for doc in initial_message.documents],
            "workflow_id": workflow.id,
            "message_id": initial_message.id
        }
        task_plan = await self.chatManager.generateTaskPlan(context)
        logger.info(f"Generated task plan with {len(task_plan.get('tasks', []))} tasks")
        return task_plan
    except Exception as e:
        logger.error(f"Error generating task plan: {str(e)}")
        return None
async def _executeTaskPlan(self, task_plan: Dict[str, Any], workflow: ChatWorkflow, userInput: UserInputRequest) -> TaskItem:
    """Execute task plan with handover review and enhanced error recovery.

    Creates one TaskItem for the whole plan, then runs each planned step:
    execute with retry -> handover review -> (on failure) one alternative
    approach -> (on 'retry' verdict) one improved re-run. The TaskItem is
    returned in all terminal states; its status field carries the outcome.
    """
    try:
        tasks = task_plan.get('tasks', [])
        if not tasks:
            raise Exception("No tasks in task plan")
        # Create the single TaskItem that tracks the whole plan.
        task_data = {
            "id": f"task_{uuid.uuid4()}",
            "workflowId": workflow.id,
            "userInput": userInput.prompt,
            "status": TaskStatus.RUNNING,
            "feedback": task_plan.get('overview', 'Executing task plan'),
            "startedAt": self._getCurrentTimestamp(),
            "actionList": [],
            "taskPlan": task_plan
        }
        task = self.chatInterface.createTask(task_data)
        if not task:
            raise Exception("Failed to create task")
        # Ensure task is saved to database
        logger.info(f"Created task with ID: {task.id}")
        # Execute each planned step with enhanced error recovery.
        for i, task_step in enumerate(tasks):
            logger.info(f"Executing task {i+1}/{len(tasks)}: {task_step.get('description', 'Unknown')}")
            # Execute task step with retry mechanism (exponential backoff).
            step_result = await self._executeTaskStepWithRetry(task_step, workflow, task)
            # Enhanced handover review: AI validates the step before moving on.
            review_result = await self._performEnhancedHandoverReview(step_result, task_step, workflow, task)
            if review_result['status'] == 'failed':
                # Try one alternative approach before giving up on the plan.
                alternative_result = await self._tryAlternativeApproach(task_step, workflow, task, review_result)
                if alternative_result['status'] == 'failed':
                    # Both original and alternative failed: mark the task FAILED
                    # with diagnostic details and stop executing further steps.
                    update_result = self.chatInterface.updateTask(task.id, {
                        "status": TaskStatus.FAILED,
                        "feedback": f"Task failed at step {i+1}: {review_result['reason']}. Alternative approach also failed.",
                        "errorDetails": {
                            "failedStep": task_step,
                            "originalError": review_result['reason'],
                            "suggestions": self._generateFailureSuggestions(task_step, review_result)
                        }
                    })
                    if not update_result:
                        logger.error(f"Failed to update task {task.id} status to FAILED")
                    # NOTE(review): source rendering lost indentation here; the
                    # return is placed outside the `if not update_result` guard so
                    # the task is returned whether or not persistence succeeded.
                    return task
                else:
                    # Alternative succeeded: adopt its result and proceed.
                    step_result = alternative_result
                    review_result = {'status': 'success'}
            elif review_result['status'] == 'retry':
                # Reviewer asked for a retry with its suggested improvements.
                logger.info(f"Retrying task step {i+1} with improved approach")
                step_result = await self._executeTaskStepWithRetry(task_step, workflow, task, improvements=review_result.get('improvements'))
                review_result = await self._performEnhancedHandoverReview(step_result, task_step, workflow, task)
                if review_result['status'] == 'failed':
                    update_result = self.chatInterface.updateTask(task.id, {
                        "status": TaskStatus.FAILED,
                        "feedback": f"Task failed after retry at step {i+1}: {review_result['reason']}"
                    })
                    if not update_result:
                        logger.error(f"Failed to update task {task.id} status to FAILED after retry")
                    return task
            # Persist each generated action and attach it to the task.
            if step_result and step_result.get('actions'):
                for action in step_result['actions']:
                    # Convert action format to TaskAction format
                    task_action_data = {
                        "execMethod": action.get('method', 'unknown'),
                        "execAction": action.get('action', 'unknown'),
                        "execParameters": action.get('parameters', {}),
                        "execResultLabel": action.get('resultLabel', ''),
                        "status": TaskStatus.PENDING
                    }
                    task_action = self.chatInterface.createTaskAction(task_action_data)
                    if task_action:
                        task.actionList.append(task_action)
                        logger.info(f"Created task action: {task_action.execMethod}.{task_action.execAction}")
                    else:
                        logger.error(f"Failed to create task action: {action}")
            # Log progress after each completed step (logging only, no persistence).
            self._updateTaskProgress(task, i + 1, len(tasks))
        # All steps done: mark the task COMPLETED with summary metrics.
        update_result = self.chatInterface.updateTask(task.id, {
            "status": TaskStatus.COMPLETED,
            "feedback": f"Successfully completed {len(tasks)} tasks with {len(task.actionList)} total actions",
            "finishedAt": self._getCurrentTimestamp(),
            "successMetrics": {
                "totalTasks": len(tasks),
                "totalActions": len(task.actionList),
                "executionTime": self._calculateExecutionTime(task.startedAt)
            }
        })
        if not update_result:
            logger.error(f"Failed to update task {task.id} status to COMPLETED")
        return task
    except Exception as e:
        logger.error(f"Error executing task plan: {str(e)}")
        raise
async def _executeTaskStepWithRetry(self, task_step: Dict[str, Any], workflow: ChatWorkflow, task: TaskItem, max_retries: int = 3, improvements: str = None) -> Dict[str, Any]:
"""Execute task step with exponential backoff retry mechanism"""
last_error = None
# Set user language for attempt in range(max_retries + 1):
self.chatManager.setUserLanguage(userInput.userLanguage) try:
# Add exponential backoff delay for retries
if attempt > 0:
delay = min(2 ** attempt, 30) # Max 30 seconds
await asyncio.sleep(delay)
logger.info(f"Retry attempt {attempt} for task step: {task_step.get('description', 'Unknown')}")
# Execute task step
step_result = await self._executeTaskStep(task_step, workflow, task, improvements)
# Quick validation
if step_result.get('status') == 'completed':
return step_result
else:
last_error = step_result.get('error', 'Unknown error')
except Exception as e:
last_error = str(e)
logger.warning(f"Attempt {attempt + 1} failed for task step: {str(e)}")
# Send first message # All retries exhausted
message = await self._sendFirstMessage(userInput, workflow) return {
'task_step': task_step,
'error': f"All {max_retries + 1} attempts failed. Last error: {last_error}",
'status': 'failed',
'retryAttempts': max_retries + 1
}
async def _performEnhancedHandoverReview(self, step_result: Dict[str, Any], task_step: Dict[str, Any], workflow: ChatWorkflow, task: TaskItem) -> Dict[str, Any]:
"""Enhanced handover review with quality assessment"""
try:
# Prepare enhanced review context
review_context = {
'task_step': task_step,
'step_result': step_result,
'workflow_id': workflow.id,
'task_id': task.id,
'previous_results': self._getPreviousResults(task)
}
# Use AI to review the results
review = await self.chatManager.reviewTaskStepResults(review_context)
# Add quality metrics
review['quality_metrics'] = await self._calculateQualityMetrics(step_result, task_step)
return review
except Exception as e:
logger.error(f"Error in enhanced handover review: {str(e)}")
return {
'status': 'failed',
'reason': f'Review failed: {str(e)}',
'quality_metrics': {'score': 0, 'confidence': 0}
}
async def _tryAlternativeApproach(self, task_step: Dict[str, Any], workflow: ChatWorkflow, task: TaskItem, original_review: Dict[str, Any]) -> Dict[str, Any]:
    """Attempt a different strategy for a step whose first execution failed.

    Asks the AI for an alternative based on the failure analysis, then re-runs
    the step with that approach as improvements. Returns a 'failed' dict when
    no alternative can be derived or anything raises.
    """
    try:
        logger.info(f"Trying alternative approach for task step: {task_step.get('description', 'Unknown')}")
        # Let the AI propose an alternative from the failure analysis.
        prompt = self._createAlternativeApproachPrompt(task_step, original_review)
        raw = await self.chatManager._callAI(prompt, "alternative_approach")
        approach = self._parseAlternativeApproach(raw)
        if not approach:
            return {
                'task_step': task_step,
                'error': 'Could not generate alternative approach',
                'status': 'failed'
            }
        # Re-run the step, feeding the alternative in as improvements.
        return await self._executeTaskStep(task_step, workflow, task, approach)
    except Exception as e:
        logger.error(f"Error trying alternative approach: {str(e)}")
        return {
            'task_step': task_step,
            'error': f'Alternative approach failed: {str(e)}',
            'status': 'failed'
        }
def _generateFailureSuggestions(self, task_step: Dict[str, Any], review_result: Dict[str, Any]) -> List[str]:
"""Generate helpful suggestions when tasks fail"""
suggestions = []
# Create initial task if 'missing_outputs' in review_result:
task = await self.chatManager.createInitialTask(workflow, message) suggestions.append(f"Ensure all expected outputs are produced: {', '.join(review_result['missing_outputs'])}")
# Log the task object if 'unmet_criteria' in review_result:
logger.info(f"Created task: {task}") suggestions.append(f"Address unmet success criteria: {', '.join(review_result['unmet_criteria'])}")
if task:
logger.info(f"Task ID: {task.id}")
logger.info(f"Task Status: {task.status}")
logger.info(f"Task Feedback: {task.feedback}")
logger.info(f"Number of actions: {len(task.actionList) if task.actionList else 0}")
return task suggestions.append("Check if all required documents are available and accessible")
suggestions.append("Verify that the task step has all necessary dependencies completed")
return suggestions
async def _calculateQualityMetrics(self, step_result: Dict[str, Any], task_step: Dict[str, Any]) -> Dict[str, Any]:
"""Calculate quality metrics for task step results"""
try:
quality_score = 0
confidence = 0
if step_result.get('status') == 'completed':
quality_score = 8 # Base score for completion
# Check if all expected outputs were produced
expected_outputs = task_step.get('expected_outputs', [])
produced_outputs = step_result.get('outputs', [])
output_coverage = len(set(produced_outputs) & set(expected_outputs)) / len(expected_outputs) if expected_outputs else 1
quality_score += output_coverage * 2
confidence = min(quality_score / 10, 1.0)
return {
'score': min(quality_score, 10),
'confidence': confidence
}
except Exception as e:
logger.error(f"Error calculating quality metrics: {str(e)}")
return {'score': 0, 'confidence': 0}
def _updateTaskProgress(self, task: TaskItem, current_step: int, total_steps: int):
    """Log how far through the plan this task is (logging only, no persistence)."""
    pct = (current_step / total_steps) * 100
    logger.info(f"Task progress: {pct:.1f}% ({current_step}/{total_steps})")
def _calculateExecutionTime(self, started_at: str) -> float:
"""Calculate execution time in seconds"""
try:
start_time = datetime.fromisoformat(started_at.replace('Z', '+00:00'))
end_time = datetime.now(UTC)
return (end_time - start_time).total_seconds()
except Exception:
return 0.0
def _getPreviousResults(self, task: TaskItem) -> List[str]:
"""Get list of previous results from completed actions"""
results = []
for action in task.actionList:
if action.execResultLabel:
results.append(action.execResultLabel)
return results
def _createAlternativeApproachPrompt(self, task_step: Dict[str, Any], original_review: Dict[str, Any]) -> str:
"""Create prompt for generating alternative approaches"""
return f"""The original approach for this task step failed. Please suggest an alternative approach.
TASK STEP: {task_step.get('description', 'Unknown')}
ORIGINAL FAILURE: {original_review.get('reason', 'Unknown error')}
MISSING OUTPUTS: {', '.join(original_review.get('missing_outputs', []))}
Please provide an alternative approach that addresses these issues."""
def _parseAlternativeApproach(self, response: str) -> Optional[str]:
"""Parse alternative approach from AI response"""
try:
# Simple parsing - extract the approach description
if "approach:" in response.lower():
lines = response.split('\n')
for line in lines:
if "approach:" in line.lower():
return line.split(":", 1)[1].strip()
return None
except Exception:
return None
def _getCurrentTimestamp(self) -> str:
"""Get current timestamp in ISO format"""
return datetime.now(UTC).isoformat()
async def workflowProcess_ORIGINAL_TEMPORARY_DEACTIVATED(self, userInput: UserInputRequest, workflow: ChatWorkflow) -> None: async def workflowProcess_ORIGINAL_TEMPORARY_DEACTIVATED(self, userInput: UserInputRequest, workflow: ChatWorkflow) -> None:
"""Process a workflow with user input""" """Process a workflow with user input"""
@ -155,4 +505,31 @@ class WorkflowManager:
except Exception as e: except Exception as e:
logger.error(f"Error sending last message: {str(e)}") logger.error(f"Error sending last message: {str(e)}")
raise raise
async def _executeTaskStep(self, task_step: Dict[str, Any], workflow: ChatWorkflow, task: TaskItem, improvements: str = None) -> Dict[str, Any]:
"""Execute a single task step and generate actions"""
try:
# Generate actions for this task step
actions = await self.chatManager.generateActionsForTask(task_step, workflow, task, improvements)
# Execute actions
results = []
for action in actions:
action_result = await self.chatManager.executeAction(action, workflow)
results.append(action_result)
return {
'task_step': task_step,
'actions': actions,
'results': results,
'status': 'completed'
}
except Exception as e:
logger.error(f"Error executing task step: {str(e)}")
return {
'task_step': task_step,
'error': str(e),
'status': 'failed'
}

View file

@ -5,6 +5,7 @@ from pydantic import BaseModel, Field
import logging import logging
from modules.interfaces.interfaceChatModel import ActionResult from modules.interfaces.interfaceChatModel import ActionResult
from functools import wraps from functools import wraps
from inspect import signature
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@ -28,8 +29,65 @@ class MethodBase:
@property
def actions(self) -> Dict[str, Dict[str, Any]]:
    """Dynamically collect all actions decorated with @action in the class.

    Scans the instance for callables flagged with ``is_action`` and maps each
    action name to its description, inspected parameters (excluding the
    framework-injected 'self', 'parameters' and 'authData'), and the bound
    method itself.
    """
    collected: Dict[str, Dict[str, Any]] = {}
    for member_name in dir(self):
        # Never touch the 'actions' property itself: that would recurse.
        if member_name == 'actions':
            continue
        try:
            member = getattr(self, member_name)
        except (AttributeError, RecursionError):
            continue
        if not (callable(member) and getattr(member, 'is_action', False)):
            continue
        try:
            sig = signature(member)
            specs: Dict[str, Any] = {}
            for arg_name, arg in sig.parameters.items():
                if arg_name in ['self', 'parameters', 'authData']:
                    continue
                annotated = arg.annotation if arg.annotation != arg.empty else Any
                specs[arg_name] = {
                    'type': annotated,
                    'required': arg.default == arg.empty,
                    'description': None,
                    'default': arg.default if arg.default != arg.empty else None,
                }
            collected[member_name] = {
                'description': member.__doc__ or '',
                'parameters': specs,
                'method': member,
            }
        except (AttributeError, RecursionError):
            continue
    return collected
def getActionSignature(self, actionName: str) -> str:
    """Get formatted action signature for AI prompt generation.

    Fixes: the local variable previously named ``signature`` shadowed
    ``inspect.signature`` imported at module level; and ``self.actions`` (a
    reflective property) was evaluated twice per call — it is now read once.

    Args:
        actionName: Name of the action to describe.

    Returns:
        "<name>.<action>([p1:T1, p2:T2]) # description", or "" for an
        unknown action name.
    """
    available = self.actions  # property recomputes via reflection; read once
    if actionName not in available:
        return ""
    action = available[actionName]
    paramList = [
        f"{paramName}:{self._formatType(param['type'])}"
        for paramName, param in action['parameters'].items()
    ]
    # Renamed from `signature` to avoid shadowing inspect.signature.
    sigText = f"{self.name}.{actionName}([{', '.join(paramList)}])"
    if action.get('description'):
        sigText += f" # {action['description']}"
    return sigText
def _formatType(self, type_annotation) -> str:
"""Format type annotation for display"""
if type_annotation == Any:
return "Any"
elif hasattr(type_annotation, '__name__'):
return type_annotation.__name__
elif hasattr(type_annotation, '_name'):
return type_annotation._name
else:
return str(type_annotation)
async def execute(self, action: str, parameters: Dict[str, Any], authData: Optional[Dict[str, Any]] = None) -> ActionResult: async def execute(self, action: str, parameters: Dict[str, Any], authData: Optional[Dict[str, Any]] = None) -> ActionResult:
""" """

View file

@ -74,11 +74,21 @@ class ServiceContainer:
params = {} params = {}
for paramName, param in sig.parameters.items(): for paramName, param in sig.parameters.items():
if paramName not in ['self', 'authData']: if paramName not in ['self', 'authData']:
# Get parameter type
paramType = param.annotation if param.annotation != param.empty else Any
# Get parameter description from docstring or default
paramDesc = None
if param.default != param.empty and hasattr(param.default, '__doc__'):
paramDesc = param.default.__doc__
params[paramName] = { params[paramName] = {
'type': param.annotation if param.annotation != param.empty else Any, 'type': paramType,
'required': param.default == param.empty, 'required': param.default == param.empty,
'description': param.default.__doc__ if hasattr(param.default, '__doc__') else None 'description': paramDesc,
'default': param.default if param.default != param.empty else None
} }
actions[methodName] = { actions[methodName] = {
'description': method.__doc__ or '', 'description': method.__doc__ or '',
'parameters': params, 'parameters': params,
@ -126,20 +136,15 @@ class ServiceContainer:
return catalog return catalog
def getMethodsList(self) -> List[str]: def getMethodsList(self) -> List[str]:
"""Get list of available methods with their signatures""" """Get list of available methods with their signatures in the required format"""
methodList = [] methodList = []
for methodName, method in self.methods.items(): for methodName, method in self.methods.items():
methodInstance = method['instance']
for actionName, action in method['actions'].items(): for actionName, action in method['actions'].items():
# Get parameter types from action signature # Use the new signature format from MethodBase
paramTypes = [] signature = methodInstance.getActionSignature(actionName)
for paramName, param in action['parameters'].items(): if signature:
paramTypes.append(f"{paramName}:{param['type']}") methodList.append(signature)
# Format: method.action([param1:type, param2:type]) # description
signature = f"{methodName}.{actionName}([{', '.join(paramTypes)}])"
if action['description']:
signature += f" # {action['description']}"
methodList.append(signature)
return methodList return methodList
def getDocumentReferenceList(self) -> Dict[str, List[Dict[str, str]]]: def getDocumentReferenceList(self) -> Dict[str, List[Dict[str, str]]]:
@ -154,26 +159,33 @@ class ServiceContainer:
# For messages with action context, use documentList reference # For messages with action context, use documentList reference
if message.actionId and message.documentsLabel: if message.actionId and message.documentsLabel:
doc_ref = self.getDocumentReferenceFromMessage(message) doc_ref = self.getDocumentReferenceFromMessage(message)
doc_info = { if doc_ref:
"documentReference": doc_ref, doc_info = {
"datetime": message.publishedAt "documentReference": doc_ref,
} "datetime": message.publishedAt,
"actionMethod": message.actionMethod,
# Add to appropriate list based on message status "actionName": message.actionName,
if message.status == "first": "documentCount": len(message.documents)
chat_refs.append(doc_info) }
break # Stop after finding first message
elif message.status == "step": # Add to appropriate list based on message status
chat_refs.append(doc_info) if message.status == "first":
else: chat_refs.append(doc_info)
history_refs.append(doc_info) break # Stop after finding first message
elif message.status == "step":
chat_refs.append(doc_info)
else:
history_refs.append(doc_info)
# For regular messages, use individual document references # For regular messages, use individual document references
else: else:
for doc in message.documents: for doc in message.documents:
doc_ref = self.getDocumentReferenceFromChatDocument(doc) doc_ref = self.getDocumentReferenceFromChatDocument(doc)
doc_info = { doc_info = {
"documentReference": doc_ref, "documentReference": doc_ref,
"datetime": message.publishedAt "datetime": message.publishedAt,
"actionMethod": None,
"actionName": None,
"documentCount": 1
} }
# Add to appropriate list based on message status # Add to appropriate list based on message status

170
test_document_handover.py Normal file
View file

@ -0,0 +1,170 @@
#!/usr/bin/env python3
"""
Test script to demonstrate the improved document handover mechanism.
This shows how documents are properly stored and retrieved between actions.
"""
import sys
import os
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from modules.workflow.serviceContainer import ServiceContainer
from modules.interfaces.interfaceAppModel import User
from modules.interfaces.interfaceChatModel import ChatWorkflow, ChatMessage, ChatDocument
from datetime import datetime, UTC
import json
def test_document_handover():
    """Test the document handover mechanism.

    Demonstration script, not a pytest case: it simulates two action results,
    wraps them in workflow messages, then prints how document references are
    retrieved. It asserts nothing — success is judged by reading the output.
    """
    # Create test user and workflow
    user = User(
        id="test_user",
        username="testuser",
        language="en",
        mandateId="test_mandate"
    )
    workflow = ChatWorkflow(
        id="test_workflow",
        mandateId="test_mandate",
        startedAt=datetime.now(UTC).isoformat(),
        status="active",
        currentRound=1,
        lastActivity=datetime.now(UTC).isoformat(),
        messages=[]
    )
    # Create service container
    container = ServiceContainer(user, workflow)
    print("=" * 80)
    print("DOCUMENT HANDOVER MECHANISM TEST")
    print("=" * 80)
    # Simulate action execution and document creation
    print("\n1. SIMULATING ACTION EXECUTION")
    print("-" * 40)
    # Simulate first action: SharePoint search
    action1_result = {
        "result": "Found 5 sales documents in SharePoint",
        "resultLabel": "documentList_abc123_sales_documents",
        "documents": [
            "document_001_sales_report_q1.xlsx",
            "document_002_sales_report_q2.xlsx",
            "document_003_sales_report_q3.xlsx"
        ],
        "error": None
    }
    print(f"Action 1 Result: {json.dumps(action1_result, indent=2)}")
    # Simulate second action: Excel analysis
    action2_result = {
        "result": "Analyzed sales data and created summary report",
        "resultLabel": "documentList_def456_analysis_results",
        "documents": [
            "document_004_sales_analysis_summary.xlsx",
            "document_005_sales_trends_chart.png"
        ],
        "error": None
    }
    print(f"\nAction 2 Result: {json.dumps(action2_result, indent=2)}")
    # Simulate workflow messages with documents
    print("\n2. SIMULATING WORKFLOW MESSAGES")
    print("-" * 40)
    # Create mock messages to simulate the workflow
    messages = []
    # Message 1: SharePoint search result (carries the action's documentsLabel
    # so later actions can reference this batch of documents as one unit)
    message1 = ChatMessage(
        id="msg_001",
        workflowId=workflow.id,
        role="assistant",
        message=action1_result["result"],
        status="step",
        sequenceNr=1,
        publishedAt=datetime.now(UTC).isoformat(),
        actionId="action_001",
        actionMethod="sharepoint",
        actionName="search",
        documentsLabel=action1_result["resultLabel"],
        documents=[
            ChatDocument(id="001", fileId="file_001", filename="sales_report_q1.xlsx", fileSize=1024, mimeType="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"),
            ChatDocument(id="002", fileId="file_002", filename="sales_report_q2.xlsx", fileSize=2048, mimeType="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"),
            ChatDocument(id="003", fileId="file_003", filename="sales_report_q3.xlsx", fileSize=1536, mimeType="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet")
        ]
    )
    messages.append(message1)
    # Message 2: Excel analysis result
    message2 = ChatMessage(
        id="msg_002",
        workflowId=workflow.id,
        role="assistant",
        message=action2_result["result"],
        status="step",
        sequenceNr=2,
        publishedAt=datetime.now(UTC).isoformat(),
        actionId="action_002",
        actionMethod="excel",
        actionName="analyze",
        documentsLabel=action2_result["resultLabel"],
        documents=[
            ChatDocument(id="004", fileId="file_004", filename="sales_analysis_summary.xlsx", fileSize=3072, mimeType="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"),
            ChatDocument(id="005", fileId="file_005", filename="sales_trends_chart.png", fileSize=512, mimeType="image/png")
        ]
    )
    messages.append(message2)
    # Add messages to workflow
    workflow.messages = messages
    print(f"Created {len(messages)} workflow messages with documents")
    # Test document reference retrieval
    print("\n3. TESTING DOCUMENT REFERENCE RETRIEVAL")
    print("-" * 40)
    doc_refs = container.getDocumentReferenceList()
    print("Available Documents:")
    for i, doc in enumerate(doc_refs.get('chat', []), 1):
        print(f"{i}. {doc['documentReference']}")
        print(f"   Source: {doc['actionMethod']}.{doc['actionName']}")
        print(f"   Documents: {doc['documentCount']}")
        print(f"   Time: {doc['datetime']}")
        print()
    # Test document retrieval by reference
    print("4. TESTING DOCUMENT RETRIEVAL BY REFERENCE")
    print("-" * 40)
    test_refs = [
        "documentList_abc123_sales_documents",
        "documentList_def456_analysis_results"
    ]
    for ref in test_refs:
        documents = container.getChatDocumentsFromDocumentReference(ref)
        print(f"Reference: {ref}")
        print(f"Found {len(documents)} documents:")
        for doc in documents:
            print(f"  - {doc.filename} (ID: {doc.id}, Size: {doc.fileSize})")
        print()
    print("=" * 80)
    print("HANDOVER MECHANISM SUMMARY")
    print("=" * 80)
    print("✅ Documents are properly stored in workflow messages")
    print("✅ Result labels are correctly formatted")
    print("✅ Document references are retrievable")
    print("✅ Subsequent actions can find previous results")
    print("✅ Clear handover chain between actions")


if __name__ == "__main__":
    test_document_handover()

96
test_method_signatures.py Normal file
View file

@ -0,0 +1,96 @@
#!/usr/bin/env python3
"""
Test script to demonstrate the improved method signature format.
This shows how the AI will receive clear parameter information without automatic result labels.
"""
import sys
import os
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from modules.workflow.serviceContainer import ServiceContainer
from modules.interfaces.interfaceAppModel import User
from modules.interfaces.interfaceChatModel import ChatWorkflow
from datetime import datetime, UTC
def test_method_signatures():
    """Test the improved method signature format.

    Demonstration script, not a pytest case: prints the method signatures the
    AI receives, plus an example of how the AI should set result labels.
    It asserts nothing — success is judged by reading the output.
    """
    # Create test user and workflow
    user = User(
        id="test_user",
        username="testuser",
        language="en",
        mandateId="test_mandate"
    )
    workflow = ChatWorkflow(
        id="test_workflow",
        mandateId="test_mandate",
        status="active",
        currentRound=1,
        lastActivity=datetime.now(UTC).isoformat(),
        startedAt=datetime.now(UTC).isoformat(),
        messages=[]
    )
    # Create service container
    container = ServiceContainer(user, workflow)
    # Get and display method list
    print("=" * 80)
    print("IMPROVED METHOD SIGNATURES")
    print("=" * 80)
    print("The AI will now receive clear parameter information without automatic result labels.")
    print("The AI must set resultLabel according to the format: documentList_uuid_descriptive_label")
    print()
    method_list = container.getMethodsList()
    print("AVAILABLE METHODS:")
    print("-" * 40)
    for i, method in enumerate(method_list, 1):
        print(f"{i:2d}. {method}")
    print()
    print("=" * 80)
    print("EXAMPLE OF HOW AI SHOULD SET RESULT LABELS:")
    print("=" * 80)
    # Illustrative action plan showing resultLabel chaining between actions.
    print("""
{
  "status": "pending",
  "feedback": "I will analyze the Excel file and create a summary report.",
  "actions": [
    {
      "method": "excel",
      "action": "read",
      "parameters": {
        "fileId": "document_123_sales_data.xlsx",
        "connectionReference": "connection_456_msft_user@example.com",
        "sheetName": "Sheet1"
      },
      "resultLabel": "documentList_abc123_excel_data"
    },
    {
      "method": "document",
      "action": "analyze",
      "parameters": {
        "fileId": "documentList_abc123_excel_data"
      },
      "resultLabel": "documentList_def456_analysis_results"
    }
  ]
}
""")
    print("=" * 80)
    print("KEY IMPROVEMENTS:")
    print("=" * 80)
    print("1. Clear parameter types and descriptions")
    print("2. No automatic result labels - AI must set them")
    print("3. Consistent format: documentList_uuid_descriptive_label")
    print("4. Better parameter validation through type information")
    print("5. Clear handover between actions using result labels")


if __name__ == "__main__":
    test_method_signatures()

View file

@ -4,16 +4,17 @@ Test routine for WorkflowManager.workflowProcess()
""" """
import asyncio import asyncio
import logging
import sys import sys
import os import os
import json import json
from datetime import datetime, UTC, timedelta from datetime import datetime, UTC, timedelta
import uuid import uuid
from typing import List
print("Starting test_workflow.py...") print("Starting test_workflow.py...")
# Configure logging FIRST, before any other imports # Configure logging FIRST, before any other imports
import logging
logging.basicConfig( logging.basicConfig(
level=logging.DEBUG, level=logging.DEBUG,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
@ -24,10 +25,10 @@ logging.basicConfig(
force=True # Force reconfiguration even if already configured force=True # Force reconfiguration even if already configured
) )
logger = logging.getLogger(__name__) # logger = logging.getLogger(__name__)
print("Logger level:", logger.level) # print("Logger level:", logger.level)
logger.info("Logger is working!") # logger.info("Logger is working!")
print("Logger test done") # print("Logger test done")
# Set up test configuration # Set up test configuration
os.environ['POWERON_CONFIG_FILE'] = 'test_config.ini' os.environ['POWERON_CONFIG_FILE'] = 'test_config.ini'
@ -66,7 +67,7 @@ def create_test_workflow() -> ChatWorkflow:
id="test-workflow-001", id="test-workflow-001",
mandateId="test-mandate-001", mandateId="test-mandate-001",
status="running", status="running",
name="Business Intelligence Analysis Workflow", name="Candidate Evaluation and Selection Workflow",
currentRound=1, currentRound=1,
lastActivity=datetime.now(UTC).isoformat(), lastActivity=datetime.now(UTC).isoformat(),
startedAt=datetime.now(UTC).isoformat(), startedAt=datetime.now(UTC).isoformat(),
@ -77,36 +78,217 @@ def create_test_workflow() -> ChatWorkflow:
) )
def create_test_user_input() -> UserInputRequest: def create_test_user_input() -> UserInputRequest:
"""Create test user input with a meaningful business intelligence task""" """Create test user input with a candidate evaluation task"""
return UserInputRequest( return UserInputRequest(
prompt="""Please analyze the quarterly sales data and create a comprehensive business intelligence report. prompt="""I have following list of job profiles from candidates (3 job profiles as text files) and want to know, who is best suited for the position of product designer (file with criteria). Create an evaluation matrix and rate all candidates according to the matrix, then produce PowerPoint presentation for the management to decide and store it on the SharePoint for the account p.motsch valueon.
The task involves: The task involves:
1. Extract and analyze sales data from the provided Excel files 1. Analyze the job profiles of the 3 candidates
2. Identify key trends, patterns, and anomalies in the data 2. Review the product designer position criteria
3. Create visualizations (charts and graphs) to illustrate findings 3. Create a comprehensive evaluation matrix with relevant criteria
4. Generate a professional PowerPoint presentation summarizing the analysis 4. Rate each candidate against the evaluation matrix
5. Create a detailed markdown report with actionable insights 5. Generate a professional PowerPoint presentation for management decision
6. Search for industry benchmarks and best practices to compare our performance 6. Store the final presentation in SharePoint for p.motsch valueon account
7. Store the final reports in SharePoint for team access
Please ensure the analysis includes: Please ensure the evaluation includes:
- Sales performance by region and product category - Technical skills assessment
- Month-over-month growth trends - Experience level evaluation
- Customer segmentation analysis - Cultural fit analysis
- Revenue forecasting for the next quarter - Portfolio quality review
- Recommendations for improving sales performance - Communication skills assessment
- Overall suitability score
The output should be suitable for executive review and include both high-level summaries and detailed technical analysis.""", The output should be suitable for executive review and include both detailed analysis and clear recommendations.""",
listFileId=["sales_data_q1.xlsx", "sales_data_q2.xlsx", "customer_data.csv"], listFileId=["candidate_1_profile.txt", "candidate_2_profile.txt", "candidate_3_profile.txt", "product_designer_criteria.txt"],
userLanguage="en" userLanguage="en"
) )
def create_test_files(chat_interface) -> List[str]:
    """Create the candidate-evaluation fixture files in the database.

    Builds three candidate profile text files plus the product-designer
    criteria file, persists each through the component-objects interface,
    and returns the list of resulting file IDs for use as workflow input.

    Args:
        chat_interface: Chat interface whose ``currentUser`` provides the
            user context under which the files are created.

    Returns:
        List[str]: Database IDs of the created files. For any file whose
        creation fails, a deterministic placeholder ID is appended instead
        so the workflow under test still receives a complete file list.
    """
    test_files = []

    # Import the component interface
    # NOTE(review): local import — presumably to avoid import-time side
    # effects before the test config environment variable is set; confirm.
    from modules.interfaces.interfaceComponentObjects import getInterface as getComponentObjects

    # Get component interface with the same user context
    component_interface = getComponentObjects(chat_interface.currentUser)

    # Candidate 1 Profile — strongest technical/communication profile in the set
    candidate_1_content = """CANDIDATE 1: Sarah Johnson
Position: Senior Product Designer
Experience: 8 years
TECHNICAL SKILLS:
- Figma, Sketch, Adobe Creative Suite (Expert)
- Prototyping tools: Framer, Principle (Advanced)
- Design systems and component libraries (Expert)
- User research and usability testing (Advanced)
- HTML/CSS/JavaScript basics (Intermediate)
EXPERIENCE:
- Senior Product Designer at TechCorp (3 years)
- Product Designer at StartupXYZ (3 years)
- UI/UX Designer at DesignAgency (2 years)
PORTFOLIO HIGHLIGHTS:
- Redesigned e-commerce platform increasing conversion by 25%
- Created comprehensive design system for 50+ product team
- Led user research for mobile banking app with 1M+ users
COMMUNICATION SKILLS:
- Excellent presentation skills
- Experience presenting to C-level executives
- Strong stakeholder management
- Mentored 5 junior designers
CULTURAL FIT:
- Collaborative team player
- Proactive problem solver
- Adapts quickly to new environments
- Values user-centered design approach"""

    # Candidate 2 Profile — mid-level profile, below the senior criteria bar
    candidate_2_content = """CANDIDATE 2: Michael Chen
Position: Product Designer
Experience: 5 years
TECHNICAL SKILLS:
- Figma, Sketch, Adobe Creative Suite (Advanced)
- Prototyping tools: InVision, Marvel (Intermediate)
- Design systems (Intermediate)
- User research (Intermediate)
- No coding experience
EXPERIENCE:
- Product Designer at MidSizeTech (3 years)
- Junior Designer at CreativeStudio (2 years)
PORTFOLIO HIGHLIGHTS:
- Designed mobile app for local restaurant chain
- Created brand identity for startup
- Improved user flow for SaaS dashboard
COMMUNICATION SKILLS:
- Good presentation skills
- Works well in small teams
- Some experience with stakeholders
- Learning to mentor others
CULTURAL FIT:
- Quiet but dedicated worker
- Detail-oriented
- Prefers structured environments
- Focuses on visual design quality"""

    # Candidate 3 Profile — research-heavy profile with leadership strengths
    candidate_3_content = """CANDIDATE 3: Emma Rodriguez
Position: UX/UI Designer
Experience: 6 years
TECHNICAL SKILLS:
- Figma, Sketch, Adobe Creative Suite (Advanced)
- Prototyping tools: Framer, Axure (Advanced)
- Design systems (Advanced)
- User research and analytics (Expert)
- Basic React/JavaScript (Intermediate)
EXPERIENCE:
- UX/UI Designer at EnterpriseCorp (4 years)
- UX Designer at ConsultingFirm (2 years)
PORTFOLIO HIGHLIGHTS:
- Led UX research for enterprise software used by 10K+ users
- Implemented data-driven design improvements increasing user satisfaction by 30%
- Created accessibility-compliant design system
- Conducted international user research studies
COMMUNICATION SKILLS:
- Outstanding presentation and storytelling skills
- Experience with international stakeholders
- Strong analytical communication
- Excellent at translating user insights to business value
CULTURAL FIT:
- Natural leader and team motivator
- Strategic thinker
- Adapts well to change
- Passionate about user advocacy"""

    # Product Designer Criteria — the rubric (including weights) the workflow
    # is expected to evaluate the three candidates against
    criteria_content = """PRODUCT DESIGNER POSITION CRITERIA
Company: ValueOn
Department: Product Development
Level: Senior
REQUIRED SKILLS:
- Expert proficiency in Figma and modern design tools
- Strong understanding of user-centered design principles
- Experience with design systems and component libraries
- Ability to conduct user research and usability testing
- Basic understanding of front-end development (HTML/CSS/JavaScript)
REQUIRED EXPERIENCE:
- Minimum 5 years in product design
- Experience working with cross-functional teams
- Portfolio demonstrating complex product design solutions
- Experience with SaaS or enterprise software preferred
COMMUNICATION REQUIREMENTS:
- Excellent presentation skills
- Ability to communicate design decisions to stakeholders
- Experience presenting to management/executives
- Strong collaboration and feedback skills
CULTURAL FIT:
- Team-oriented and collaborative
- Proactive and self-motivated
- Adaptable to fast-paced environment
- Passionate about user experience
RESPONSIBILITIES:
- Lead design for core product features
- Collaborate with product managers and engineers
- Conduct user research and usability testing
- Create and maintain design system
- Present design solutions to stakeholders
- Mentor junior designers
EVALUATION WEIGHTS:
- Technical Skills: 30%
- Experience: 25%
- Communication: 20%
- Cultural Fit: 15%
- Portfolio Quality: 10%"""

    # Create files in database — filenames must match the IDs referenced by
    # the user-input fixture (create_test_user_input) before they are
    # replaced with real database IDs.
    file_contents = [
        ("candidate_1_profile.txt", candidate_1_content),
        ("candidate_2_profile.txt", candidate_2_content),
        ("candidate_3_profile.txt", candidate_3_content),
        ("product_designer_criteria.txt", criteria_content)
    ]

    for filename, content in file_contents:
        try:
            # Create file in database using the component interface
            file_item = component_interface.saveUploadedFile(
                fileContent=content.encode('utf-8'),
                fileName=filename
            )
            test_files.append(file_item.id)
            # logger.info(f"Created test file: {filename} (ID: {file_item.id})")
        except Exception as e:
            # logger.error(f"Error creating test file {filename}: {str(e)}")
            # Create a dummy file ID if creation fails — deliberate
            # best-effort so the test can still run a full workflow.
            test_files.append(f"file_{filename.replace('.', '_')}")

    return test_files
async def test_workflow_process(): async def test_workflow_process():
print("Inside test_workflow_process()") print("Inside test_workflow_process()")
"""Test the workflowProcess function""" """Test the workflowProcess function"""
try: try:
logger.info("Starting workflow process test...") # logger.info("Starting workflow process test...")
# Create test data # Create test data
test_user = create_test_user() test_user = create_test_user()
@ -128,7 +310,7 @@ async def test_workflow_process():
privilege=UserPrivilege.USER, privilege=UserPrivilege.USER,
authenticationAuthority=AuthAuthority.LOCAL authenticationAuthority=AuthAuthority.LOCAL
) )
logger.info(f"Created test user in database: {created_user.id}") # logger.info(f"Created test user in database: {created_user.id}")
# Create test connection through AppObjects interface # Create test connection through AppObjects interface
from modules.interfaces.interfaceAppObjects import getInterface as getAppObjects from modules.interfaces.interfaceAppObjects import getInterface as getAppObjects
@ -141,7 +323,7 @@ async def test_workflow_process():
externalEmail="testuser@example.com", externalEmail="testuser@example.com",
status=ConnectionStatus.ACTIVE status=ConnectionStatus.ACTIVE
) )
logger.info(f"Created test connection: {test_connection.id}") # logger.info(f"Created test connection: {test_connection.id}")
# Create test token for the connection # Create test token for the connection
test_token = Token( test_token = Token(
@ -154,24 +336,23 @@ async def test_workflow_process():
createdAt=datetime.now(UTC) createdAt=datetime.now(UTC)
) )
app_interface.saveToken(test_token) app_interface.saveToken(test_token)
logger.info(f"Created test token for connection: {test_token.id}") # logger.info(f"Created test token for connection: {test_token.id}")
logger.info(f"Test user: {created_user.username}") # logger.info(f"Test user: {created_user.username}")
logger.info(f"Test workflow: {test_workflow.id}") # logger.info(f"Test workflow: {test_workflow.id}")
# Log the full prompt in JSON format # Log the full prompt in JSON format
logger.debug("=" * 60) # logger.debug("=" * 60)
logger.debug("USER INPUT PROMPT (JSON):") # logger.debug("USER INPUT PROMPT (JSON):")
logger.debug("=" * 60) # logger.debug("=" * 60)
prompt_data = { prompt_data = {
"prompt": test_user_input.prompt, "prompt": test_user_input.prompt,
"listFileId": test_user_input.listFileId, "listFileId": test_user_input.listFileId,
"userLanguage": test_user_input.userLanguage "userLanguage": test_user_input.userLanguage
} }
logger.debug(json.dumps(prompt_data, indent=2, ensure_ascii=False)) # logger.debug(json.dumps(prompt_data, indent=2, ensure_ascii=False))
logger.debug("=" * 60) # logger.debug("=" * 60)
# logger.debug(f"Test files: {test_user_input.listFileId}")
logger.debug(f"Test files: {test_user_input.listFileId}")
# Create test workflow in database through ChatObjects interface # Create test workflow in database through ChatObjects interface
from modules.interfaces.interfaceChatObjects import getInterface as getChatObjects from modules.interfaces.interfaceChatObjects import getInterface as getChatObjects
@ -186,31 +367,38 @@ async def test_workflow_process():
"lastActivity": test_workflow.lastActivity "lastActivity": test_workflow.lastActivity
} }
created_workflow = chat_interface.createWorkflow(workflow_data) created_workflow = chat_interface.createWorkflow(workflow_data)
logger.info(f"Created test workflow: {created_workflow.id}") # logger.info(f"Created test workflow: {created_workflow.id}")
# Update the test_workflow object with the created workflow's ID # Update the test_workflow object with the created workflow's ID
test_workflow.id = created_workflow.id test_workflow.id = created_workflow.id
# Create test files in database
# logger.info("Creating test files for candidate evaluation...")
test_file_ids = create_test_files(chat_interface)
# logger.info(f"Created {len(test_file_ids)} test files: {test_file_ids}")
# Update user input with real file IDs
test_user_input.listFileId = test_file_ids
# logger.info(f"Updated user input with file IDs: {test_user_input.listFileId}")
# Initialize WorkflowManager # Initialize WorkflowManager
workflow_manager = WorkflowManager(chat_interface, created_user) workflow_manager = WorkflowManager(chat_interface, created_user)
logger.info("WorkflowManager initialized") # logger.info("WorkflowManager initialized")
# Test the workflowProcess function # Test the workflowProcess function
logger.info("Calling workflowProcess...") # logger.info("Calling workflowProcess...")
task = await workflow_manager.workflowProcess(test_user_input, test_workflow) task = await workflow_manager.workflowProcess(test_user_input, test_workflow)
# Log results # Log results
if task: if task:
logger.debug("Task created successfully!") # logger.debug("Task created successfully!")
logger.debug(f"Task ID: {task.id}") # logger.debug(f"Task ID: {task.id}")
logger.debug(f"Task Status: {task.status}") # logger.debug(f"Task Status: {task.status}")
logger.debug(f"Task Feedback: {task.feedback}") # logger.debug(f"Task Feedback: {task.feedback}")
logger.info(f"Number of actions: {len(task.actionList) if task.actionList else 0}") # logger.info(f"Number of actions: {len(task.actionList) if task.actionList else 0}")
# logger.debug("=" * 60)
# Log the full task object in JSON format # logger.debug("TASK OBJECT (JSON):")
logger.debug("=" * 60) # logger.debug("=" * 60)
logger.debug("TASK OBJECT (JSON):")
logger.debug("=" * 60)
task_data = { task_data = {
"id": task.id, "id": task.id,
"status": task.status, "status": task.status,
@ -219,44 +407,45 @@ async def test_workflow_process():
{ {
"execMethod": action.execMethod, "execMethod": action.execMethod,
"execAction": action.execAction, "execAction": action.execAction,
"execParameters": action.execParameters "execParameters": action.execParameters,
"execResultLabel": action.execResultLabel
} for action in (task.actionList or []) } for action in (task.actionList or [])
] if task.actionList else [] ] if task.actionList else []
} }
logger.debug(json.dumps(task_data, indent=2, ensure_ascii=False)) # logger.debug(json.dumps(task_data, indent=2, ensure_ascii=False))
logger.debug("=" * 60) # logger.debug("=" * 60)
if task.actionList: if task.actionList:
for i, action in enumerate(task.actionList): for i, action in enumerate(task.actionList):
logger.info(f"Action {i+1}: {action.execMethod}.{action.execAction}") # logger.info(f"Action {i+1}: {action.execMethod}.{action.execAction}")
logger.info(f" Parameters: {action.execParameters}") # logger.info(f" Parameters: {action.execParameters}")
pass
else: else:
logger.warning("No task was created") # logger.warning("No task was created")
pass
logger.info("Test completed successfully!") # logger.info("Test completed successfully!")
return task return task
except Exception as e: except Exception as e:
logger.error(f"❌ Test failed with error: {str(e)}") # logger.error(f"❌ Test failed with error: {str(e)}")
logger.exception("Full traceback:") # logger.exception("Full traceback:")
raise raise
async def main(): async def main():
print("Inside main()") print("Inside main()")
logger.info("=" * 50) # logger.info("=" * 50)
logger.info("BUSINESS INTELLIGENCE WORKFLOW TEST") # logger.info("CANDIDATE EVALUATION WORKFLOW TEST")
logger.info("=" * 50) # logger.info("=" * 50)
try: try:
task = await test_workflow_process() task = await test_workflow_process()
logger.info("=" * 50) # logger.info("=" * 50)
logger.info("TEST COMPLETED SUCCESSFULLY") # logger.info("TEST COMPLETED SUCCESSFULLY")
logger.info("=" * 50) # logger.info("=" * 50)
return task return task
except Exception as e: except Exception as e:
logger.error("=" * 50) # logger.error("=" * 50)
logger.error("TEST FAILED") # logger.error("TEST FAILED")
logger.error("=" * 50) # logger.error("=" * 50)
raise raise
if __name__ == "__main__": if __name__ == "__main__":