chat models adapted

This commit is contained in:
ValueOn AG 2025-08-29 11:22:49 +02:00
parent e450e530aa
commit 3131e295f0
15 changed files with 370 additions and 187 deletions

12
app.py
View file

@ -59,6 +59,16 @@ def initLogging():
return not any(pattern in record.msg for pattern in http_debug_patterns)
return True
# Add filter to remove emojis from log messages to prevent Unicode encoding errors
class EmojiFilter(logging.Filter):
    """Logging filter that replaces every run of non-ASCII characters in a
    record's message with the literal marker '[EMOJI]'.

    Rationale: console/file handlers configured with a narrow encoding
    (e.g. Windows cp1252) raise UnicodeEncodeError when a message contains
    emojis; sanitizing the message here keeps logging alive. Note this also
    rewrites any other non-ASCII text (accents, umlauts), not just emojis.
    Always returns True, so no record is ever dropped.
    """

    def filter(self, record):
        """Sanitize record.msg in place; keep the record regardless."""
        message = record.msg
        if isinstance(message, str) and not message.isascii():
            import re
            # Collapse each contiguous run of non-ASCII chars into one marker.
            record.msg = re.sub(r'[^\x00-\x7F]+', '[EMOJI]', message)
        return True
# Configure handlers based on config
handlers = []
@ -69,6 +79,7 @@ def initLogging():
consoleHandler.addFilter(ChromeDevToolsFilter())
consoleHandler.addFilter(HttpcoreStarFilter())
consoleHandler.addFilter(HTTPDebugFilter())
consoleHandler.addFilter(EmojiFilter())
handlers.append(consoleHandler)
# Add file handler if enabled
@ -97,6 +108,7 @@ def initLogging():
fileHandler.addFilter(ChromeDevToolsFilter())
fileHandler.addFilter(HttpcoreStarFilter())
fileHandler.addFilter(HTTPDebugFilter())
fileHandler.addFilter(EmojiFilter())
handlers.append(fileHandler)
# Configure the root logger

View file

@ -1,6 +1,7 @@
import logging
from typing import Any, Dict, List, Optional
from datetime import datetime, UTC
import re
from modules.shared.timezoneUtils import get_utc_timestamp
from .documentUtility import (
getFileExtension,

View file

@ -131,10 +131,9 @@ class HandlingTasks:
# Log the full AI response for task planning
logger.info("=== TASK PLANNING AI RESPONSE RECEIVED ===")
logger.info(f"Response length: {len(prompt) if prompt else 0}")
logger.info(f"Response preview: {prompt[:500] if prompt else 'None'}...")
logger.info("=== FULL TASK PLANNING AI RESPONSE ===")
logger.info(prompt)
logger.info("=== END TASK PLANNING AI RESPONSE ===")
logger.debug("=== FULL TASK PLANNING AI RESPONSE ===")
logger.debug(prompt)
logger.debug("=== END TASK PLANNING AI RESPONSE ===")
# Inline _parseTaskPlanResponse logic
try:
@ -160,6 +159,26 @@ class HandlingTasks:
if not task_plan_dict.get('tasks'):
raise ValueError("Task plan contains no tasks")
# LANGUAGE DETECTION: Determine user language once for the entire workflow
# Priority: 1. languageUserDetected from AI response, 2. service.user.language, 3. "en"
detected_language = task_plan_dict.get('languageUserDetected', '').strip()
service_user_language = getattr(self.service.user, 'language', '') if self.service and self.service.user else ''
if detected_language and len(detected_language) == 2: # Valid language code like "en", "de", "fr"
user_language = detected_language
logger.info(f"Using detected language from AI response: {user_language}")
elif service_user_language and len(service_user_language) == 2:
user_language = service_user_language
logger.info(f"Using language from service user object: {user_language}")
else:
user_language = "en"
logger.info(f"Using default language: {user_language}")
# Set the detected language in the service for use throughout the workflow
if self.service and self.service.user:
self.service.user.language = user_language
logger.info(f"Set workflow user language to: {user_language}")
tasks = []
for i, task_dict in enumerate(task_plan_dict.get('tasks', [])):
if not isinstance(task_dict, dict):
@ -193,6 +212,7 @@ class HandlingTasks:
self.service.setWorkflowTotals(total_tasks=total_tasks)
logger.info(f"Task plan generated successfully with {len(tasks)} tasks")
logger.info(f"Workflow user language set to: {user_language}")
# PHASE 3: Create chat message containing the task plan
await self.createTaskPlanMessage(task_plan, workflow)
@ -445,9 +465,9 @@ class HandlingTasks:
raise ValueError("AI service returned no response")
# Log the full AI response for debugging
logger.info("=== FULL AI RESPONSE ===")
logger.info(prompt)
logger.info("=== END AI RESPONSE ===")
logger.debug("=== FULL AI RESPONSE ===")
logger.debug(prompt)
logger.debug("=== END AI RESPONSE ===")
# Inline parseActionResponse logic here
json_start = prompt.find('{')
@ -949,9 +969,9 @@ class HandlingTasks:
# Log the full AI response for result review
logger.info("=== RESULT REVIEW AI RESPONSE RECEIVED ===")
logger.info(f"Response length: {len(response) if response else 0}")
logger.info("=== FULL RESULT REVIEW AI RESPONSE ===")
logger.info(response)
logger.info("=== END RESULT REVIEW AI RESPONSE ===")
logger.debug("=== FULL RESULT REVIEW AI RESPONSE ===")
logger.debug(response)
logger.debug("=== END RESULT REVIEW AI RESPONSE ===")
# Inline parseReviewResponse logic here
json_start = response.find('{')

View file

@ -12,7 +12,7 @@ logger = logging.getLogger(__name__)
# Prompt creation helpers extracted from managerChat.py
def createTaskPlanningPrompt(context: TaskContext, service) -> str:
"""Create enhanced prompt for task planning with user-friendly message generation"""
"""Create enhanced prompt for task planning with user-friendly message generation and language detection"""
# Get user language directly from service.user.language
user_language = service.user.language if service and service.user else 'en'
@ -30,12 +30,32 @@ AVAILABLE DOCUMENTS: {', '.join(available_documents)}
INSTRUCTIONS:
1. Analyze the user request and available documents
2. Break down the request into 2-4 meaningful high-level task steps
2. Group related topics and sequential steps into single, comprehensive tasks
3. Focus on business outcomes, not technical operations
4. Each task should produce meaningful, usable outputs
5. Ensure proper handover between tasks using result labels
6. Generate user-friendly messages for each task in the user's language ({user_language})
7. Return a JSON object with the exact structure shown below
7. Detect the language of the user request and include it in languageUserDetected
8. Return a JSON object with the exact structure shown below
TASK GROUPING PRINCIPLES:
- COMBINE RELATED TOPICS: Group related subjects, sequential steps, or workflow-structured activities into single tasks
- SEQUENTIAL WORKFLOWS: If the user says "first do this, then that, then that" create ONE task that handles the entire sequence
- SIMILAR CONTENT: If multiple items deal with the same subject matter combine into ONE comprehensive task
- ONLY SPLIT WHEN DIFFERENT: Create separate tasks ONLY when the user explicitly wants different, independent things
EXAMPLES OF GOOD TASK GROUPING:
COMBINE INTO ONE TASK:
- "Analyze the documents, extract key insights, and create a summary report" ONE task: "Analyze documents and create comprehensive summary report"
- "First check my emails, then respond to urgent ones, then organize my inbox" ONE task: "Process and organize email inbox with priority responses"
- "Review the budget, analyze spending patterns, and suggest cost-cutting measures" ONE task: "Comprehensive budget analysis with optimization recommendations"
- "Create a business strategy, develop marketing plan, and prepare presentation" ONE task: "Develop complete business strategy with marketing plan and presentation"
SPLIT INTO MULTIPLE TASKS:
- "Create a business strategy for Q4" AND "Check my emails for messages from my assistant" TWO separate tasks (different subjects)
- "Analyze customer feedback" AND "Prepare quarterly financial report" TWO separate tasks (different business areas)
- "Review project timeline" AND "Update employee handbook" TWO separate tasks (unrelated activities)
TASK PLANNING PRINCIPLES:
- Break down complex requests into logical, sequential steps
@ -44,15 +64,18 @@ TASK PLANNING PRINCIPLES:
- Each task should produce results that can be used by subsequent tasks
- Ensure clear dependencies and handovers between tasks
- Provide clear, actionable user messages in the user's language ({user_language})
- Group related activities to minimize task fragmentation
- Only create multiple tasks when dealing with truly different, independent objectives
REQUIRED JSON STRUCTURE:
{{
"overview": "Brief description of the overall plan",
"userMessage": "User-friendly message explaining the task plan in {user_language}",
"languageUserDetected": "en", // Language code detected from user request (en, de, fr, it, es, etc.)
"tasks": [
{{
"id": "task_1",
"objective": "Clear business objective this task accomplishes",
"objective": "Clear business objective this task accomplishes (combining related activities)",
"dependencies": ["task_0"], // IDs of tasks that must complete first
"success_criteria": ["criteria1", "criteria2"],
"estimated_complexity": "low|medium|high",
@ -61,16 +84,16 @@ REQUIRED JSON STRUCTURE:
]
}}
EXAMPLES OF GOOD TASK OBJECTIVES:
EXAMPLES OF GOOD TASK OBJECTIVES (COMBINING RELATED ACTIVITIES):
- "Analyze documents and extract key insights for business communication"
- "Create professional business communication incorporating analyzed information"
- "Execute business communication using specified channels"
- "Document and store all business communication outcomes"
- "Execute business communication using specified channels and document outcomes"
- "Develop comprehensive business strategy with implementation roadmap and success metrics"
EXAMPLES OF GOOD SUCCESS CRITERIA:
- "Key insights extracted and ready for business use"
- "Professional communication created with clear business value"
- "Business communication successfully delivered"
- "Business communication successfully delivered and documented"
- "All outcomes properly documented and accessible"
EXAMPLES OF BAD TASK OBJECTIVES:
@ -78,6 +101,12 @@ EXAMPLES OF BAD TASK OBJECTIVES:
- "Convert data to CSV" (implementation detail - should be "Structure data for analysis")
- "Send email" (too specific - should be "Deliver business communication")
LANGUAGE DETECTION:
- Analyze the user request text to identify the language
- Use standard language codes: en (English), de (German), fr (French), it (Italian), es (Spanish), etc.
- If the language cannot be determined, use "en" as default
- Include the detected language in the languageUserDetected field
NOTE: Respond with ONLY the JSON object. Do not include any explanatory text."""
async def createActionDefinitionPrompt(context: TaskContext, service) -> str:

View file

@ -5,6 +5,8 @@ import logging
from datetime import datetime
import uuid
from pydantic import BaseModel
import threading
import time
from modules.shared.attributeUtils import to_dict
from modules.shared.timezoneUtils import get_utc_timestamp
@ -39,6 +41,11 @@ class DatabaseConnector:
self._tablesCache: Dict[str, List[Dict[str, Any]]] = {}
self._tableMetadataCache: Dict[str, TableCache] = {} # Cache for table metadata (record IDs, etc.)
# File locks with timeout protection
self._file_locks = {}
self._lock_manager = threading.Lock()
self._lock_timeouts = {} # Track when locks were acquired
# Initialize system table
self._systemTableName = "_system"
self._initializeSystemTable()
@ -100,6 +107,30 @@ class DatabaseConnector:
"""Returns the full path to a record file"""
return os.path.join(self._getTablePath(table), f"{recordId}.json")
def _get_file_lock(self, filepath: str, timeout_seconds: int = 30):
"""Get file lock with timeout protection"""
with self._lock_manager:
if filepath not in self._file_locks:
self._file_locks[filepath] = threading.Lock()
lock = self._file_locks[filepath]
# Check if lock is stale (held too long)
if filepath in self._lock_timeouts:
lock_age = time.time() - self._lock_timeouts[filepath]
if lock_age > timeout_seconds:
logger.warning(f"Stale lock detected for {filepath}, age: {lock_age}s")
# Force release stale lock
try:
lock.release()
except:
pass
# Create new lock
self._file_locks[filepath] = threading.Lock()
lock = self._file_locks[filepath]
return lock
def _ensureTableDirectory(self, table: str) -> bool:
"""Ensures the table directory exists."""
if table == self._systemTableName:
@ -153,7 +184,17 @@ class DatabaseConnector:
def _saveRecord(self, table: str, recordId: str, record: Dict[str, Any]) -> bool:
"""Saves a single record to the table."""
recordPath = self._getRecordPath(table, recordId)
lock = self._get_file_lock(recordPath)
try:
# Acquire lock with timeout
if not lock.acquire(timeout=30): # 30 second timeout
raise TimeoutError(f"Could not acquire lock for {recordPath} within 30 seconds")
# Record lock acquisition time
self._lock_timeouts[recordPath] = time.time()
# Ensure table directory exists
if not self._ensureTableDirectory(table):
raise ValueError(f"Error creating table directory for {table}")
@ -175,7 +216,6 @@ class DatabaseConnector:
record["_modifiedBy"] = self.userId
# Save the record file using atomic write
recordPath = self._getRecordPath(table, recordId)
tempPath = recordPath + '.tmp'
# Ensure directory exists
@ -230,6 +270,16 @@ class DatabaseConnector:
except:
pass
return False
finally:
# ALWAYS release lock, even on error
try:
if lock.locked():
lock.release()
if recordPath in self._lock_timeouts:
del self._lock_timeouts[recordPath]
except Exception as release_error:
logger.error(f"Error releasing lock for {recordPath}: {release_error}")
def _loadTable(self, table: str) -> List[Dict[str, Any]]:
"""Loads all records from a table folder."""
@ -358,14 +408,36 @@ class DatabaseConnector:
# Create metadata file path
metadataPath = os.path.join(self._getTablePath(table), "_metadata.json")
# Save metadata
with open(metadataPath, 'w', encoding='utf-8') as f:
json.dump(metadata, f, indent=2, ensure_ascii=False)
# Get lock for metadata file
lock = self._get_file_lock(metadataPath)
# Update cache
self._tableMetadataCache[table] = metadata
return True
try:
# Acquire lock with timeout
if not lock.acquire(timeout=30):
raise TimeoutError(f"Could not acquire lock for metadata {metadataPath} within 30 seconds")
# Record lock acquisition time
self._lock_timeouts[metadataPath] = time.time()
# Save metadata
with open(metadataPath, 'w', encoding='utf-8') as f:
json.dump(metadata, f, indent=2, ensure_ascii=False)
# Update cache
self._tableMetadataCache[table] = metadata
return True
finally:
# ALWAYS release lock
try:
if lock.locked():
lock.release()
if metadataPath in self._lock_timeouts:
del self._lock_timeouts[metadataPath]
except Exception as release_error:
logger.error(f"Error releasing metadata lock for {metadataPath}: {release_error}")
except Exception as e:
logger.error(f"Error saving metadata for table {table}: {e}")
return False

View file

@ -46,7 +46,7 @@ class ActionResult(BaseModel, ModelMixin):
resultLabel: Optional[str] = Field(None, description="Label for document routing (set by action handler, not by action methods)")
@classmethod
def success(cls, documents: List[ActionDocument] = None) -> 'ActionResult':
def iSsuccess(cls, documents: List[ActionDocument] = None) -> 'ActionResult':
"""Create a successful action result
Note: Do not set resultLabel - this is managed by the action handler
@ -57,7 +57,7 @@ class ActionResult(BaseModel, ModelMixin):
)
@classmethod
def failure(cls, error: str, documents: List[ActionDocument] = None) -> 'ActionResult':
def iSfailure(cls, error: str, documents: List[ActionDocument] = None) -> 'ActionResult':
"""Create a failed action result
Note: Do not set resultLabel - this is managed by the action handler

View file

@ -158,6 +158,10 @@ class ChatObjects:
status=workflow.get("status", "running"),
name=workflow.get("name"),
currentRound=workflow.get("currentRound", 1),
currentTask=workflow.get("currentTask", 0),
currentAction=workflow.get("currentAction", 0),
totalTasks=workflow.get("totalTasks", 0),
totalActions=workflow.get("totalActions", 0),
lastActivity=workflow.get("lastActivity", get_utc_timestamp()),
startedAt=workflow.get("startedAt", get_utc_timestamp()),
logs=[ChatLog(**log) for log in workflow.get("logs", [])],
@ -199,6 +203,10 @@ class ChatObjects:
status=created.get("status", "running"),
name=created.get("name"),
currentRound=created.get("currentRound", 1),
currentTask=created.get("currentTask", 0),
currentAction=created.get("currentAction", 0),
totalTasks=created.get("totalTasks", 0),
totalActions=created.get("totalActions", 0),
lastActivity=created.get("lastActivity", currentTime),
startedAt=created.get("startedAt", currentTime),
logs=[],
@ -232,16 +240,20 @@ class ChatObjects:
status=updated.get("status", workflow.status),
name=updated.get("name", workflow.name),
currentRound=updated.get("currentRound", workflow.currentRound),
currentTask=updated.get("currentTask", workflow.currentTask),
currentAction=updated.get("currentAction", workflow.currentAction),
totalTasks=updated.get("totalTasks", workflow.totalTasks),
totalActions=updated.get("totalActions", workflow.totalActions),
lastActivity=updated.get("lastActivity", workflow.lastActivity),
startedAt=updated.get("startedAt", workflow.startedAt),
logs=[ChatLog(**log) for log in updated.get("logs", workflow.logs)],
messages=[ChatMessage(**msg) for msg in updated.get("messages", workflow.messages)],
stats=ChatStat(**updated.get("dataStats", workflow.stats.dict() if workflow.stats else {})) if updated.get("dataStats") or workflow.stats else ChatStat(
bytesSent=0,
bytesReceived=0,
tokenCount=0,
processingTime=0
),
stats=ChatStat(**updated.get("dataStats", workflow.stats.dict() if workflow.stats else {})) if updated.get("dataStats") or workflow.stats else ChatStat(
bytesSent=0,
bytesReceived=0,
tokenCount=0,
processingTime=0
),
mandateId=updated.get("mandateId", workflow.mandateId)
)

View file

@ -14,13 +14,16 @@ from modules.shared.timezoneUtils import get_utc_timestamp
logger = logging.getLogger(__name__)
class MethodAi(MethodBase):
"""AI method implementation for direct AI processing"""
"""AI processing methods."""
def __init__(self, serviceCenter: Any):
"""Initialize the AI method"""
super().__init__(serviceCenter)
def __init__(self, service):
super().__init__(service)
self.name = "ai"
self.description = "Handle direct AI processing for any type of task"
self.description = "AI processing methods"
def _format_timestamp_for_filename(self) -> str:
"""Format current timestamp as YYYYMMDD-hhmmss for filenames."""
return datetime.now(UTC).strftime("%Y%m%d-%H%M%S")
@action
async def process(self, parameters: Dict[str, Any]) -> ActionResult:
@ -44,7 +47,7 @@ class MethodAi(MethodBase):
customInstructions = parameters.get("customInstructions", "")
if not aiPrompt:
return ActionResult.failure(
return ActionResult.isFailure(
error="AI prompt is required"
)
@ -174,13 +177,12 @@ class MethodAi(MethodBase):
result = await self.service.callAiTextBasic(enhanced_prompt, context)
# Create result document
timestamp = int(get_utc_timestamp())
fileName = f"ai_{processingMode}_{timestamp}{output_extension}"
fileName = f"ai_{processingMode}_{self._format_timestamp_for_filename()}{output_extension}"
# Return result in the standard ActionResult format
return ActionResult.success(
return ActionResult.isSuccess(
documents=[{
"documentName": fileName,
"documentData": {
@ -194,6 +196,6 @@ class MethodAi(MethodBase):
except Exception as e:
logger.error(f"Error in AI processing: {str(e)}")
return ActionResult.failure(
return ActionResult.isFailure(
error=str(e)
)

View file

@ -4,6 +4,8 @@ Handles document operations using the document service.
"""
import logging
import os
import re
from typing import Dict, Any, List, Optional
from datetime import datetime, UTC
@ -22,6 +24,10 @@ class MethodDocument(MethodBase):
self.name = "document"
self.description = "Handle document operations like extraction and analysis"
def _format_timestamp_for_filename(self) -> str:
"""Format current timestamp as YYYYMMDD-hhmmss for filenames."""
return datetime.now(UTC).strftime("%Y%m%d-%H%M%S")
@action
async def extract(self, parameters: Dict[str, Any]) -> ActionResult:
"""
@ -40,18 +46,18 @@ class MethodDocument(MethodBase):
includeMetadata = parameters.get("includeMetadata", True)
if not documentList:
return ActionResult.failure(
return ActionResult.isFailure(
error="Document list reference is required"
)
if not aiPrompt:
return ActionResult.failure(
return ActionResult.isFailure(
error="AI prompt is required"
)
chatDocuments = self.service.getChatDocumentsFromDocumentList(documentList)
if not chatDocuments:
return ActionResult.failure(
return ActionResult.isFailure(
error="No documents found for the provided reference"
)
@ -83,7 +89,7 @@ class MethodDocument(MethodBase):
continue
if not all_extracted_content:
return ActionResult.failure(
return ActionResult.isFailure(
error="No content could be extracted from any documents"
)
@ -140,7 +146,7 @@ class MethodDocument(MethodBase):
# Create output fileName based on original fileName and target format
original_fileName = chatDocument.fileName
base_name = original_fileName.rsplit('.', 1)[0] if '.' in original_fileName else original_fileName
output_fileName = f"{base_name}_extracted_{get_utc_timestamp()}{final_extension}"
output_fileName = f"{base_name}_extracted_{self._format_timestamp_for_filename()}{final_extension}"
# Create result data for this document
result_data = {
@ -159,12 +165,12 @@ class MethodDocument(MethodBase):
"mimeType": final_mime_type
})
return ActionResult.success(
return ActionResult.isSuccess(
documents=output_documents
)
except Exception as e:
logger.error(f"Error extracting content: {str(e)}")
return ActionResult.failure(
return ActionResult.isFailure(
error=str(e)
)
@ -186,12 +192,12 @@ class MethodDocument(MethodBase):
include_metadata = parameters.get("includeMetadata", True)
if not document_list:
return ActionResult.failure(
return ActionResult.isFailure(
error="Document list is required for generation"
)
if not expected_document_formats or len(expected_document_formats) == 0:
return ActionResult.failure(
return ActionResult.isFailure(
error="Expected document formats specification is required"
)
@ -200,7 +206,7 @@ class MethodDocument(MethodBase):
logger.info(f"Found {len(chat_documents)} chat documents")
if not chat_documents:
return ActionResult.failure(
return ActionResult.isFailure(
error="No documents found for the provided documentList reference"
)
@ -277,7 +283,7 @@ class MethodDocument(MethodBase):
base_name = original_documents[i].rsplit('.', 1)[0] if '.' in original_documents[i] else original_documents[i]
else:
base_name = f"document_{i+1}"
output_fileName = f"{base_name}_generated_{timestamp}{target_extension}"
output_fileName = f"{base_name}_generated_{self._format_timestamp_for_filename()}{target_extension}"
# Create result data
result_data = {
@ -297,16 +303,16 @@ class MethodDocument(MethodBase):
})
if not output_documents:
return ActionResult.failure(
return ActionResult.isFailure(
error="No documents could be generated"
)
return ActionResult.success(
return ActionResult.isSuccess(
documents=output_documents
)
except Exception as e:
logger.error(f"Error generating document: {str(e)}")
return ActionResult.failure(
return ActionResult.isFailure(
error=str(e)
)
@ -507,12 +513,12 @@ class MethodDocument(MethodBase):
includeMetadata = parameters.get("includeMetadata", True)
if not documentList:
return ActionResult.failure(
return ActionResult.isFailure(
error="Document list reference is required"
)
if not prompt:
return ActionResult.failure(
return ActionResult.isFailure(
error="Prompt is required to specify what kind of report to generate"
)
@ -520,7 +526,7 @@ class MethodDocument(MethodBase):
logger.info(f"Retrieved {len(chatDocuments)} chat documents for report generation")
if not chatDocuments:
return ActionResult.failure(
return ActionResult.isFailure(
error="No documents found for the provided reference"
)
@ -529,7 +535,7 @@ class MethodDocument(MethodBase):
# Create output fileName
timestamp = int(get_utc_timestamp())
output_fileName = f"report_{timestamp}.html"
output_fileName = f"report_{self._format_timestamp_for_filename()}.html"
result_data = {
"documentCount": len(chatDocuments),
@ -540,7 +546,7 @@ class MethodDocument(MethodBase):
logger.info(f"Generated HTML report: {output_fileName} with {len(html_content)} characters")
return ActionResult.success(
return ActionResult.isSuccess(
documents=[{
"documentName": output_fileName,
"documentData": result_data,
@ -549,7 +555,7 @@ class MethodDocument(MethodBase):
)
except Exception as e:
logger.error(f"Error generating report: {str(e)}")
return ActionResult.failure(
return ActionResult.isFailure(
error=str(e)
)

View file

@ -97,6 +97,10 @@ class MethodOutlook(MethodBase):
self.name = "outlook"
self.description = "Handle Microsoft Outlook email operations"
def _format_timestamp_for_filename(self) -> str:
"""Format current timestamp as YYYYMMDD-hhmmss for filenames."""
return datetime.now(UTC).strftime("%Y%m%d-%H%M%S")
def _getMicrosoftConnection(self, connectionReference: str) -> Optional[Dict[str, Any]]:
"""
Helper function to get Microsoft connection details.
@ -389,7 +393,7 @@ class MethodOutlook(MethodBase):
expectedDocumentFormats = parameters.get("expectedDocumentFormats", [])
if not connectionReference:
return ActionResult.failure(error="Connection reference is required")
return ActionResult.isFailure(error="Connection reference is required")
# Validate filter parameter if provided
if filter:
@ -403,7 +407,7 @@ class MethodOutlook(MethodBase):
# Get Microsoft connection
connection = self._getMicrosoftConnection(connectionReference)
if not connection:
return ActionResult.failure(error="No valid Microsoft connection found for the provided connection reference")
return ActionResult.isFailure(error="No valid Microsoft connection found for the provided connection reference")
# Read emails using Microsoft Graph API
try:
@ -460,23 +464,23 @@ class MethodOutlook(MethodBase):
except ImportError:
logger.error("requests module not available")
return ActionResult.failure(error="requests module not available")
return ActionResult.isFailure(error="requests module not available")
except requests.exceptions.HTTPError as e:
if e.response.status_code == 400:
logger.error(f"Bad Request (400) - Invalid filter or parameter: {e.response.text}")
return ActionResult.failure(error=f"Invalid filter syntax. Please check your filter parameter. Error: {e.response.text}")
return ActionResult.isFailure(error=f"Invalid filter syntax. Please check your filter parameter. Error: {e.response.text}")
elif e.response.status_code == 401:
logger.error("Unauthorized (401) - Access token may be expired or invalid")
return ActionResult.failure(error="Authentication failed. Please check your connection and try again.")
return ActionResult.isFailure(error="Authentication failed. Please check your connection and try again.")
elif e.response.status_code == 403:
logger.error("Forbidden (403) - Insufficient permissions to access emails")
return ActionResult.failure(error="Insufficient permissions to read emails from this folder.")
return ActionResult.isFailure(error="Insufficient permissions to read emails from this folder.")
else:
logger.error(f"HTTP Error {e.response.status_code}: {e.response.text}")
return ActionResult.failure(error=f"HTTP Error {e.response.status_code}: {e.response.text}")
return ActionResult.isFailure(error=f"HTTP Error {e.response.status_code}: {e.response.text}")
except Exception as e:
logger.error(f"Error reading emails from Microsoft Graph API: {str(e)}")
return ActionResult.failure(error=f"Failed to read emails: {str(e)}")
return ActionResult.isFailure(error=f"Failed to read emails: {str(e)}")
# Determine output format based on expected formats
output_extension = ".json" # Default
@ -493,9 +497,9 @@ class MethodOutlook(MethodBase):
return ActionResult.success(
return ActionResult.isSuccess(
documents=[{
"documentName": f"outlook_emails_{get_utc_timestamp()}.json",
"documentName": f"outlook_emails_{self._format_timestamp_for_filename()}.json",
"documentData": {
"connectionReference": connectionReference,
"folder": folder,
@ -515,7 +519,7 @@ class MethodOutlook(MethodBase):
except Exception as e:
logger.error(f"Error reading emails: {str(e)}")
return ActionResult.failure(
return ActionResult.isFailure(
error=str(e)
)
@ -551,7 +555,7 @@ class MethodOutlook(MethodBase):
expectedDocumentFormats = parameters.get("expectedDocumentFormats", [])
if not connectionReference or not composed_email_ref:
return ActionResult.failure(
return ActionResult.isFailure(
error="Connection reference and composed email reference are required"
)
@ -560,14 +564,14 @@ class MethodOutlook(MethodBase):
connection = self._getMicrosoftConnection(connectionReference)
if not connection:
logger.error(f"Failed to get Microsoft connection for reference: {connectionReference}")
return ActionResult.failure(error="Failed to get Microsoft connection")
return ActionResult.isFailure(error="Failed to get Microsoft connection")
# Get the composed email document
composed_email_docs = self.service.getChatDocumentsFromDocumentList([composed_email_ref])
if not composed_email_docs or len(composed_email_docs) == 0:
logger.error(f"Could not find composed email document: {composed_email_ref}")
return ActionResult.failure(error=f"Could not find composed email document: {composed_email_ref}")
return ActionResult.isFailure(error=f"Could not find composed email document: {composed_email_ref}")
composed_email_doc = composed_email_docs[0]
@ -582,7 +586,7 @@ class MethodOutlook(MethodBase):
file_id = getattr(composed_email_doc, 'fileId', None)
if not file_id:
logger.error("Document has no fileId attribute")
return ActionResult.failure(error="Composed email document has no fileId")
return ActionResult.isFailure(error="Composed email document has no fileId")
@ -592,7 +596,7 @@ class MethodOutlook(MethodBase):
file_content = self.service.getFileData(file_id)
if not file_content:
logger.error(f"Failed to read file content for fileId: {file_id}")
return ActionResult.failure(error="Failed to read composed email file content")
return ActionResult.isFailure(error="Failed to read composed email file content")
@ -610,7 +614,7 @@ class MethodOutlook(MethodBase):
except Exception as e:
logger.error(f"Error reading file content: {str(e)}")
return ActionResult.failure(error=f"Failed to read file content: {str(e)}")
return ActionResult.isFailure(error=f"Failed to read file content: {str(e)}")
# Parse the email data (should be JSON)
if isinstance(email_data, str):
@ -641,13 +645,13 @@ class MethodOutlook(MethodBase):
except json.JSONDecodeError as e2:
logger.error(f"Failed to parse extracted JSON: {str(e2)}")
logger.error(f"Extracted content: {repr(extracted_json)}")
return ActionResult.failure(error="Could not parse JSON content from composed email document")
return ActionResult.isFailure(error="Could not parse JSON content from composed email document")
else:
logger.error("No JSON content found in HTML document")
return ActionResult.failure(error="Composed email document content is not valid JSON and no JSON could be extracted")
return ActionResult.isFailure(error="Composed email document content is not valid JSON and no JSON could be extracted")
else:
logger.error(f"Unexpected email_data type: {type(email_data)}")
return ActionResult.failure(error=f"Unexpected email data type: {type(email_data)}, expected string")
return ActionResult.isFailure(error=f"Unexpected email data type: {type(email_data)}, expected string")
# At this point, email_data should be a parsed dictionary
@ -663,20 +667,20 @@ class MethodOutlook(MethodBase):
# Validate required fields
if not to or not subject or not body:
logger.error(f"Missing required fields. Available keys: {list(email_data.keys())}")
return ActionResult.failure(error="Composed email must contain 'to', 'subject', and 'body' fields")
return ActionResult.isFailure(error="Composed email must contain 'to', 'subject', and 'body' fields")
except Exception as e:
logger.error(f"Error parsing composed email document: {str(e)}")
return ActionResult.failure(error=f"Failed to parse composed email document: {str(e)}")
return ActionResult.isFailure(error=f"Failed to parse composed email document: {str(e)}")
# Check permissions before proceeding
permissions_ok = await self._checkPermissions(connection)
if not permissions_ok:
logger.error("Permission check failed")
return ActionResult.failure(error="Connection lacks necessary permissions for Outlook operations")
return ActionResult.isFailure(error="Connection lacks necessary permissions for Outlook operations")
# Create email draft using Microsoft Graph API
@ -789,12 +793,12 @@ class MethodOutlook(MethodBase):
# Return success with draft information
# Create document reference in standard format
document_reference = f"docItem:{uuid.uuid4()}:email_draft_created_{int(get_utc_timestamp())}.json"
document_reference = f"docItem:{uuid.uuid4()}:email_draft_created_{self._format_timestamp_for_filename()}.json"
return ActionResult(
success=True,
documents=[{
"documentName": f"email_draft_created_{int(get_utc_timestamp())}.json",
"documentName": f"email_draft_created_{self._format_timestamp_for_filename()}.json",
"documentData": {
"status": "success",
"message": "Email draft created successfully",
@ -810,19 +814,19 @@ class MethodOutlook(MethodBase):
)
else:
logger.error(f"Failed to create draft. Status: {response.status_code}, Response: {response.text}")
return ActionResult.failure(error=f"Failed to create email draft: {response.status_code} - {response.text}")
return ActionResult.isFailure(error=f"Failed to create email draft: {response.status_code} - {response.text}")
except ImportError:
logger.error("requests module not available")
return ActionResult.failure(error="requests module not available")
return ActionResult.isFailure(error="requests module not available")
except Exception as e:
logger.error(f"Error creating email draft via Microsoft Graph API: {str(e)}")
return ActionResult.failure(error=f"Failed to create email draft: {str(e)}")
return ActionResult.isFailure(error=f"Failed to create email draft: {str(e)}")
return ActionResult(
success=True,
documents=[{
"documentName": f"outlook_email_draft_{int(get_utc_timestamp())}.json",
"documentName": f"outlook_email_draft_{self._format_timestamp_for_filename()}.json",
"documentData": {
"connectionReference": connectionReference,
"composedEmailReference": composed_email_ref,
@ -848,7 +852,7 @@ class MethodOutlook(MethodBase):
except Exception as e:
logger.error(f"Error creating email draft: {str(e)}")
return ActionResult.failure(error=str(e))
return ActionResult.isFailure(error=str(e))
@action
async def searchEmails(self, parameters: Dict[str, Any]) -> ActionResult:
@ -871,16 +875,16 @@ class MethodOutlook(MethodBase):
# Validate parameters
if not connectionReference:
return ActionResult.failure(error="Connection reference is required")
return ActionResult.isFailure(error="Connection reference is required")
if not query or not query.strip():
return ActionResult.failure(error="Search query is required and cannot be empty")
return ActionResult.isFailure(error="Search query is required and cannot be empty")
# Check if this is a folder specification query
if query.strip().lower().startswith('folder:'):
folder_name = query.strip()[7:].strip() # Remove "folder:" prefix
if not folder_name:
return ActionResult.failure(error="Invalid folder specification. Use format 'folder:FolderName'")
return ActionResult.isFailure(error="Invalid folder specification. Use format 'folder:FolderName'")
logger.info(f"Search query is a folder specification: {folder_name}")
# Validate limit
@ -896,7 +900,7 @@ class MethodOutlook(MethodBase):
# Get Microsoft connection
connection = self._getMicrosoftConnection(connectionReference)
if not connection:
return ActionResult.failure(error="No valid Microsoft connection found for the provided connection reference")
return ActionResult.isFailure(error="No valid Microsoft connection found for the provided connection reference")
# Search emails using Microsoft Graph API
try:
@ -1013,10 +1017,10 @@ class MethodOutlook(MethodBase):
except ImportError:
logger.error("requests module not available")
return ActionResult.failure(error="requests module not available")
return ActionResult.isFailure(error="requests module not available")
except Exception as e:
logger.error(f"Error searching emails via Microsoft Graph API: {str(e)}")
return ActionResult.failure(error=f"Failed to search emails: {str(e)}")
return ActionResult.isFailure(error=f"Failed to search emails: {str(e)}")
# Determine output format based on expected formats
output_extension = ".json" # Default
@ -1036,7 +1040,7 @@ class MethodOutlook(MethodBase):
return ActionResult(
success=True,
documents=[{
"documentName": f"outlook_email_search_{int(get_utc_timestamp())}.json",
"documentName": f"outlook_email_search_{self._format_timestamp_for_filename()}.json",
"documentData": {
"connectionReference": connectionReference,
"query": query,
@ -1056,7 +1060,7 @@ class MethodOutlook(MethodBase):
except Exception as e:
logger.error(f"Error searching emails: {str(e)}")
return ActionResult.failure(error=str(e))
return ActionResult.isFailure(error=str(e))
async def listDrafts(self, parameters: Dict[str, Any]) -> ActionResult:
"""
@ -1075,12 +1079,12 @@ class MethodOutlook(MethodBase):
expectedDocumentFormats = parameters.get("expectedDocumentFormats", [])
if not connectionReference:
return ActionResult.failure(error="Connection reference is required")
return ActionResult.isFailure(error="Connection reference is required")
# Get Microsoft connection
connection = self._getMicrosoftConnection(connectionReference)
if not connection:
return ActionResult.failure(error="No valid Microsoft connection found for the provided connection reference")
return ActionResult.isFailure(error="No valid Microsoft connection found for the provided connection reference")
# List drafts using Microsoft Graph API
try:
@ -1137,10 +1141,10 @@ class MethodOutlook(MethodBase):
except ImportError:
logger.error("requests module not available")
return ActionResult.failure(error="requests module not available")
return ActionResult.isFailure(error="requests module not available")
except Exception as e:
logger.error(f"Error listing drafts via Microsoft Graph API: {str(e)}")
return ActionResult.failure(error=f"Failed to list drafts: {str(e)}")
return ActionResult.isFailure(error=f"Failed to list drafts: {str(e)}")
# Determine output format based on expected formats
output_extension = ".json" # Default
@ -1160,7 +1164,7 @@ class MethodOutlook(MethodBase):
return ActionResult(
success=True,
documents=[{
"documentName": f"outlook_drafts_list_{int(get_utc_timestamp())}.json",
"documentName": f"outlook_drafts_list_{self._format_timestamp_for_filename()}.json",
"documentData": {
"connectionReference": connectionReference,
"folder": folder,
@ -1179,7 +1183,7 @@ class MethodOutlook(MethodBase):
except Exception as e:
logger.error(f"Error listing drafts: {str(e)}")
return ActionResult.failure(error=str(e))
return ActionResult.isFailure(error=str(e))
async def findDrafts(self, parameters: Dict[str, Any]) -> ActionResult:
"""
@ -1196,12 +1200,12 @@ class MethodOutlook(MethodBase):
expectedDocumentFormats = parameters.get("expectedDocumentFormats", [])
if not connectionReference:
return ActionResult.failure(error="Connection reference is required")
return ActionResult.isFailure(error="Connection reference is required")
# Get Microsoft connection
connection = self._getMicrosoftConnection(connectionReference)
if not connection:
return ActionResult.failure(error="No valid Microsoft connection found for the provided connection reference")
return ActionResult.isFailure(error="No valid Microsoft connection found for the provided connection reference")
# Find drafts using Microsoft Graph API
try:
@ -1248,10 +1252,10 @@ class MethodOutlook(MethodBase):
except ImportError:
logger.error("requests module not available")
return ActionResult.failure(error="requests module not available")
return ActionResult.isFailure(error="requests module not available")
except Exception as e:
logger.error(f"Error finding drafts via Microsoft Graph API: {str(e)}")
return ActionResult.failure(error=f"Failed to find drafts: {str(e)}")
return ActionResult.isFailure(error=f"Failed to find drafts: {str(e)}")
# Determine output format based on expected formats
output_extension = ".json" # Default
@ -1271,7 +1275,7 @@ class MethodOutlook(MethodBase):
return ActionResult(
success=True,
documents=[{
"documentName": f"outlook_drafts_found_{int(get_utc_timestamp())}.json",
"documentName": f"outlook_drafts_found_{self._format_timestamp_for_filename()}.json",
"documentData": {
"connectionReference": connectionReference,
"limit": limit,
@ -1289,7 +1293,7 @@ class MethodOutlook(MethodBase):
except Exception as e:
logger.error(f"Error finding drafts: {str(e)}")
return ActionResult.failure(error=str(e))
return ActionResult.isFailure(error=str(e))
def _getFolderNameById(self, folder_id: str, connection: Dict[str, Any]) -> str:
"""
@ -1335,12 +1339,12 @@ class MethodOutlook(MethodBase):
expectedDocumentFormats = parameters.get("expectedDocumentFormats", [])
if not connectionReference:
return ActionResult.failure(error="Connection reference is required")
return ActionResult.isFailure(error="Connection reference is required")
# Get Microsoft connection
connection = self._getMicrosoftConnection(connectionReference)
if not connection:
return ActionResult.failure(error="No valid Microsoft connection found for the provided connection reference")
return ActionResult.isFailure(error="No valid Microsoft connection found for the provided connection reference")
# Check Drafts folder directly
try:
@ -1357,7 +1361,7 @@ class MethodOutlook(MethodBase):
drafts_folder_id = self._getFolderId("Drafts", connection)
if not drafts_folder_id:
return ActionResult.failure(error="Could not find Drafts folder")
return ActionResult.isFailure(error="Could not find Drafts folder")
# Get messages directly from Drafts folder
api_url = f"{graph_url}/me/mailFolders/{drafts_folder_id}/messages"
@ -1391,10 +1395,10 @@ class MethodOutlook(MethodBase):
except ImportError:
logger.error("requests module not available")
return ActionResult.failure(error="requests module not available")
return ActionResult.isFailure(error="requests module not available")
except Exception as e:
logger.error(f"Error checking Drafts folder via Microsoft Graph API: {str(e)}")
return ActionResult.failure(error=f"Failed to check Drafts folder: {str(e)}")
return ActionResult.isFailure(error=f"Failed to check Drafts folder: {str(e)}")
# Determine output format based on expected formats
output_extension = ".json" # Default
@ -1414,7 +1418,7 @@ class MethodOutlook(MethodBase):
return ActionResult(
success=True,
documents=[{
"documentName": f"outlook_drafts_folder_check_{int(get_utc_timestamp())}.json",
"documentName": f"outlook_drafts_folder_check_{self._format_timestamp_for_filename()}.json",
"documentData": {
"connectionReference": connectionReference,
"limit": limit,
@ -1432,7 +1436,7 @@ class MethodOutlook(MethodBase):
except Exception as e:
logger.error(f"Error checking Drafts folder: {str(e)}")
return ActionResult.failure(error=str(e))
return ActionResult.isFailure(error=str(e))
@action
async def composeEmail(self, parameters: Dict[str, Any]) -> ActionResult:
@ -1483,7 +1487,7 @@ class MethodOutlook(MethodBase):
expectedDocumentFormats = parameters.get("expectedDocumentFormats", [])
if not context:
return ActionResult.failure(error="Context is required for email composition")
return ActionResult.isFailure(error="Context is required for email composition")
# Process input documents to extract content for AI context (NOT as attachments)
document_content_summary = ""
@ -1628,10 +1632,10 @@ class MethodOutlook(MethodBase):
except json.JSONDecodeError as e:
logger.error(f"AI response is not valid JSON: {str(e)}")
return ActionResult.failure(error=f"AI response is not valid JSON: {str(e)}")
return ActionResult.isFailure(error=f"AI response is not valid JSON: {str(e)}")
except ValueError as e:
logger.error(f"AI response missing required fields: {str(e)}")
return ActionResult.failure(error=f"AI response missing required fields: {str(e)}")
return ActionResult.isFailure(error=f"AI response missing required fields: {str(e)}")
# Create result data - output the email data directly, not wrapped
result_data = {
@ -1664,7 +1668,7 @@ class MethodOutlook(MethodBase):
return ActionResult(
success=True,
documents=[{
"documentName": f"composed_email_{int(get_utc_timestamp())}.json",
"documentName": f"composed_email_{self._format_timestamp_for_filename()}.json",
"documentData": result_data,
"mimeType": "application/json"
}]
@ -1672,11 +1676,11 @@ class MethodOutlook(MethodBase):
except Exception as e:
logger.error(f"Error calling AI for email composition: {str(e)}")
return ActionResult.failure(error=f"Failed to compose email: {str(e)}")
return ActionResult.isFailure(error=f"Failed to compose email: {str(e)}")
except Exception as e:
logger.error(f"Error composing email: {str(e)}")
return ActionResult.failure(error=str(e))
return ActionResult.isFailure(error=str(e))
async def checkPermissions(self, parameters: Dict[str, Any]) -> ActionResult:
"""
@ -1688,12 +1692,12 @@ class MethodOutlook(MethodBase):
try:
connectionReference = parameters.get("connectionReference")
if not connectionReference:
return ActionResult.failure(error="Connection reference is required")
return ActionResult.isFailure(error="Connection reference is required")
# Get Microsoft connection
connection = self._getMicrosoftConnection(connectionReference)
if not connection:
return ActionResult.failure(error="Failed to get Microsoft connection")
return ActionResult.isFailure(error="Failed to get Microsoft connection")
# Check permissions
permissions_ok = await self._checkPermissions(connection)
@ -1702,7 +1706,7 @@ class MethodOutlook(MethodBase):
return ActionResult(
success=True,
documents=[{
"documentName": f"outlook_permissions_check_{int(get_utc_timestamp())}.json",
"documentName": f"outlook_permissions_check_{self._format_timestamp_for_filename()}.json",
"documentData": {
"permissions": "✅ All necessary permissions are available",
"scopes": connection.get("scopes", []),
@ -1716,7 +1720,7 @@ class MethodOutlook(MethodBase):
return ActionResult(
success=False,
documents=[{
"documentName": f"outlook_permissions_check_{int(get_utc_timestamp())}.json",
"documentName": f"outlook_permissions_check_{self._format_timestamp_for_filename()}.json",
"documentData": {
"permissions": "❌ Missing necessary permissions",
"requiredScopes": ["Mail.ReadWrite", "Mail.Send", "Mail.ReadWrite.Shared", "User.Read"],
@ -1732,5 +1736,5 @@ class MethodOutlook(MethodBase):
except Exception as e:
logger.error(f"Error checking permissions: {str(e)}")
return ActionResult.failure(error=str(e))
return ActionResult.isFailure(error=str(e))

View file

@ -4,9 +4,10 @@ Handles SharePoint document operations using the SharePoint service.
"""
import logging
import json
import re
from typing import Dict, Any, List, Optional
from datetime import datetime, UTC
import json
import base64
from urllib.parse import urlparse
import aiohttp
@ -19,12 +20,16 @@ from modules.shared.timezoneUtils import get_utc_timestamp
logger = logging.getLogger(__name__)
class MethodSharepoint(MethodBase):
"""SharePoint method implementation for document operations"""
"""SharePoint operations methods."""
def __init__(self, serviceCenter: Any):
super().__init__(serviceCenter)
def __init__(self, service):
super().__init__(service)
self.name = "sharepoint"
self.description = "Handle Microsoft SharePoint document operations"
self.description = "SharePoint operations methods"
def _format_timestamp_for_filename(self) -> str:
"""Format current timestamp as YYYYMMDD-hhmmss for filenames."""
return datetime.now(UTC).strftime("%Y%m%d-%H%M%S")
def _getMicrosoftConnection(self, connectionReference: str) -> Optional[Dict[str, Any]]:
"""Get Microsoft connection from connection reference"""
@ -179,21 +184,21 @@ class MethodSharepoint(MethodBase):
expectedDocumentFormats = parameters.get("expectedDocumentFormats", [])
if not connectionReference or not siteUrl or not query:
return ActionResult.failure(error="Connection reference, site URL, and query are required")
return ActionResult.isFailure(error="Connection reference, site URL, and query are required")
connection = self._getMicrosoftConnection(connectionReference)
if not connection:
return ActionResult.failure(error="No valid Microsoft connection found for the provided connection reference")
return ActionResult.isFailure(error="No valid Microsoft connection found for the provided connection reference")
# Parse site URL to get hostname and site path
site_info = self._parseSiteUrl(siteUrl)
if not site_info["hostname"] or not site_info["sitePath"]:
return ActionResult.failure(error=f"Invalid SharePoint site URL: {siteUrl}")
return ActionResult.isFailure(error=f"Invalid SharePoint site URL: {siteUrl}")
# Get site ID
site_id = await self._getSiteId(connection["accessToken"], site_info["hostname"], site_info["sitePath"])
if not site_id:
return ActionResult.failure(error="Failed to get SharePoint site ID")
return ActionResult.isFailure(error="Failed to get SharePoint site ID")
try:
# Use Microsoft Graph search API
@ -204,7 +209,7 @@ class MethodSharepoint(MethodBase):
search_result = await self._makeGraphApiCall(connection["accessToken"], endpoint)
if "error" in search_result:
return ActionResult.failure(error=f"Search failed: {search_result['error']}")
return ActionResult.isFailure(error=f"Search failed: {search_result['error']}")
# Process search results
items = search_result.get("value", [])
@ -260,7 +265,7 @@ class MethodSharepoint(MethodBase):
except Exception as e:
logger.error(f"Error searching SharePoint: {str(e)}")
return ActionResult.failure(error=str(e))
return ActionResult.isFailure(error=str(e))
# Determine output format based on expected formats
output_extension = ".json" # Default
@ -279,7 +284,7 @@ class MethodSharepoint(MethodBase):
success=True,
documents=[
{
"documentName": f"sharepoint_find_path_{int(get_utc_timestamp())}{output_extension}",
"documentName": f"sharepoint_find_path_{self._format_timestamp_for_filename()}{output_extension}",
"documentData": result_data,
"mimeType": output_mime_type
}
@ -288,7 +293,7 @@ class MethodSharepoint(MethodBase):
except Exception as e:
logger.error(f"Error finding document path: {str(e)}")
return ActionResult.failure(error=str(e))
return ActionResult.isFailure(error=str(e))
@action
async def readDocument(self, parameters: Dict[str, Any]) -> ActionResult:
@ -312,7 +317,7 @@ class MethodSharepoint(MethodBase):
expectedDocumentFormats = parameters.get("expectedDocumentFormats", [])
if not documentList or not connectionReference or not siteUrl or not documentPaths:
return ActionResult.failure(error="Document list reference, connection reference, site URL, and document paths are required")
return ActionResult.isFailure(error="Document list reference, connection reference, site URL, and document paths are required")
# Get documents from reference - ensure documentList is a list, not a string
if isinstance(documentList, str):
@ -332,21 +337,21 @@ class MethodSharepoint(MethodBase):
logger.info(f"Created {len(chatDocuments)} mock documents for testing")
if not chatDocuments:
return ActionResult.failure(error="No documents found for the provided reference")
return ActionResult.isFailure(error="No documents found for the provided reference")
connection = self._getMicrosoftConnection(connectionReference)
if not connection:
return ActionResult.failure(error="No valid Microsoft connection found for the provided connection reference")
return ActionResult.isFailure(error="No valid Microsoft connection found for the provided connection reference")
# Parse site URL to get hostname and site path
site_info = self._parseSiteUrl(siteUrl)
if not site_info["hostname"] or not site_info["sitePath"]:
return ActionResult.failure(error=f"Invalid SharePoint site URL: {siteUrl}")
return ActionResult.isFailure(error=f"Invalid SharePoint site URL: {siteUrl}")
# Get site ID
site_id = await self._getSiteId(connection["accessToken"], site_info["hostname"], site_info["sitePath"])
if not site_id:
return ActionResult.failure(error="Failed to get SharePoint site ID")
return ActionResult.isFailure(error="Failed to get SharePoint site ID")
# Process each document path
read_results = []
@ -469,7 +474,7 @@ class MethodSharepoint(MethodBase):
success=True,
documents=[
{
"documentName": f"sharepoint_documents_{int(get_utc_timestamp())}{output_extension}",
"documentName": f"sharepoint_documents_{self._format_timestamp_for_filename()}{output_extension}",
"documentData": result_data,
"mimeType": output_mime_type
}
@ -504,29 +509,29 @@ class MethodSharepoint(MethodBase):
expectedDocumentFormats = parameters.get("expectedDocumentFormats", [])
if not connectionReference or not siteUrl or not documentPaths or not documentList or not fileNames:
return ActionResult.failure(error="Connection reference, site URL, document paths, document list, and file names are required")
return ActionResult.isFailure(error="Connection reference, site URL, document paths, document list, and file names are required")
# Get Microsoft connection
connection = self._getMicrosoftConnection(connectionReference)
if not connection:
return ActionResult.failure(error="No valid Microsoft connection found for the provided connection reference")
return ActionResult.isFailure(error="No valid Microsoft connection found for the provided connection reference")
# Get documents from reference - ensure documentList is a list, not a string
if isinstance(documentList, str):
documentList = [documentList] # Convert string to list
chatDocuments = self.service.getChatDocumentsFromDocumentList(documentList)
if not chatDocuments:
return ActionResult.failure(error="No documents found for the provided reference")
return ActionResult.isFailure(error="No documents found for the provided reference")
# Parse site URL to get hostname and site path
site_info = self._parseSiteUrl(siteUrl)
if not site_info["hostname"] or not site_info["sitePath"]:
return ActionResult.failure(error=f"Invalid SharePoint site URL: {siteUrl}")
return ActionResult.isFailure(error=f"Invalid SharePoint site URL: {siteUrl}")
# Get site ID
site_id = await self._getSiteId(connection["accessToken"], site_info["hostname"], site_info["sitePath"])
if not site_id:
return ActionResult.failure(error="Failed to get SharePoint site ID")
return ActionResult.isFailure(error="Failed to get SharePoint site ID")
# Process each document upload
upload_results = []
@ -646,7 +651,7 @@ class MethodSharepoint(MethodBase):
success=True,
documents=[
{
"documentName": f"sharepoint_upload_{get_utc_timestamp()}{output_extension}",
"documentName": f"sharepoint_upload_{self._format_timestamp_for_filename()}{output_extension}",
"documentData": result_data,
"mimeType": output_mime_type
}
@ -680,12 +685,12 @@ class MethodSharepoint(MethodBase):
expectedDocumentFormats = parameters.get("expectedDocumentFormats", [])
if not connectionReference or not siteUrl or not folderPaths:
return ActionResult.failure(error="Connection reference, site URL, and folder paths are required")
return ActionResult.isFailure(error="Connection reference, site URL, and folder paths are required")
# Get Microsoft connection
connection = self._getMicrosoftConnection(connectionReference)
if not connection:
return ActionResult.failure(error="No valid Microsoft connection found for the provided connection reference")
return ActionResult.isFailure(error="No valid Microsoft connection found for the provided connection reference")
logger.info(f"Starting SharePoint listDocuments for site: {siteUrl}")
logger.debug(f"Connection ID: {connection['id']}")
@ -697,7 +702,7 @@ class MethodSharepoint(MethodBase):
if not site_info["hostname"] or not site_info["sitePath"]:
logger.error(f"Failed to parse site URL: {siteUrl}")
return ActionResult.failure(error=f"Invalid SharePoint site URL: {siteUrl}")
return ActionResult.isFailure(error=f"Invalid SharePoint site URL: {siteUrl}")
# Get site ID
logger.info(f"Getting site ID for hostname: {site_info['hostname']}, path: {site_info['sitePath']}")
@ -705,7 +710,7 @@ class MethodSharepoint(MethodBase):
logger.info(f"Site ID result: {site_id}")
if not site_id:
return ActionResult.failure(error="Failed to get SharePoint site ID")
return ActionResult.isFailure(error="Failed to get SharePoint site ID")
# Process each folder path
list_results = []
@ -854,7 +859,7 @@ class MethodSharepoint(MethodBase):
success=True,
documents=[
{
"documentName": f"sharepoint_document_list_{int(get_utc_timestamp())}{output_extension}",
"documentName": f"sharepoint_document_list_{self._format_timestamp_for_filename()}{output_extension}",
"documentData": result_data,
"mimeType": output_mime_type
}

View file

@ -49,6 +49,10 @@ class MethodWeb(MethodBase):
self.user_agent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
self.timeout = 30
def _format_timestamp_for_filename(self) -> str:
"""Format current timestamp as YYYYMMDD-hhmmss for filenames."""
return datetime.now(UTC).strftime("%Y%m%d-%H%M%S")
def _readUrl(self, url: str) -> BeautifulSoup:
"""Read a URL and return a BeautifulSoup parser for the content with enhanced error handling"""
if not url or not url.startswith(('http://', 'https://')):
@ -498,10 +502,10 @@ class MethodWeb(MethodBase):
expectedDocumentFormats = parameters.get("expectedDocumentFormats", [])
if not query:
return ActionResult.failure(error="Search query is required")
return ActionResult.isFailure(error="Search query is required")
if not self.srcApikey:
return ActionResult.failure(error="SerpAPI key not configured")
return ActionResult.isFailure(error="SerpAPI key not configured")
userLanguage = "en"
if hasattr(self.service, 'user') and hasattr(self.service.user, 'language'):
@ -558,7 +562,7 @@ class MethodWeb(MethodBase):
success=True,
documents=[
{
"documentName": f"web_search_{get_utc_timestamp()}{output_extension}",
"documentName": f"web_search_{self._format_timestamp_for_filename()}{output_extension}",
"documentData": result_data,
"mimeType": output_mime_type
}
@ -610,7 +614,7 @@ class MethodWeb(MethodBase):
expectedDocumentFormats = parameters.get("expectedDocumentFormats", [])
if not document:
return ActionResult.failure(error="No document with URL list provided.")
return ActionResult.isFailure(error="No document with URL list provided.")
# Read the document content
with open(document, "r", encoding="utf-8") as f:
@ -622,7 +626,7 @@ class MethodWeb(MethodBase):
urls = [u.strip() for u in urls if u.strip()]
if not urls:
return ActionResult.failure(error="No valid URLs provided in the document.")
return ActionResult.isFailure(error="No valid URLs provided in the document.")
crawl_results = []
for url in urls:
@ -693,7 +697,7 @@ class MethodWeb(MethodBase):
success=True,
documents=[
{
"documentName": f"web_crawl_{int(get_utc_timestamp())}{output_extension}",
"documentName": f"web_crawl_{self._format_timestamp_for_filename()}{output_extension}",
"documentData": result_data,
"mimeType": output_mime_type
}
@ -797,7 +801,7 @@ class MethodWeb(MethodBase):
success=True,
documents=[
{
"documentName": f"web_scrape_{int(get_utc_timestamp())}{output_extension}",
"documentName": f"web_scrape_{self._format_timestamp_for_filename()}{output_extension}",
"documentData": result_data,
"mimeType": output_mime_type
}

View file

@ -68,8 +68,12 @@ async def get_workflows(
status=workflow_data.get("status", "running"),
name=workflow_data.get("name"),
currentRound=workflow_data.get("currentRound", 1),
lastActivity=workflow_data.get("lastActivity", get_utc_timestamp()),
startedAt=workflow_data.get("startedAt", get_utc_timestamp()),
currentTask=workflow_data.get("currentTask", 0),
currentAction=workflow_data.get("currentAction", 0),
totalTasks=workflow_data.get("totalTasks", 0),
totalActions=workflow_data.get("totalActions", 0),
lastActivity=workflow_data.get("lastActivity", get_utc_timestamp()),
startedAt=workflow_data.get("startedAt", get_utc_timestamp()),
logs=[ChatLog(**log) for log in workflow_data.get("logs", [])],
messages=[ChatMessage(**msg) for msg in workflow_data.get("messages", [])],
stats=ChatStat(**workflow_data.get("dataStats", {})) if workflow_data.get("dataStats") else ChatStat(
@ -271,6 +275,15 @@ async def get_workflow_messages(
# Get all messages
allMessages = interfaceChat.getWorkflowMessages(workflowId)
# Debug logging: Log attributes for each message
logger.debug(f"Retrieved {len(allMessages)} messages for workflow {workflowId}")
for i, message in enumerate(allMessages):
logger.debug(f"Message {i+1} (ID: {message.id}): {message}")
logger.debug(f" - Type: {getattr(message, 'type', 'N/A')}")
logger.debug(f" - Content: {getattr(message, 'content', 'N/A')[:100]}...")
logger.debug(f" - PublishedAt: {getattr(message, 'publishedAt', 'N/A')}")
logger.debug(f" - All attributes: {message.__dict__}")
# Apply selective data transfer if messageId is provided
if messageId:
# Find the index of the message with the given ID

View file

@ -51,7 +51,9 @@ class WorkflowManager:
workflow.lastActivity = get_utc_timestamp()
self.chatInterface.updateWorkflow(workflow.id, {
"status": "stopped",
"lastActivity": workflow.lastActivity
"lastActivity": workflow.lastActivity,
"totalTasks": workflow.totalTasks,
"totalActions": workflow.totalActions
})
# Create final stopped message
@ -86,7 +88,9 @@ class WorkflowManager:
workflow.lastActivity = get_utc_timestamp()
self.chatInterface.updateWorkflow(workflow.id, {
"status": "failed",
"lastActivity": workflow.lastActivity
"lastActivity": workflow.lastActivity,
"totalTasks": workflow.totalTasks,
"totalActions": workflow.totalActions
})
# Create error message
@ -274,7 +278,9 @@ class WorkflowManager:
workflow.lastActivity = get_utc_timestamp()
self.chatInterface.updateWorkflow(workflow.id, {
"status": "stopped",
"lastActivity": workflow.lastActivity
"lastActivity": workflow.lastActivity,
"totalTasks": workflow.totalTasks,
"totalActions": workflow.totalActions
})
return
elif workflow_result.status == 'failed':
@ -296,7 +302,9 @@ class WorkflowManager:
workflow.lastActivity = get_utc_timestamp()
self.chatInterface.updateWorkflow(workflow.id, {
"status": "failed",
"lastActivity": workflow.lastActivity
"lastActivity": workflow.lastActivity,
"totalTasks": workflow.totalTasks,
"totalActions": workflow.totalActions
})
return
@ -319,7 +327,9 @@ class WorkflowManager:
workflow.lastActivity = get_utc_timestamp()
self.chatInterface.updateWorkflow(workflow.id, {
"status": "completed",
"lastActivity": workflow.lastActivity
"lastActivity": workflow.lastActivity,
"totalTasks": workflow.totalTasks,
"totalActions": workflow.totalActions
})
except Exception as e:
@ -342,6 +352,8 @@ class WorkflowManager:
workflow.lastActivity = get_utc_timestamp()
self.chatInterface.updateWorkflow(workflow.id, {
"status": "failed",
"lastActivity": workflow.lastActivity
"lastActivity": workflow.lastActivity,
"totalTasks": workflow.totalTasks,
"totalActions": workflow.totalActions
})

View file

@ -1,17 +1,8 @@
TODO
# UI Rendering of the workflow
- Besseres Rendering der Tasks, Actions, Files (hierarchisch eingerückt) und der Log Entries (ohne Rahmen)
- Beim Laden des Workflows die Logs und Messages synchron laden, saubere chronologische Reihenfolge aller Objekte
- im Action-Planning-Chat die Historie des Chats pro Message (Summary) anzeigen, jeweils pro Message die Dokumentlisten anzeigen, damit der Kontext aller Dokumente klar ist
- Documents: den Dateinamen anzeigen, nicht den internen Dokumentnamen
- Chat: Die Benutzermeldungen pro Action und Task rendern
- Über dem Rendering-Fenster der Logs und Messages ein professionelles Dashboard, welches die Animation zeigt, graphisch die Blöcke der Tasks darstellt und immer die aktive Action eines Tasks animiert. Die Ansicht wird nach Abschluss jeder Action dynamisch aktualisiert.
# System
- Backend/UI fix Table Connections mit korrekten Token Infos, View jedesmal neu laden im formGeneric
- model reference diagram for all models. who uses who? --> to see the basic building blocks
- neutralizer to activate AND put back placeholders to the returned data