From db13db0f83ca1520f9de5caf3c2006711704f110 Mon Sep 17 00:00:00 2001
From: ValueOn AG
Date: Wed, 20 Aug 2025 00:01:13 +0200
Subject: [PATCH] refactored connection token management and document handling
---
GOOGLE_OAUTH_SETUP.md | 114 ---
modules/chat/documents/documentGeneration.py | 55 +-
modules/chat/handling/executionState.py | 15 +-
modules/chat/handling/handlingTasks.py | 92 +--
modules/chat/handling/promptFactory.py | 115 ++-
modules/chat/methodBase.py | 123 +--
modules/chat/serviceCenter.py | 9 +-
modules/interfaces/interfaceAppModel.py | 2 +
modules/interfaces/interfaceAppObjects.py | 114 ++-
modules/interfaces/interfaceChatModel.py | 175 ++--
modules/methods/EXCLUDED_methodCoder.py | 327 --------
modules/methods/methodAi.py | 140 ++--
modules/methods/methodDocument.py | 314 +++-----
modules/methods/methodOutlook.py | 790 +++++++------------
modules/methods/methodSharepoint.py | 228 ++----
modules/methods/methodWeb.py | 118 ++-
modules/routes/routeSecurityGoogle.py | 84 ++
modules/routes/routeSecurityMsft.py | 92 ++-
modules/security/tokenManager.py | 170 ++++
notes/changelog.txt | 19 +-
notes/readme.md | 1 +
21 files changed, 1406 insertions(+), 1691 deletions(-)
delete mode 100644 GOOGLE_OAUTH_SETUP.md
delete mode 100644 modules/methods/EXCLUDED_methodCoder.py
create mode 100644 modules/security/tokenManager.py
diff --git a/GOOGLE_OAUTH_SETUP.md b/GOOGLE_OAUTH_SETUP.md
deleted file mode 100644
index 85d4500f..00000000
--- a/GOOGLE_OAUTH_SETUP.md
+++ /dev/null
@@ -1,114 +0,0 @@
-# Google OAuth 2.0 Setup Guide for PowerOn
-
-## Overview
-This guide explains how to set up Google OAuth 2.0 authentication for the PowerOn application.
-
-## Prerequisites
-- A Google account
-- Access to Google Cloud Console (https://console.cloud.google.com/)
-
-## Step 1: Create a Google Cloud Project
-
-1. Go to [Google Cloud Console](https://console.cloud.google.com/)
-2. Click on the project dropdown at the top of the page
-3. Click "New Project"
-4. Enter a project name (e.g., "PowerOn OAuth")
-5. Click "Create"
-
-## Step 2: Enable Google+ API
-
-1. In your new project, go to "APIs & Services" > "Library"
-2. Search for "Google+ API" or "Google Identity"
-3. Click on "Google+ API" and click "Enable"
-
-## Step 3: Create OAuth 2.0 Credentials
-
-1. Go to "APIs & Services" > "Credentials"
-2. Click "Create Credentials" > "OAuth client ID"
-3. If prompted, configure the OAuth consent screen first:
- - Choose "External" user type
- - Fill in the required fields (App name, User support email, Developer contact information)
- - Add scopes: `https://www.googleapis.com/auth/userinfo.profile`, `https://www.googleapis.com/auth/userinfo.email`
- - Add test users if needed
- - Click "Save and Continue" through all sections
-
-4. Back to creating OAuth client ID:
- - Application type: "Web application"
- - Name: "PowerOn Web Client"
- - Authorized redirect URIs: Add your redirect URI
- - For development: `http://localhost:8000/api/google/auth/callback`
- - For production: `https://yourdomain.com/api/google/auth/callback`
-
-5. Click "Create"
-6. **Important**: Copy the Client ID and Client Secret - you'll need these for the next step
-
-## Step 4: Configure PowerOn Application
-
-1. Open your environment file (`gateway/env_dev.env` for development)
-2. Replace the placeholder values with your actual Google OAuth credentials:
-
-```env
-# Google OAuth Configuration
-Service_GOOGLE_CLIENT_ID = your-actual-client-id-from-google-console
-Service_GOOGLE_CLIENT_SECRET = your-actual-client-secret-from-google-console
-Service_GOOGLE_REDIRECT_URI = http://localhost:8000/api/google/auth/callback
-```
-
-3. Save the file
-4. Restart your PowerOn gateway server
-
-## Step 5: Test the Configuration
-
-1. Start your PowerOn application
-2. Go to the Connections module
-3. Click "Connect Google"
-4. You should be redirected to Google's OAuth consent screen
-5. After authorization, you should be redirected back to PowerOn
-
-## Troubleshooting
-
-### Common Issues
-
-#### 1. "Missing required parameter: redirect_uri"
-- **Cause**: Google OAuth client is not properly configured with the redirect URI
-- **Solution**: Ensure the redirect URI in Google Cloud Console exactly matches your application's callback URL
-
-#### 2. "Invalid client" error
-- **Cause**: Client ID or Client Secret is incorrect
-- **Solution**: Double-check the credentials in your environment file
-
-#### 3. "Redirect URI mismatch" error
-- **Cause**: The redirect URI in your OAuth request doesn't match what's configured in Google Cloud Console
-- **Solution**: Ensure both URIs are identical (including protocol, domain, port, and path)
-
-### Debug Steps
-
-1. Check the PowerOn gateway logs for OAuth configuration details
-2. Verify environment variables are loaded correctly
-3. Ensure the Google OAuth client is configured for "Web application" type
-4. Check that the redirect URI includes the full path: `/api/google/auth/callback`
-
-## Security Notes
-
-- **Never commit** your Google OAuth credentials to version control
-- Use environment variables or secure configuration management
-- Regularly rotate your client secrets
-- Monitor OAuth usage in Google Cloud Console
-
-## Production Considerations
-
-For production deployment:
-
-1. Use HTTPS for all OAuth redirects
-2. Configure proper domain verification in Google Cloud Console
-3. Set up monitoring and alerting for OAuth usage
-4. Consider implementing additional security measures like PKCE (Proof Key for Code Exchange)
-
-## Support
-
-If you continue to experience issues:
-
-1. Check the PowerOn gateway logs for detailed error messages
-2. Verify your Google OAuth configuration in Google Cloud Console
-3. Test with a simple OAuth flow to isolate the issue
-4. Ensure your Google Cloud project has billing enabled (required for some APIs)
diff --git a/modules/chat/documents/documentGeneration.py b/modules/chat/documents/documentGeneration.py
index 9f9630de..255e77e3 100644
--- a/modules/chat/documents/documentGeneration.py
+++ b/modules/chat/documents/documentGeneration.py
@@ -21,12 +21,34 @@ class DocumentGenerator:
Returns a list of processed document dictionaries.
"""
try:
- documents = action_result.data.get("documents", [])
+ # Read documents from the standard documents field (not data.documents)
+ documents = action_result.documents if hasattr(action_result, 'documents') else []
+
+ if not documents:
+ logger.info(f"No documents found in action_result.documents for {action.execMethod}.{action.execAction}")
+ return []
+
+ logger.info(f"Processing {len(documents)} documents from action_result.documents")
+
+ # Check if documents are references (strings starting with "docItem:") or actual document objects
+ if documents and isinstance(documents[0], str) and documents[0].startswith("docItem:"):
+ # These are document references, resolve them to actual documents
+ logger.info(f"Resolving {len(documents)} document references to actual documents")
+ try:
+ actual_documents = self.service.getChatDocumentsFromDocumentList(documents)
+ logger.info(f"Resolved {len(actual_documents)} actual documents from references")
+ documents = actual_documents
+ except Exception as e:
+ logger.error(f"Error resolving document references: {str(e)}")
+ return []
+
processed_documents = []
for doc in documents:
processed_doc = self.processSingleDocument(doc, action)
if processed_doc:
processed_documents.append(processed_doc)
+
+ logger.info(f"Successfully processed {len(processed_documents)} documents")
return processed_documents
except Exception as e:
logger.error(f"Error processing action result documents: {str(e)}")
@@ -61,6 +83,35 @@ class DocumentGenerator:
'content': getattr(doc, 'content', ''),
'document': doc
}
+ elif hasattr(doc, 'documentName') and doc.documentName:
+ # ActionDocument object with documentName attribute
+ base_filename = doc.documentName
+ mime_type = getattr(doc, 'mimeType', 'application/octet-stream')
+ content = getattr(doc, 'documentData', '')
+
+ # Add result label to filename for ActionDocument objects
+ if hasattr(action, 'execResultLabel') and action.execResultLabel:
+ result_label = action.execResultLabel.strip()
+ if result_label:
+ # Check if filename already starts with resultLabel to avoid duplication
+ if not base_filename.startswith(f"{result_label}-"):
+ base_filename = f"{result_label}-{base_filename}"
+ logger.info(f"Added resultLabel '{result_label}' as prefix to ActionDocument filename: {base_filename}")
+ else:
+ logger.info(f"ActionDocument filename already has resultLabel prefix: {base_filename}")
+
+ # Calculate file size from actual content
+ fileSize = len(str(content)) if content else 0
+
+ logger.info(f"Processed ActionDocument: {base_filename}, content length: {len(str(content))}, mimeType: {mime_type}")
+
+ return {
+ 'filename': base_filename,
+ 'fileSize': fileSize,
+ 'mimeType': mime_type,
+ 'content': content,
+ 'document': doc
+ }
elif isinstance(doc, dict):
# Dictionary format document - handle both 'documentName' and 'filename' keys
base_filename = doc.get('documentName', doc.get('filename', ''))
@@ -159,7 +210,7 @@ class DocumentGenerator:
"""
try:
logger.info(f"Creating documents from action result for {action.execMethod}.{action.execAction}")
- logger.info(f"Action result data keys: {list(action_result.data.keys())}")
+ logger.info(f"Action result documents count: {len(action_result.documents) if action_result.documents else 0}")
processed_docs = self.processActionResultDocuments(action_result, action, workflow)
logger.info(f"Processed {len(processed_docs)} documents")
diff --git a/modules/chat/handling/executionState.py b/modules/chat/handling/executionState.py
index 5e8a4b8c..ce0ea95a 100644
--- a/modules/chat/handling/executionState.py
+++ b/modules/chat/handling/executionState.py
@@ -33,8 +33,19 @@ class TaskExecutionState:
"""Get available results from successful actions"""
results = []
for action in self.successful_actions:
- if action.data and action.data.get('result'):
- results.append(action.data['result'])
+ if action.documents:
+ # Extract text content from documents
+ for doc in action.documents:
+ if hasattr(doc, 'documentData'):
+ if isinstance(doc.documentData, dict):
+ result_text = doc.documentData.get("result", "")
+ elif isinstance(doc.documentData, str):
+ result_text = doc.documentData
+ else:
+ result_text = str(doc.documentData)
+
+ if result_text and result_text.strip():
+ results.append(result_text)
return results
def shouldRetryTask(self) -> bool:
diff --git a/modules/chat/handling/handlingTasks.py b/modules/chat/handling/handlingTasks.py
index 27e1cabe..9dc1ae5d 100644
--- a/modules/chat/handling/handlingTasks.py
+++ b/modules/chat/handling/handlingTasks.py
@@ -474,8 +474,16 @@ class HandlingTasks:
step_result={
'successful_actions': sum(1 for result in action_results if result.success),
'total_actions': len(action_results),
- 'results': [result.data.get('result', '') for result in action_results if result.success],
- 'errors': [result.error for result in action_results if not result.success]
+ 'results': [self._extractResultText(result) for result in action_results if result.success],
+ 'errors': [result.error for result in action_results if not result.success],
+ 'documents': [
+ {
+ 'action_index': i,
+ 'documents_count': len(result.documents) if hasattr(result, 'documents') and result.documents else 0,
+ 'documents': result.documents if hasattr(result, 'documents') and result.documents else []
+ }
+ for i, result in enumerate(action_results)
+ ]
}
)
# Check workflow status before calling AI service
@@ -624,9 +632,17 @@ class HandlingTasks:
if result.success:
created_documents = self.documentGenerator.createDocumentsFromActionResult(result, action, workflow)
action.setSuccess()
- action.result = result.data.get("result", "")
- action.execResultLabel = result_label
- await self.createActionMessage(action, result, workflow, result_label, created_documents, task_step, task_index)
+ # Extract result text from documents if available, otherwise use empty string
+ action.result = ""
+ if result.documents and len(result.documents) > 0:
+ # Try to get text content from the first document
+ first_doc = result.documents[0]
+ if hasattr(first_doc, 'documentData') and isinstance(first_doc.documentData, dict):
+ action.result = first_doc.documentData.get("result", "")
+ elif hasattr(first_doc, 'documentData') and isinstance(first_doc.documentData, str):
+ action.result = first_doc.documentData
+ action.execResultLabel = result.resultLabel or result_label
+ await self.createActionMessage(action, result, workflow, result.resultLabel or result_label, created_documents, task_step, task_index)
# Log action results
logger.info(f"✓ Action completed successfully")
@@ -689,38 +705,20 @@ class HandlingTasks:
"type": "error"
})
- # Extract document filenames for the ActionResult
- document_filenames = []
- for doc in created_documents:
- if hasattr(doc, 'filename'):
- document_filenames.append(doc.filename)
- elif isinstance(doc, dict) and 'filename' in doc:
- document_filenames.append(doc['filename'])
-
- # Also include the original documents from the service result for validation
- original_documents = result.data.get("documents", [])
-
# Log action summary
logger.info(f"=== TASK {task_num} ACTION {action_num} COMPLETED ===")
+ # Preserve the original documents field from the method result
+ # This ensures the standard document format is maintained
+ original_documents = result.documents if hasattr(result, 'documents') else []
+
+ # Extract result text from documents if available
+ result_text = self._extractResultText(result)
+
return ActionResult(
success=result.success,
- data={
- "result": result.data.get("result", ""),
- "documents": created_documents, # Include actual document objects in data
- "actionId": action.id,
- "actionMethod": action.execMethod,
- "actionName": action.execAction,
- "resultLabel": result_label
- },
- documents=document_filenames, # Keep as filenames for the documents field
- metadata={
- "actionId": action.id,
- "actionMethod": action.execMethod,
- "actionName": action.execAction,
- "resultLabel": result_label
- },
- validation={},
+ documents=original_documents, # Preserve original documents field from method result
+ resultLabel=result.resultLabel or result_label,
error=result.error or ""
)
except Exception as e:
@@ -728,18 +726,8 @@ class HandlingTasks:
action.setError(str(e))
return ActionResult(
success=False,
- data={
- "actionId": action.id,
- "actionMethod": action.execMethod,
- "actionName": action.execAction,
- "documents": []
- },
- metadata={
- "actionId": action.id,
- "actionMethod": action.execMethod,
- "actionName": action.execAction
- },
- validation={},
+ documents=[], # Empty documents for error case
+ resultLabel=result_label,
error=str(e)
)
@@ -901,4 +889,18 @@ class HandlingTasks:
return True
except Exception as e:
logger.error(f"Error validating actions: {str(e)}")
- return False
\ No newline at end of file
+ return False
+
+ def _extractResultText(self, result: ActionResult) -> str:
+ """Extract result text from ActionResult documents"""
+ if not result.success or not result.documents:
+ return ""
+
+ # Try to get text content from the first document
+ first_doc = result.documents[0]
+ if hasattr(first_doc, 'documentData') and isinstance(first_doc.documentData, dict):
+ return first_doc.documentData.get("result", "")
+ elif hasattr(first_doc, 'documentData') and isinstance(first_doc.documentData, str):
+ return first_doc.documentData
+ else:
+ return ""
\ No newline at end of file
diff --git a/modules/chat/handling/promptFactory.py b/modules/chat/handling/promptFactory.py
index a1fd9348..ba5f6dce 100644
--- a/modules/chat/handling/promptFactory.py
+++ b/modules/chat/handling/promptFactory.py
@@ -5,6 +5,9 @@ import json
import logging
from typing import Any, Dict
+# Set up logger
+logger = logging.getLogger(__name__)
+
# Prompt creation helpers extracted from managerChat.py
def createTaskPlanningPrompt(context: Dict[str, Any]) -> str:
@@ -45,21 +48,21 @@ REQUIRED JSON STRUCTURE:
}}
EXAMPLES OF GOOD TASK OBJECTIVES:
-- \"Extract key information from documents for email preparation\"
-- \"Draft professional email incorporating analyzed information\"
-- \"Send email using specified email account\"
-- \"Store email draft and confirmation in system\"
+- \"Analyze documents and extract key insights for business communication\"
+- \"Create professional business communication incorporating analyzed information\"
+- \"Execute business communication using specified channels\"
+- \"Document and store all business communication outcomes\"
EXAMPLES OF GOOD SUCCESS CRITERIA:
-- \"Document analysis completed with key points identified\"
-- \"Email draft created with professional tone and clear structure\"
-- \"Email successfully sent with delivery confirmation\"
-- \"All outputs properly stored and accessible for future use\"
+- \"Key insights extracted and ready for business use\"
+- \"Professional communication created with clear business value\"
+- \"Business communication successfully delivered\"
+- \"All outcomes properly documented and accessible\"
EXAMPLES OF BAD TASK OBJECTIVES:
-- \"Open and read the PDF file\" (too granular)
-- \"Identify table structure\" (technical detail)
-- \"Convert data to CSV format\" (implementation detail)
+- \"Read the PDF file\" (too granular - should be \"Analyze document content\")
+- \"Convert data to CSV\" (implementation detail - should be \"Structure data for analysis\")
+- \"Send email\" (too specific - should be \"Deliver business communication\")
NOTE: Respond with ONLY the JSON object. Do not include any explanatory text."""
@@ -73,6 +76,7 @@ async def createActionDefinitionPrompt(context, service) -> str:
retry_count = context.retry_count or 0
previous_action_results = context.previous_action_results or []
previous_review_result = context.previous_review_result
+ previous_handover = getattr(context, 'previous_handover', None)
methodList = service.getMethodsList()
method_actions = {}
for sig in methodList:
@@ -106,10 +110,17 @@ RETRY CONTEXT (Attempt {retry_count}):
Previous action results that failed or were incomplete:
"""
for i, result in enumerate(previous_action_results):
- retry_context += f"- Action {i+1}: {result.actionMethod or 'unknown'}.{result.actionName or 'unknown'}\n"
+ retry_context += f"- Action {i+1}: ActionResult\n"
retry_context += f" Status: {result.success and 'success' or 'failed'}\n"
retry_context += f" Error: {result.error or 'None'}\n"
- retry_context += f" Result: {(result.data.get('result', '') if result.data else '')[:100]}...\n"
+ # Check if result has documents and show document info
+ if hasattr(result, 'documents') and result.documents:
+ doc_info = f"Documents: {len(result.documents)} document(s)"
+ if result.documents[0].documentName:
+ doc_info += f" - {result.documents[0].documentName}"
+ retry_context += f" {doc_info}\n"
+ else:
+ retry_context += f" Documents: None\n"
if previous_review_result:
retry_context += f"""
Previous review feedback:
@@ -169,6 +180,16 @@ SUCCESS CRITERIA: {success_criteria_str}
CONTEXT - Chat History:
{messageSummary}
+WORKFLOW CONTEXT - Previous Messages Summary:
+The following summarizes key information from previous workflow interactions to provide context for continued workflows:
+- Previous user inputs and their outcomes
+- Key decisions and findings from earlier tasks
+- Document processing results and insights
+- User preferences and requirements established
+- Any constraints or limitations identified
+
+This context helps ensure your actions build upon previous work and maintain consistency with the overall workflow objectives.
+
AVAILABLE METHODS AND ACTIONS (with signatures):
{available_methods_str}
@@ -191,7 +212,12 @@ DOCUMENT REFERENCE EXAMPLES:
- Inventing message IDs instead of using actual document labels
PREVIOUS RESULTS: {previous_results_str}
-IMPROVEMENTS NEEDED: {improvements_str}{retry_context}
+IMPROVEMENTS NEEDED: {improvements_str}
+
+PREVIOUS TASK HANDOVER CONTEXT:
+{previous_handover.workflowSummary if previous_handover and previous_handover.workflowSummary else 'No previous task handover available'}
+
+{retry_context}
ACTION GENERATION PRINCIPLES:
- Create meaningful actions per task step
@@ -206,6 +232,13 @@ ACTION GENERATION PRINCIPLES:
- Address specific issues mentioned in previous review feedback
- When specifying expectedDocumentFormats, ensure AI prompts explicitly request pure data without markdown formatting
+DOCUMENT ROUTING GUIDANCE:
+- Each action should produce documents with a clear resultLabel for routing
+- Use consistent naming: "task{{task_id}}_action{{action_number}}_{{descriptive_label}}"
+- Ensure document flow: Action A produces documents that Action B can consume
+- Document labels should be descriptive of content, not just "results" or "output"
+- Consider what subsequent actions will need and structure outputs accordingly
+
INSTRUCTIONS:
- Generate actions to accomplish this task step using available documents, connections, and previous results
- Use docItem for single documents and docList labels for groups of documents as shown in AVAILABLE DOCUMENTS
@@ -382,46 +415,46 @@ async def createResultReviewPrompt(review_context) -> str:
for action_result in (review_context.action_results or []):
documents_metadata = []
- # FIX: Look for documents in the correct place - action_result.data.documents contains actual document objects
- # action_result.documents only contains document references (strings)
- documents_to_check = action_result.data.get("documents", [])
+ # Get document information from step_result.documents
+ action_index = len(step_result_serializable['action_results'])
+ step_documents = step_result.get('documents', [])
- for doc in documents_to_check:
- if hasattr(doc, 'filename'):
+ logger.debug(f"Processing action {action_index}: step_documents count = {len(step_documents)}")
+
+ if action_index < len(step_documents):
+ # Use the document information from step_result
+ step_doc_info = step_documents[action_index]
+ documents_count = step_doc_info.get('documents_count', 0)
+ documents_list = step_doc_info.get('documents', [])
+
+ logger.debug(f"Action {action_index}: documents_count = {documents_count}, documents_list length = {len(documents_list)}")
+
+ # Process the actual documents
+ for doc in documents_list:
+ # These are ActionDocument objects from ActionResult.documents
documents_metadata.append({
- 'filename': doc.filename,
- 'fileSize': getattr(doc, 'fileSize', 0),
+ 'filename': doc.documentName or 'unknown',
+ 'fileSize': len(str(doc.documentData or '')),
'mimeType': getattr(doc, 'mimeType', 'unknown')
})
- elif isinstance(doc, dict):
- documents_metadata.append({
- 'filename': doc.get('filename', 'unknown'),
- 'fileSize': doc.get('fileSize', 0),
- 'mimeType': doc.get('mimeType', 'unknown')
- })
- elif isinstance(doc, str):
- # Handle case where documents are just filenames
- documents_metadata.append({
- 'filename': doc,
- 'fileSize': 0,
- 'mimeType': 'unknown'
- })
+ else:
+ logger.warning(f"Action {action_index}: No step_documents info found - this should not happen with the new architecture")
+ # No fallback - if step_result.documents is missing, we have a bug
serializable_action_result = {
'status': 'completed' if action_result.success else 'failed',
- 'result_summary': action_result.data.get('result', '')[:200] + '...' if len(action_result.data.get('result', '')) > 200 else action_result.data.get('result', ''),
+ 'result_summary': action_result.resultLabel or 'Action completed successfully',
'error': action_result.error,
- 'resultLabel': action_result.data.get('resultLabel', ''),
+ 'resultLabel': action_result.resultLabel or '',
'documents_count': len(documents_metadata),
'documents_metadata': documents_metadata,
- 'actionId': action_result.actionId,
- 'actionMethod': action_result.actionMethod,
- 'actionName': action_result.actionName,
'success_indicator': (
- 'documents' if len(documents_metadata) > 0 else
- 'text_result' if action_result.data.get('result', '').strip() else 'none'
+ 'documents' if len(documents_metadata) > 0 else 'none'
)
}
+
+ logger.debug(f"Action {action_index}: Final documents_count = {len(documents_metadata)}")
+
step_result_serializable['action_results'].append(serializable_action_result)
step_result_json = json.dumps(step_result_serializable, indent=2, ensure_ascii=False)
success_criteria_str = ', '.join(task_step.success_criteria or [])
diff --git a/modules/chat/methodBase.py b/modules/chat/methodBase.py
index 863a7ddb..99c602d1 100644
--- a/modules/chat/methodBase.py
+++ b/modules/chat/methodBase.py
@@ -3,7 +3,7 @@ from typing import Dict, List, Optional, Any, Literal
from datetime import datetime, UTC
from pydantic import BaseModel, Field
import logging
-from modules.interfaces.interfaceChatModel import ActionResult
+
from functools import wraps
import inspect
@@ -41,7 +41,7 @@ class MethodBase:
sig = inspect.signature(attr)
params = {}
for param_name, param in sig.parameters.items():
- if param_name not in ['self', 'parameters', 'authData']:
+ if param_name not in ['self', 'parameters']:
param_type = param.annotation if param.annotation != param.empty else Any
params[param_name] = {
'type': param_type,
@@ -130,18 +130,6 @@ class MethodBase:
descriptions[lastParam] += " " + line
return descriptions, types
- def _validateDocumentListParameter(self, parameters: Dict[str, Any], paramName: str = "documentList") -> bool:
- """Validate that documentList parameter is a list of strings"""
- if paramName not in parameters:
- return False
-
- value = parameters[paramName]
- if not isinstance(value, list):
- return False
-
- # Check that all items in the list are strings
- return all(isinstance(item, str) for item in value)
-
def _extractMainDescription(self, docstring: str) -> str:
"""Extract main description from docstring"""
if not docstring:
@@ -167,109 +155,4 @@ class MethodBase:
elif hasattr(type_annotation, '_name'):
return type_annotation._name
else:
- return str(type_annotation)
-
- async def execute(self, action: str, parameters: Dict[str, Any], authData: Optional[Dict[str, Any]] = None) -> ActionResult:
- """
- Execute method action with authentication data
-
- Args:
- action: The action to execute
- parameters: Action parameters
- authData: Authentication data
-
- Returns:
- ActionResult containing execution results
-
- Raises:
- ValueError: If action is not supported
- RuntimeError: If authentication fails
- """
- try:
- # Validate action
- if action not in self.actions:
- raise ValueError(f"Unsupported action: {action}")
-
- # Validate parameters
- if not await self.validateParameters(action, parameters):
- return self._createResult(
- success=False,
- data={},
- error="Invalid parameters"
- )
-
- # Validate authentication
- if not self._validateAuth(authData):
- return self._createResult(
- success=False,
- data={},
- error="Authentication failed"
- )
-
- # Execute action
- return await self._executeAction(action, parameters, authData)
-
- except Exception as e:
- self.logger.error(f"Error executing action {action}: {str(e)}")
- return self._createResult(
- success=False,
- data={},
- error=str(e)
- )
-
- async def _executeAction(self, action: str, parameters: Dict[str, Any], authData: Optional[Dict[str, Any]] = None) -> ActionResult:
- """Execute specific action - to be implemented by subclasses"""
- raise NotImplementedError
-
- async def validateParameters(self, action: str, parameters: Dict[str, Any]) -> bool:
- """Validate action parameters"""
- try:
- if action not in self.actions:
- return False
-
- actionDef = self.actions[action]
- requiredParams = {k for k, v in actionDef['parameters'].items() if v['required']}
-
- # Check required parameters
- if not all(param in parameters for param in requiredParams):
- return False
-
- # Validate documentList parameter if present
- if "documentList" in parameters:
- if not self._validateDocumentListParameter(parameters, "documentList"):
- self.logger.error("documentList parameter must be a list of strings")
- return False
-
- return True
-
- except Exception as e:
- self.logger.error(f"Error validating parameters: {str(e)}")
- return False
-
- async def rollback(self, action: str, parameters: Dict[str, Any], authData: Optional[Dict[str, Any]] = None) -> None:
- """Rollback action if needed"""
- try:
- await self._rollbackAction(action, parameters, authData)
- except Exception as e:
- self.logger.error(f"Error rolling back action {action}: {str(e)}")
- raise
-
- async def _rollbackAction(self, action: str, parameters: Dict[str, Any], authData: Optional[Dict[str, Any]] = None) -> None:
- """Rollback specific action - to be implemented by subclasses"""
- pass
-
- def _createResult(self, success: bool, data: Dict[str, Any], metadata: Optional[Dict[str, Any]] = None, error: Optional[str] = None) -> ActionResult:
- """Create a method result"""
- return ActionResult(
- success=success,
- data=data,
- metadata=metadata or {},
- validation={},
- error=error
- )
-
- def _addValidationMessage(self, result: ActionResult, message: str) -> None:
- """Add a validation message to the result"""
- if 'messages' not in result.validation:
- result.validation['messages'] = []
- result.validation['messages'].append(message)
\ No newline at end of file
+ return str(type_annotation)
\ No newline at end of file
diff --git a/modules/chat/serviceCenter.py b/modules/chat/serviceCenter.py
index 3e3d6aa1..574653cb 100644
--- a/modules/chat/serviceCenter.py
+++ b/modules/chat/serviceCenter.py
@@ -68,13 +68,13 @@ class ServiceCenter:
# Discover actions from public methods
actions = {}
for methodName, method in inspect.getmembers(type(methodInstance), predicate=inspect.iscoroutinefunction):
- if not methodName.startswith('_') and methodName not in ['execute', 'validateParameters']:
+ if not methodName.startswith('_'):
# Bind the method to the instance
bound_method = method.__get__(methodInstance, type(methodInstance))
sig = inspect.signature(method)
params = {}
for paramName, param in sig.parameters.items():
- if paramName not in ['self', 'authData']:
+ if paramName not in ['self']:
# Get parameter type
paramType = param.annotation if param.annotation != param.empty else Any
@@ -719,9 +719,8 @@ Please provide a clear summary of this message."""
documentId=document.id
)
- # Update objectId to match document ID
- extractedContent.objectId = document.id
- extractedContent.objectType = "ChatDocument"
+ # Note: ExtractedContent model only has 'id' and 'contents' fields
+ # No need to set objectId or objectType as they don't exist in the model
return extractedContent
diff --git a/modules/interfaces/interfaceAppModel.py b/modules/interfaces/interfaceAppModel.py
index c98e62fc..51793caa 100644
--- a/modules/interfaces/interfaceAppModel.py
+++ b/modules/interfaces/interfaceAppModel.py
@@ -191,6 +191,7 @@ class Token(BaseModel, ModelMixin):
id: Optional[str] = None
userId: str
authority: AuthAuthority
+ connectionId: Optional[str] = Field(None, description="ID of the connection this token belongs to")
tokenAccess: str
tokenType: str = "bearer"
expiresAt: float
@@ -208,6 +209,7 @@ register_model_labels(
"id": {"en": "ID", "fr": "ID"},
"userId": {"en": "User ID", "fr": "ID utilisateur"},
"authority": {"en": "Authority", "fr": "Autorité"},
+ "connectionId": {"en": "Connection ID", "fr": "ID de connexion"},
"tokenAccess": {"en": "Access Token", "fr": "Jeton d'accès"},
"tokenType": {"en": "Token Type", "fr": "Type de jeton"},
"expiresAt": {"en": "Expires At", "fr": "Expire le"},
diff --git a/modules/interfaces/interfaceAppObjects.py b/modules/interfaces/interfaceAppObjects.py
index a3de6f69..3eeccc84 100644
--- a/modules/interfaces/interfaceAppObjects.py
+++ b/modules/interfaces/interfaceAppObjects.py
@@ -756,6 +756,7 @@ class AppObjects:
# Convert to dict and ensure all fields are properly set
token_dict = token.dict()
+ # Ensure userId is set to current user (this might override the token's userId)
token_dict["userId"] = self.currentUser.id
# Convert datetime objects to ISO format strings
@@ -776,8 +777,8 @@ class AppObjects:
logger.error(f"Error saving token: {str(e)}")
raise
- def getToken(self, authority: str) -> Optional[Token]:
- """Get the latest valid token for the current user and authority"""
+ def getToken(self, authority: str, auto_refresh: bool = True) -> Optional[Token]:
+ """Get the latest valid token for the current user and authority, optionally auto-refresh if expired"""
try:
# Get tokens for this user and authority
tokens = self.db.getRecordset("tokens", recordFilter={
@@ -794,8 +795,28 @@ class AppObjects:
# Check if token is expired
if latest_token.expiresAt and latest_token.expiresAt < datetime.now().timestamp():
- logger.warning(f"Token for {authority} is expired (expiresAt: {latest_token.expiresAt})")
- return None # Don't return expired tokens
+ if auto_refresh:
+ logger.info(f"Token for {authority} is expired, attempting refresh...")
+
+ # Import TokenManager here to avoid circular imports
+ from modules.security.tokenManager import TokenManager
+ token_manager = TokenManager()
+
+ # Try to refresh the token
+ refreshed_token = token_manager.refresh_token(latest_token)
+ if refreshed_token:
+ # NOTE(review): ordering bug — saveToken() runs first, then deleteToken(authority) deletes ALL tokens for this user/authority (see deleteToken docstring), including the just-saved refreshed token; delete the old token BEFORE saving the new one
+ self.saveToken(refreshed_token)
+ self.deleteToken(authority)
+
+ logger.info(f"Successfully refreshed token for {authority}")
+ return refreshed_token
+ else:
+ logger.warning(f"Failed to refresh expired token for {authority}")
+ return None
+ else:
+ logger.warning(f"Token for {authority} is expired (expiresAt: {latest_token.expiresAt})")
+ return None
return latest_token
@@ -803,6 +824,53 @@ class AppObjects:
logger.error(f"Error getting token: {str(e)}")
return None
+ def getTokenForConnection(self, connectionId: str, auto_refresh: bool = True) -> Optional[Token]:
+ """Get the token for a specific connection, optionally auto-refresh if expired"""
+ try:
+ # Get token for this specific connection
+ tokens = self.db.getRecordset("tokens", recordFilter={
+ "connectionId": connectionId
+ })
+
+ if not tokens:
+ logger.warning(f"No token found for connection: {connectionId}")
+ return None
+
+ # Sort by creation date and get the latest
+ tokens.sort(key=lambda x: x.get("createdAt", ""), reverse=True)
+ latest_token = Token(**tokens[0])
+
+ # Check if token is expired
+ if latest_token.expiresAt and latest_token.expiresAt < datetime.now().timestamp():
+ if auto_refresh:
+ logger.info(f"Token for connection {connectionId} is expired, attempting refresh...")
+
+ # Import TokenManager here to avoid circular imports
+ from modules.security.tokenManager import TokenManager
+ token_manager = TokenManager()
+
+ # Try to refresh the token
+ refreshed_token = token_manager.refresh_token(latest_token)
+ if refreshed_token:
+ # NOTE(review): deleteTokenByConnectionId() runs after saveToken() — if the refreshed token carries the same connectionId (it should), this deletes the freshly saved token as well; delete the old token before saving
+ self.saveToken(refreshed_token)
+ self.deleteTokenByConnectionId(connectionId)
+
+ logger.info(f"Successfully refreshed token for connection {connectionId}")
+ return refreshed_token
+ else:
+ logger.warning(f"Failed to refresh expired token for connection {connectionId}")
+ return None
+ else:
+ logger.warning(f"Token for connection {connectionId} is expired (expiresAt: {latest_token.expiresAt})")
+ return None
+
+ return latest_token
+
+ except Exception as e:
+ logger.error(f"Error getting token for connection {connectionId}: {str(e)}")
+ return None
+
def deleteToken(self, authority: str) -> None:
"""Delete all tokens for the current user and authority"""
try:
@@ -823,6 +891,44 @@ class AppObjects:
logger.error(f"Error deleting token: {str(e)}")
raise
+ def deleteTokenByConnectionId(self, connectionId: str) -> None:
+ """Delete all tokens for a specific connection"""
+ try:
+ # Get tokens to delete
+ tokens = self.db.getRecordset("tokens", recordFilter={
+ "connectionId": connectionId
+ })
+
+ # Delete each token
+ for token in tokens:
+ self.db.recordDelete("tokens", token["id"])
+
+ # Clear cache to ensure fresh data
+ self._clearTableCache("tokens")
+
+ except Exception as e:
+ logger.error(f"Error deleting token for connection {connectionId}: {str(e)}")
+ raise
+
+ def logout(self) -> None:
+ """Logout current user - clear user context and tokens"""
+ try:
+ # Clear user context
+ self.currentUser = None
+ self.userId = None
+ self.mandateId = None
+ self.access = None
+
+ # Clear database context
+ if hasattr(self, 'db'):
+ self.db.updateContext("")
+
+ logger.info("User logged out successfully")
+
+ except Exception as e:
+ logger.error(f"Error during logout: {str(e)}")
+ raise
+
# Public Methods
def getInterface(currentUser: User) -> AppObjects:
diff --git a/modules/interfaces/interfaceChatModel.py b/modules/interfaces/interfaceChatModel.py
index 2b28b3bc..59e033e2 100644
--- a/modules/interfaces/interfaceChatModel.py
+++ b/modules/interfaces/interfaceChatModel.py
@@ -12,73 +12,50 @@ from modules.shared.attributeUtils import register_model_labels, ModelMixin
# ===== Method Models =====
+class ActionDocument(BaseModel, ModelMixin):
+ """Clear document structure for action results"""
+ documentName: str = Field(description="Name of the document")
+ documentData: Any = Field(description="Content/data of the document")
+ mimeType: str = Field(description="MIME type of the document")
+
+# Register labels for ActionDocument
+register_model_labels(
+ "ActionDocument",
+ {"en": "Action Document", "fr": "Document d'action"},
+ {
+ "documentName": {"en": "Document Name", "fr": "Nom du document"},
+ "documentData": {"en": "Document Data", "fr": "Données du document"},
+ "mimeType": {"en": "MIME Type", "fr": "Type MIME"}
+ }
+)
+
class ActionResult(BaseModel, ModelMixin):
- """Unified model for action results with workflow state management"""
- # Core result fields
- success: bool = Field(description="Whether the method execution was successful")
- data: Dict[str, Any] = Field(description="Result data")
- metadata: Dict[str, Any] = Field(default_factory=dict, description="Additional metadata")
- error: Optional[str] = Field(None, description="Error message if any")
+ """Clean action result with documents as primary output"""
+ # Core result
+ success: bool = Field(description="Whether execution succeeded")
+ error: Optional[str] = Field(None, description="Error message if failed")
- # Action identification
- actionId: Optional[str] = Field(None, description="ID of the action that produced this result")
- actionMethod: Optional[str] = Field(None, description="Method of the action that produced this result")
- actionName: Optional[str] = Field(None, description="Name of the action that produced this result")
-
- # Document handling
- documents: List[str] = Field(default_factory=list, description="List of document references")
- resultLabel: Optional[str] = Field(None, description="Label for the result")
-
- # Validation and workflow state
- validation: Dict[str, Any] = Field(default_factory=dict, description="Validation information")
- is_retry: bool = Field(default=False, description="Whether this is a retry attempt")
- previous_error: Optional[str] = Field(None, description="Previous error message for retries")
- applied_improvements: List[str] = Field(default_factory=list, description="Improvements applied for retry")
+ # Primary output - documents
+ documents: List[ActionDocument] = Field(default_factory=list, description="Document outputs")
+ resultLabel: Optional[str] = Field(None, description="Label for document routing")
@classmethod
- def success(cls, documents: List[str] = None, resultLabel: str = None, data: Dict[str, Any] = None,
- actionId: str = None, actionMethod: str = None, actionName: str = None) -> 'ActionResult':
+ def success(cls, documents: List[ActionDocument] = None, resultLabel: str = None) -> 'ActionResult':
"""Create a successful action result"""
return cls(
success=True,
- data=data or {},
documents=documents or [],
- resultLabel=resultLabel,
- actionId=actionId,
- actionMethod=actionMethod,
- actionName=actionName
+ resultLabel=resultLabel
)
@classmethod
- def failure(cls, error: str, data: Dict[str, Any] = None,
- actionId: str = None, actionMethod: str = None, actionName: str = None) -> 'ActionResult':
+ def failure(cls, error: str, documents: List[ActionDocument] = None, resultLabel: str = None) -> 'ActionResult':
"""Create a failed action result"""
return cls(
success=False,
- data=data or {},
+ documents=documents or [],
error=error,
- actionId=actionId,
- actionMethod=actionMethod,
- actionName=actionName
- )
-
- @classmethod
- def retry(cls, previous_result: 'ActionResult', improvements: List[str] = None) -> 'ActionResult':
- """Create a retry action result based on a previous result"""
- return cls(
- success=previous_result.success,
- data=previous_result.data,
- metadata=previous_result.metadata,
- validation=previous_result.validation,
- error=previous_result.error,
- documents=previous_result.documents,
- resultLabel=previous_result.resultLabel,
- actionId=previous_result.actionId,
- actionMethod=previous_result.actionMethod,
- actionName=previous_result.actionName,
- is_retry=True,
- previous_error=previous_result.error,
- applied_improvements=improvements or []
+ resultLabel=resultLabel
)
# Register labels for ActionResult
@@ -87,18 +64,9 @@ register_model_labels(
{"en": "Action Result", "fr": "Résultat de l'action"},
{
"success": {"en": "Success", "fr": "Succès"},
- "data": {"en": "Data", "fr": "Données"},
- "metadata": {"en": "Metadata", "fr": "Métadonnées"},
- "validation": {"en": "Validation", "fr": "Validation"},
"error": {"en": "Error", "fr": "Erreur"},
"documents": {"en": "Documents", "fr": "Documents"},
- "resultLabel": {"en": "Result Label", "fr": "Étiquette du résultat"},
- "actionId": {"en": "Action ID", "fr": "ID de l'action"},
- "actionMethod": {"en": "Action Method", "fr": "Méthode de l'action"},
- "actionName": {"en": "Action Name", "fr": "Nom de l'action"},
- "is_retry": {"en": "Is Retry", "fr": "Est une nouvelle tentative"},
- "previous_error": {"en": "Previous Error", "fr": "Erreur précédente"},
- "applied_improvements": {"en": "Applied Improvements", "fr": "Améliorations appliquées"}
+ "resultLabel": {"en": "Result Label", "fr": "Étiquette du résultat"}
}
)
@@ -536,21 +504,102 @@ class TaskStep(BaseModel, ModelMixin):
success_criteria: Optional[list[str]] = []
estimated_complexity: Optional[str] = None
+class TaskHandover(BaseModel, ModelMixin):
+ """Structured handover between workflow phases and tasks"""
+ taskId: str = Field(description="Target task ID")
+ sourceTask: Optional[str] = Field(None, description="Source task ID")
+
+ # Document handovers
+ inputDocuments: List[DocumentExchange] = Field(default_factory=list, description="Available input documents")
+ outputDocuments: List[DocumentExchange] = Field(default_factory=list, description="Produced output documents")
+
+ # Context and state
+ context: Dict[str, Any] = Field(default_factory=dict, description="Task context")
+ previousResults: List[str] = Field(default_factory=list, description="Previous result summaries")
+ improvements: List[str] = Field(default_factory=list, description="Improvement suggestions")
+
+ # Workflow context
+ workflowSummary: Optional[str] = Field(None, description="Summarized workflow context")
+ messageHistory: List[str] = Field(default_factory=list, description="Key message summaries")
+
+ # Metadata
+ timestamp: datetime = Field(default_factory=lambda: datetime.now(UTC), description="When the handover was created")
+ handoverType: str = Field(default="task", description="Type of handover: task, phase, or workflow")
+
+ def addInputDocument(self, documentExchange: DocumentExchange) -> None:
+ """Add an input document exchange"""
+ self.inputDocuments.append(documentExchange)
+
+ def addOutputDocument(self, documentExchange: DocumentExchange) -> None:
+ """Add an output document exchange"""
+ self.outputDocuments.append(documentExchange)
+
+ def getDocumentsForAction(self, actionId: str) -> List[DocumentExchange]:
+ """Get all document exchanges relevant for a specific action"""
+ relevant = []
+ for doc_exchange in self.inputDocuments + self.outputDocuments:
+ if doc_exchange.isForAction(actionId):
+ relevant.append(doc_exchange)
+ return relevant
+
+# Register labels for TaskHandover
+register_model_labels(
+ "TaskHandover",
+ {"en": "Task Handover", "fr": "Transfert de tâche"},
+ {
+ "taskId": {"en": "Task ID", "fr": "ID de la tâche"},
+ "sourceTask": {"en": "Source Task", "fr": "Tâche source"},
+ "inputDocuments": {"en": "Input Documents", "fr": "Documents d'entrée"},
+ "outputDocuments": {"en": "Output Documents", "fr": "Documents de sortie"},
+ "context": {"en": "Context", "fr": "Contexte"},
+ "previousResults": {"en": "Previous Results", "fr": "Résultats précédents"},
+ "improvements": {"en": "Improvements", "fr": "Améliorations"},
+ "workflowSummary": {"en": "Workflow Summary", "fr": "Résumé du workflow"},
+ "messageHistory": {"en": "Message History", "fr": "Historique des messages"},
+ "timestamp": {"en": "Timestamp", "fr": "Horodatage"},
+ "handoverType": {"en": "Handover Type", "fr": "Type de transfert"}
+ }
+)
class TaskContext(BaseModel, ModelMixin):
task_step: TaskStep
workflow: Optional['ChatWorkflow'] = None
workflow_id: Optional[str] = None
+
+ # Available resources
available_documents: Optional[list[str]] = []
+ available_connections: Optional[list[str]] = []
+
+ # Previous execution state
previous_results: Optional[list[str]] = []
+ previous_handover: Optional[TaskHandover] = None
+
+ # Current execution state
improvements: Optional[list[str]] = []
retry_count: Optional[int] = 0
previous_action_results: Optional[list] = []
previous_review_result: Optional[dict] = None
is_regeneration: Optional[bool] = False
+
+ # Failure analysis
failure_patterns: Optional[list[str]] = []
failed_actions: Optional[list] = []
successful_actions: Optional[list] = []
+
+ def getDocumentReferences(self) -> List[str]:
+ """Get all available document references"""
+ docs = self.available_documents or []
+ if self.previous_handover:
+ for doc_exchange in self.previous_handover.inputDocuments:
+ docs.extend(doc_exchange.documents)
+ return list(set(docs)) # Remove duplicates — NOTE(review): set() loses ordering, and `docs` aliases self.available_documents (extended in place above when non-empty); confirm both effects are intended
+
+ def addImprovement(self, improvement: str) -> None:
+ """Add an improvement suggestion"""
+ if improvement not in (self.improvements or []):
+ if self.improvements is None:
+ self.improvements = []
+ self.improvements.append(improvement)
class ReviewContext(BaseModel, ModelMixin):
task_step: TaskStep
@@ -582,3 +631,5 @@ class WorkflowResult(BaseModel, ModelMixin):
final_results_count: int
error: Optional[str] = None
phase: Optional[str] = None
+
+
diff --git a/modules/methods/EXCLUDED_methodCoder.py b/modules/methods/EXCLUDED_methodCoder.py
deleted file mode 100644
index c935b1a1..00000000
--- a/modules/methods/EXCLUDED_methodCoder.py
+++ /dev/null
@@ -1,327 +0,0 @@
-from typing import Dict, Any, Optional, List
-import logging
-import uuid
-from datetime import datetime, UTC
-
-from modules.chat.methodBase import MethodBase, ActionResult, action
-
-logger = logging.getLogger(__name__)
-
-class MethodCoder(MethodBase):
- """Coder method implementation for code operations"""
-
- def __init__(self, serviceCenter: Any):
- """Initialize the coder method"""
- super().__init__(serviceCenter)
- self.name = "coder"
- self.description = "Handle code operations like analysis, generation, and refactoring"
-
- @action
- async def analyze(self, parameters: Dict[str, Any]) -> ActionResult:
- """
- Analyze code quality and structure
-
- Parameters:
- documentList (str): Reference to the document list to analyze
- aiPrompt (str): AI prompt for code analysis
- language (str, optional): Programming language (default: "python")
- checks (List[str], optional): Types of checks to perform (default: ["complexity", "style", "security"])
- """
- try:
- documentList = parameters.get("documentList")
- aiPrompt = parameters.get("aiPrompt")
- language = parameters.get("language", "python")
- checks = parameters.get("checks", ["complexity", "style", "security"])
-
- if not documentList:
- return self._createResult(
- success=False,
- data={},
- error="Document list reference is required"
- )
-
- if not aiPrompt:
- return self._createResult(
- success=False,
- data={},
- error="AI prompt is required"
- )
-
- # Handle new document list format (list of strings)
- chatDocuments = self.service.getChatDocumentsFromDocumentList(documentList)
- if not chatDocuments:
- return self._createResult(
- success=False,
- data={},
- error="No documents found for the provided reference"
- )
-
- # Process each document individually
- all_code_content = []
-
- for chatDocument in chatDocuments:
- fileId = chatDocument.fileId
- code = self.service.getFileData(fileId)
- file_info = self.service.getFileInfo(fileId)
-
- if not code:
- logger.warning(f"Code file is empty for fileId: {fileId}")
- continue
-
- # Use AI prompt to extract relevant code content
- extracted_content = await self.service.extractContentFromFileData(
- prompt=aiPrompt,
- fileData=code,
- filename=file_info.get('name', 'code'),
- mimeType=file_info.get('mimeType', 'text/plain'),
- base64Encoded=False
- )
-
- all_code_content.append(extracted_content)
-
- if not all_code_content:
- return self._createResult(
- success=False,
- data={},
- error="No code content could be extracted from any documents"
- )
-
- # Extract text content from ExtractedContent objects
- text_contents = []
- for content_obj in all_code_content:
- if hasattr(content_obj, 'contents') and content_obj.contents:
- # Extract text from ContentItem objects
- for content_item in content_obj.contents:
- if hasattr(content_item, 'data') and content_item.data:
- text_contents.append(content_item.data)
- elif isinstance(content_obj, str):
- text_contents.append(content_obj)
- else:
- # Fallback: convert to string representation
- text_contents.append(str(content_obj))
-
- # Combine all extracted text content for analysis
- combined_content = "\n\n--- CODE SEPARATOR ---\n\n".join(text_contents)
-
- # Create analysis prompt
- analysis_prompt = f"""
- Analyze this {language} code for quality, structure, and potential issues.
-
- Code to analyze:
- {combined_content}
-
- Please check for:
- {', '.join(checks)}
-
- Provide a detailed analysis including:
- 1. Code quality assessment
- 2. Potential issues and improvements
- 3. Security considerations
- 4. Performance optimizations
- 5. Best practices compliance
- 6. Summary of findings across all documents
- """
-
- # Use AI service for analysis
- analysis_result = await self.service.interfaceAiCalls.callAiTextAdvanced(analysis_prompt)
-
- # Create result data
- result_data = {
- "documentCount": len(chatDocuments),
- "language": language,
- "checks": checks,
- "analysis": analysis_result,
- "timestamp": datetime.now(UTC).isoformat()
- }
-
- return self._createResult(
- success=True,
- data={
- "documents": [
- {
- "documentName": f"code_analysis_{datetime.now(UTC).strftime('%Y%m%d_%H%M%S')}.json",
- "documentData": result_data
- }
- ]
- }
- )
-
- except Exception as e:
- logger.error(f"Error analyzing code: {str(e)}")
- return self._createResult(
- success=False,
- data={},
- error=str(e)
- )
-
- @action
- async def generate(self, parameters: Dict[str, Any]) -> ActionResult:
- """
- Generate code based on requirements
-
- Parameters:
- requirements (str): Requirements for the code to generate
- language (str, optional): Programming language (default: "python")
- template (str, optional): Template or pattern to follow
- """
- try:
- requirements = parameters.get("requirements")
- language = parameters.get("language", "python")
- template = parameters.get("template")
-
- if not requirements:
- return self._createResult(
- success=False,
- data={},
- error="Requirements are required"
- )
-
- # Create generation prompt
- generation_prompt = f"""
- Generate {language} code based on the following requirements:
-
- Requirements:
- {requirements}
-
- {f'Template to follow: {template}' if template else ''}
-
- Please provide:
- 1. Complete, working code
- 2. Clear comments and documentation
- 3. Error handling where appropriate
- 4. Best practices implementation
- """
-
- # Use AI service for code generation
- generated_code = await self.service.interfaceAiCalls.callAiTextAdvanced(generation_prompt)
-
- # Create result data
- result_data = {
- "language": language,
- "requirements": requirements,
- "code": generated_code,
- "timestamp": datetime.now(UTC).isoformat()
- }
-
- return self._createResult(
- success=True,
- data={
- "documentName": f"generated_code_{datetime.now(UTC).strftime('%Y%m%d_%H%M%S')}.{language}",
- "documentData": result_data
- }
- )
-
- except Exception as e:
- logger.error(f"Error generating code: {str(e)}")
- return self._createResult(
- success=False,
- data={},
- error=str(e)
- )
-
- @action
- async def refactor(self, parameters: Dict[str, Any]) -> ActionResult:
- """
- Refactor code for better quality
-
- Parameters:
- documentList (str): Reference to the document list to refactor
- aiImprovementPrompt (str): AI prompt for code improvements
- language (str, optional): Programming language (default: "python")
- """
- try:
- documentList = parameters.get("documentList")
- aiImprovementPrompt = parameters.get("aiImprovementPrompt")
- language = parameters.get("language", "python")
-
- if not documentList:
- return self._createResult(
- success=False,
- data={},
- error="Document list reference is required"
- )
-
- if not aiImprovementPrompt:
- return self._createResult(
- success=False,
- data={},
- error="AI improvement prompt is required"
- )
-
- # Handle new document list format (list of strings)
- chatDocuments = self.service.getChatDocumentsFromDocumentList(documentList)
- if not chatDocuments:
- return self._createResult(
- success=False,
- data={},
- error="No documents found for the provided reference"
- )
-
- # Process each document individually
- refactored_results = []
-
- for chatDocument in chatDocuments:
- fileId = chatDocument.fileId
- code = self.service.getFileData(fileId)
- file_info = self.service.getFileInfo(fileId)
-
- if not code:
- logger.warning(f"Code file is empty for fileId: {fileId}")
- continue
-
- # Create refactoring prompt for this specific document
- refactor_prompt = f"""
- Refactor this {language} code based on the following improvement requirements:
-
- Improvement requirements:
- {aiImprovementPrompt}
-
- Original code:
- {code}
-
- Please provide:
- 1. Refactored code with improvements
- 2. Explanation of changes made
- 3. Benefits of the refactoring
- 4. Any potential trade-offs
- """
-
- # Use AI service for refactoring
- refactored_code = await self.service.interfaceAiCalls.callAiTextAdvanced(refactor_prompt)
-
- refactored_results.append({
- "original_file": file_info.get('name', 'unknown'),
- "original_code": code,
- "refactored_code": refactored_code
- })
-
- if not refactored_results:
- return self._createResult(
- success=False,
- data={},
- error="No code could be refactored from any documents"
- )
-
- # Create result data
- result_data = {
- "documentCount": len(chatDocuments),
- "language": language,
- "refactored_results": refactored_results,
- "timestamp": datetime.now(UTC).isoformat()
- }
-
- return self._createResult(
- success=True,
- data={
- "documentName": f"refactored_code_{datetime.now(UTC).strftime('%Y%m%d_%H%M%S')}.{language}",
- "documentData": result_data
- }
- )
-
- except Exception as e:
- logger.error(f"Error refactoring code: {str(e)}")
- return self._createResult(
- success=False,
- data={},
- error=str(e)
- )
\ No newline at end of file
diff --git a/modules/methods/methodAi.py b/modules/methods/methodAi.py
index b23db80b..3e5bcad7 100644
--- a/modules/methods/methodAi.py
+++ b/modules/methods/methodAi.py
@@ -5,10 +5,10 @@ Handles direct AI calls for any type of task.
import logging
from typing import Dict, Any, List, Optional
-import uuid
from datetime import datetime, UTC
-from modules.chat.methodBase import MethodBase, ActionResult, action
+from modules.chat.methodBase import MethodBase, action
+from modules.interfaces.interfaceChatModel import ActionResult
logger = logging.getLogger(__name__)
@@ -43,9 +43,7 @@ class MethodAi(MethodBase):
customInstructions = parameters.get("customInstructions", "")
if not aiPrompt:
- return self._createResult(
- success=False,
- data={},
+ return ActionResult.failure(
error="AI prompt is required"
)
@@ -56,27 +54,79 @@ class MethodAi(MethodBase):
if chatDocuments:
context_parts = []
for doc in chatDocuments:
- fileId = doc.fileId
- file_data = self.service.getFileData(fileId)
- file_info = self.service.getFileInfo(fileId)
+ file_info = self.service.getFileInfo(doc.fileId)
- if file_data:
- try:
- # Try to decode as text for context
- content = file_data.decode('utf-8')
- metadata_info = ""
- if file_info and includeMetadata:
- metadata_info = f" (Size: {file_info.get('fileSize', 'unknown')}, Type: {file_info.get('mimeType', 'unknown')})"
+ try:
+ # Use the document content extraction service with the specific AI prompt context.
+ # NOTE(review): `output_extension` is interpolated into this prompt, but the surviving "Determine output format" block assigns it only AFTER this loop — verify it is defined by this point or this raises NameError whenever documents are attached
+ extraction_prompt = f"""
+ Extract content from this document for AI processing context.
+
+ AI Task: {aiPrompt}
+ Processing Mode: {processingMode}
+ Expected Output: {output_extension.upper()} format
+
+ Requirements:
+ 1. Extract the most relevant text content that would be useful for the AI task
+ 2. Focus on content that directly relates to: {aiPrompt}
+ 3. Include key information, data, and insights that the AI needs
+ 4. Provide clean, readable text without formatting artifacts
+
+ Document: {doc.filename}
+ """
+
+ logger.debug(f"Extracting content from {doc.filename} with task-specific prompt: {extraction_prompt[:100]}...")
+
+ extracted_content = await self.service.extractContentFromDocument(
+ prompt=extraction_prompt.strip(),
+ document=doc
+ )
+
+ if extracted_content and extracted_content.contents:
+ # Get the first content item's data
+ content = ""
+ for content_item in extracted_content.contents:
+ if hasattr(content_item, 'data') and content_item.data:
+ content += content_item.data + " "
+
- # Adjust context length based on processing mode
- max_length = 5000 if processingMode == "detailed" else 3000 if processingMode == "advanced" else 2000
- context_parts.append(f"Document: {doc.filename}{metadata_info}\nContent:\n{content[:max_length]}...")
- except UnicodeDecodeError:
- context_parts.append(f"Document: {doc.filename} [Binary content]")
+ if content.strip():
+ metadata_info = ""
+ if file_info and includeMetadata:
+ metadata_info = f" (Size: {file_info.get('fileSize', 'unknown')}, Type: {file_info.get('mimeType', 'unknown')})"
+
+ # Adjust context length based on processing mode and AI task relevance
+ base_length = 5000 if processingMode == "detailed" else 3000 if processingMode == "advanced" else 2000
+
+ # For detailed mode, include more context
+ if processingMode == "detailed":
+ context_parts.append(f"Document: {doc.filename}{metadata_info}\nRelevance to AI Task: This document contains content directly related to '{aiPrompt[:100]}...'\nContent:\n{content[:base_length]}...")
+ else:
+ context_parts.append(f"Document: {doc.filename}{metadata_info}\nContent:\n{content[:base_length]}...")
+ else:
+ context_parts.append(f"Document: {doc.filename} [No readable text content - binary file]")
+ else:
+ context_parts.append(f"Document: {doc.filename} [No readable text content - binary file]")
+
+ except Exception as extract_error:
+ context_parts.append(f"Document: {doc.filename} [Could not extract content - binary file]")
if context_parts:
- context = "\n\n".join(context_parts)
- logger.info(f"Included {len(chatDocuments)} documents in AI context")
+ # Add a summary header to help the AI understand the context
+ context_header = f"""
+ === DOCUMENT CONTEXT FOR AI PROCESSING ===
+ AI Task: {aiPrompt[:100]}...
+ Processing Mode: {processingMode}
+ Expected Output Format: {output_extension.upper()}
+ Total Documents: {len(chatDocuments)}
+
+ The following documents contain content relevant to your task.
+ Use this information to provide the most accurate and helpful response.
+ ================================================
+ """
+
+ context = context_header + "\n\n" + "\n\n".join(context_parts)
+ logger.info(f"Included {len(chatDocuments)} documents in AI context with task-specific extraction")
# Determine output format
output_extension = ".txt" # Default
@@ -126,39 +176,23 @@ class MethodAi(MethodBase):
timestamp = datetime.now(UTC).strftime('%Y%m%d_%H%M%S')
filename = f"ai_{processingMode}_{timestamp}{output_extension}"
- # Create document through service (but don't add to workflow - let calling layer handle that)
- document = self.service.createDocument(
- fileName=filename,
- mimeType=output_mime_type,
- content=result,
- base64encoded=False
- )
+
- return self._createResult(
- success=True,
- data={
- "result": result,
- "filename": filename,
- "documentId": document.id if hasattr(document, 'id') else None,
- "processedDocuments": len(documentList) if documentList else 0,
- "processingMode": processingMode,
- "document": document # Include the created document in the result data
- },
- metadata={
- "method": "ai.process",
- "promptLength": len(aiPrompt),
- "contextLength": len(context),
- "outputFormat": output_extension,
- "includeMetadata": includeMetadata,
- "processingMode": processingMode,
- "hasCustomInstructions": bool(customInstructions)
- }
+ # Return result in the standard ActionResult format
+ return ActionResult.success(
+ documents=[{
+ "documentName": filename,
+ "documentData": {
+ "result": result,
+ "filename": filename,
+ "processedDocuments": len(documentList) if documentList else 0
+ },
+ "mimeType": output_mime_type
+ }]
)
except Exception as e:
- logger.error(f"Error in ai.process: {str(e)}")
- return self._createResult(
- success=False,
- data={},
- error=f"AI processing failed: {str(e)}"
+ logger.error(f"Error in AI processing: {str(e)}")
+ return ActionResult.failure(
+ error=str(e)
)
diff --git a/modules/methods/methodDocument.py b/modules/methods/methodDocument.py
index 7bc899f5..3d52af85 100644
--- a/modules/methods/methodDocument.py
+++ b/modules/methods/methodDocument.py
@@ -7,7 +7,8 @@ import logging
from typing import Dict, Any, List, Optional
from datetime import datetime, UTC
-from modules.chat.methodBase import MethodBase, ActionResult, action
+from modules.chat.methodBase import MethodBase, action
+from modules.interfaces.interfaceChatModel import ActionResult
logger = logging.getLogger(__name__)
@@ -23,13 +24,13 @@ class MethodDocument(MethodBase):
@action
async def extract(self, parameters: Dict[str, Any]) -> ActionResult:
"""
- Extract specific content from document with AI prompt and return it in the specified format.
+ Extract content from any document using AI prompt.
Parameters:
- documentList (str): Reference to the document list to extract content from
- aiPrompt (str): AI prompt for content extraction
- expectedDocumentFormats (list, optional): Expected document formats with extension, mimeType, description
- includeMetadata (bool, optional): Whether to include metadata (default: True)
+ documentList (str): Document list reference
+ aiPrompt (str): AI prompt for extraction
+ expectedDocumentFormats (list, optional): Output formats
+ includeMetadata (bool, optional): Include metadata (default: True)
"""
try:
documentList = parameters.get("documentList")
@@ -38,24 +39,18 @@ class MethodDocument(MethodBase):
includeMetadata = parameters.get("includeMetadata", True)
if not documentList:
- return self._createResult(
- success=False,
- data={},
+ return ActionResult.failure(
error="Document list reference is required"
)
if not aiPrompt:
- return self._createResult(
- success=False,
- data={},
+ return ActionResult.failure(
error="AI prompt is required"
)
chatDocuments = self.service.getChatDocumentsFromDocumentList(documentList)
if not chatDocuments:
- return self._createResult(
- success=False,
- data={},
+ return ActionResult.failure(
error="No documents found for the provided reference"
)
@@ -64,31 +59,30 @@ class MethodDocument(MethodBase):
file_infos = []
for chatDocument in chatDocuments:
- fileId = chatDocument.fileId
- file_data = self.service.getFileData(fileId)
- file_info = self.service.getFileInfo(fileId)
+ file_info = self.service.getFileInfo(chatDocument.fileId)
- if not file_data:
- logger.warning(f"File not found or empty for fileId: {fileId}")
+ try:
+ # Use the document content extraction service with the specific AI prompt
+ # This handles all document types (text, binary, image, etc.) intelligently
+ extracted_content = await self.service.extractContentFromDocument(
+ prompt=aiPrompt,
+ document=chatDocument
+ )
+
+ if extracted_content and extracted_content.contents:
+ all_extracted_content.append(extracted_content)
+ if includeMetadata:
+ file_infos.append(file_info)
+ logger.info(f"Successfully extracted content from {chatDocument.filename}")
+ else:
+ logger.warning(f"No content extracted from {chatDocument.filename}")
+
+ except Exception as e:
+ logger.error(f"Error extracting content from {chatDocument.filename}: {str(e)}")
continue
-
- extracted_content = await self.service.extractContentFromFileData(
- prompt=aiPrompt,
- fileData=file_data,
- filename=file_info.get('name', 'document'),
- mimeType=file_info.get('mimeType', 'application/octet-stream'),
- base64Encoded=False,
- documentId=chatDocument.id
- )
-
- all_extracted_content.append(extracted_content)
- if includeMetadata:
- file_infos.append(file_info)
if not all_extracted_content:
- return self._createResult(
- success=False,
- data={},
+ return ActionResult.failure(
error="No content could be extracted from any documents"
)
@@ -164,31 +158,25 @@ class MethodDocument(MethodBase):
"mimeType": final_mime_type
})
- return self._createResult(
- success=True,
- data={
- "documents": output_documents
- }
+ return ActionResult.success(
+ documents=output_documents
)
except Exception as e:
logger.error(f"Error extracting content: {str(e)}")
- return self._createResult(
- success=False,
- data={},
+ return ActionResult.failure(
error=str(e)
)
@action
async def generate(self, parameters: Dict[str, Any]) -> ActionResult:
"""
- Generate documents in specific formats from document references.
- This action automatically extracts content from documents and converts it to the specified format.
+ Convert TEXT-ONLY documents to target formats (NO AI usage).
Parameters:
- documentList (list): List of document references to extract content from
- expectedDocumentFormats (list): Expected document formats with extension, mimeType, description
- originalDocuments (list, optional): List of original document names
- includeMetadata (bool, optional): Whether to include metadata (default: True)
+ documentList (list): TEXT-ONLY documents only
+ expectedDocumentFormats (list): Target formats
+ originalDocuments (list, optional): Original names
+ includeMetadata (bool, optional): Include metadata (default: True)
"""
try:
document_list = parameters.get("documentList", [])
@@ -197,16 +185,12 @@ class MethodDocument(MethodBase):
include_metadata = parameters.get("includeMetadata", True)
if not document_list:
- return self._createResult(
- success=False,
- data={},
+ return ActionResult.failure(
error="Document list is required for generation"
)
if not expected_document_formats or len(expected_document_formats) == 0:
- return self._createResult(
- success=False,
- data={},
+ return ActionResult.failure(
error="Expected document formats specification is required"
)
@@ -215,9 +199,7 @@ class MethodDocument(MethodBase):
logger.info(f"Found {len(chat_documents)} chat documents")
if not chat_documents:
- return self._createResult(
- success=False,
- data={},
+ return ActionResult.failure(
error="No documents found for the provided documentList reference"
)
@@ -229,28 +211,42 @@ class MethodDocument(MethodBase):
output_documents = []
for i, chat_document in enumerate(chat_documents):
- # Extract content from this document
- # ChatDocument is just a reference, so we need to get file data using fileId
+ # Extract content from this document directly - NO AI, just read the data as-is
+ # This ensures we get the original text content for format conversion
content = ""
if hasattr(chat_document, 'fileId') and chat_document.fileId:
- # Need to get file data
- file_data = self.service.getFileData(chat_document.fileId)
- if file_data:
- if isinstance(file_data, bytes):
- content = file_data.decode('utf-8', errors='ignore')
+ try:
+ # Get file data directly without AI processing
+ file_data = self.service.getFileData(chat_document.fileId)
+ if file_data:
+ # Check if it's text data and convert to string
+ if isinstance(file_data, bytes):
+ try:
+ # Try to decode as UTF-8 to check if it's text
+ content = file_data.decode('utf-8')
+ logger.info(f"Document {i+1} ({chat_document.filename}): Successfully decoded as UTF-8 text")
+ except UnicodeDecodeError:
+ logger.info(f"Document {i+1} ({chat_document.filename}): Binary data, not text - skipping")
+ continue
+ else:
+ # Already a string
+ content = str(file_data)
+ logger.info(f"Document {i+1} ({chat_document.filename}): Already text data")
else:
- content = str(file_data)
- else:
- logger.warning(f"Could not get file data for document {i+1}, skipping")
+ logger.warning(f"Document {i+1} ({chat_document.filename}): No file data found")
+ continue
+
+ if not content.strip():
+ logger.info(f"Document {i+1} ({chat_document.filename}): Empty text content, skipping")
+ continue
+
+ except Exception as e:
+ logger.warning(f"Error reading document {i+1} ({chat_document.filename}): {str(e)}")
continue
else:
logger.warning(f"Document {i+1} has no fileId, skipping")
continue
- if not content:
- logger.warning(f"Could not extract content from document {i+1}, skipping")
- continue
-
logger.info(f"Extracted content from document {i+1}: {len(content)} characters")
# Get the expected format for this document (or use default)
@@ -300,23 +296,16 @@ class MethodDocument(MethodBase):
})
if not output_documents:
- return self._createResult(
- success=False,
- data={},
+ return ActionResult.failure(
error="No documents could be generated"
)
- return self._createResult(
- success=True,
- data={
- "documents": output_documents
- }
+ return ActionResult.success(
+ documents=output_documents
)
except Exception as e:
logger.error(f"Error generating document: {str(e)}")
- return self._createResult(
- success=False,
- data={},
+ return ActionResult.failure(
error=str(e)
)
@@ -502,37 +491,40 @@ class MethodDocument(MethodBase):
@action
async def generateReport(self, parameters: Dict[str, Any]) -> ActionResult:
"""
- Generate a comprehensive, professional HTML report from multiple documents, consolidating and summarizing all findings using AI.
+ Generate HTML report from multiple documents using AI.
Parameters:
- documentList (str): Reference to the document list to create the report from
- title (str, optional): Title for the report (default: "Summary Report")
- includeMetadata (bool, optional): Whether to include metadata (default: True)
+ documentList (str): Document list reference
+ prompt (str): AI prompt for report generation
+ title (str, optional): Report title (default: "Summary Report")
+ includeMetadata (bool, optional): Include metadata (default: True)
"""
try:
documentList = parameters.get("documentList")
+ prompt = parameters.get("prompt")
title = parameters.get("title", "Summary Report")
includeMetadata = parameters.get("includeMetadata", True)
if not documentList:
- return self._createResult(
- success=False,
- data={},
+ return ActionResult.failure(
error="Document list reference is required"
)
+ if not prompt:
+ return ActionResult.failure(
+ error="Prompt is required to specify what kind of report to generate"
+ )
+
chatDocuments = self.service.getChatDocumentsFromDocumentList(documentList)
logger.info(f"Retrieved {len(chatDocuments)} chat documents for report generation")
if not chatDocuments:
- return self._createResult(
- success=False,
- data={},
+ return ActionResult.failure(
error="No documents found for the provided reference"
)
# Generate HTML report
- html_content = await self._generateHtmlReport(chatDocuments, title, includeMetadata)
+ html_content = await self._generateHtmlReport(chatDocuments, title, includeMetadata, prompt)
# Create output filename
timestamp = datetime.now(UTC).strftime('%Y%m%d_%H%M%S')
@@ -547,25 +539,20 @@ class MethodDocument(MethodBase):
logger.info(f"Generated HTML report: {output_filename} with {len(html_content)} characters")
- return self._createResult(
- success=True,
- data={
- "documents": [{
- "documentName": output_filename,
- "documentData": result_data,
- "mimeType": "text/html"
- }]
- }
+ return ActionResult.success(
+ documents=[{
+ "documentName": output_filename,
+ "documentData": result_data,
+ "mimeType": "text/html"
+ }]
)
except Exception as e:
logger.error(f"Error generating report: {str(e)}")
- return self._createResult(
- success=False,
- data={},
+ return ActionResult.failure(
error=str(e)
)
- async def _generateHtmlReport(self, chatDocuments: List[Any], title: str, includeMetadata: bool) -> str:
+ async def _generateHtmlReport(self, chatDocuments: List[Any], title: str, includeMetadata: bool, prompt: str) -> str:
"""
Generate a comprehensive HTML report using AI from all input documents.
"""
@@ -578,28 +565,35 @@ class MethodDocument(MethodBase):
content = ""
logger.info(f"Processing document: type={type(doc)}")
- # Get actual file content using the fileId reference
+ # Get actual file content using the document content extraction service
try:
- file_data = self.service.getFileData(doc.fileId)
- if file_data:
- # Convert bytes to string
- if isinstance(file_data, bytes):
- content = file_data.decode('utf-8')
+ extracted_content = await self.service.extractContentFromDocument(
+ prompt="Extract readable text content for HTML report generation",
+ document=doc
+ )
+
+ if extracted_content and extracted_content.contents:
+ # Get the first content item's data
+ for content_item in extracted_content.contents:
+ if hasattr(content_item, 'data') and content_item.data:
+ content += content_item.data + " "
+
+ if content.strip():
+ logger.info(f" Retrieved content from file: {len(content)} characters")
else:
- content = str(file_data)
- logger.info(f" Retrieved content from file: {len(content)} characters")
+ logger.info(f" No readable text content found (binary file)")
else:
- logger.warning(f" No file data found for fileId: {doc.fileId}")
+ logger.info(f" No content extracted (binary file)")
except Exception as e:
- logger.error(f" Error retrieving file data: {str(e)}")
+ logger.info(f" Could not extract content (binary file): {str(e)}")
# Skip empty documents
- if content:
+ if content and content.strip():
validDocuments.append(doc)
allContent.append(f"Document: {doc.filename}\n{content}\n")
logger.info(f" Added document to valid documents list")
else:
- logger.warning(f" Skipping document with no content")
+ logger.info(f" Skipping document with no readable text content")
if not validDocuments:
# If no valid documents, create a simple report
@@ -610,14 +604,14 @@ class MethodDocument(MethodBase):
html.append("