"""
AI processing method module.

Handles direct AI calls for any type of task.
"""
import logging
from typing import Dict, Any, List, Optional
import uuid
from datetime import datetime, UTC

from modules.chat.methodBase import MethodBase, ActionResult, action

logger = logging.getLogger(__name__)

# Max characters of each document's content placed in the AI context,
# keyed by processing mode; anything else falls back to the default.
_CONTEXT_LIMITS = {"detailed": 5000, "advanced": 3000}
_DEFAULT_CONTEXT_LIMIT = 2000


class MethodAi(MethodBase):
    """AI method implementation for direct AI processing."""

    def __init__(self, serviceCenter: Any):
        """Initialize the AI method.

        Args:
            serviceCenter: Service container passed through to MethodBase;
                exposes the AI/file/document services used by process().
        """
        super().__init__(serviceCenter)
        self.name = "ai"
        self.description = "Handle direct AI processing for any type of task"

    def _buildDocumentContext(self, documentList: List[Any], processingMode: str,
                              includeMetadata: bool) -> str:
        """Build a textual context block from referenced documents.

        Returns "" when no documents resolve to usable content. Binary
        (non-UTF-8) files are listed by name only.
        """
        chatDocuments = self.service.getChatDocumentsFromDocumentList(documentList)
        if not chatDocuments:
            return ""

        maxLength = _CONTEXT_LIMITS.get(processingMode, _DEFAULT_CONTEXT_LIMIT)
        contextParts = []
        for doc in chatDocuments:
            fileId = doc.fileId
            fileData = self.service.getFileData(fileId)
            fileInfo = self.service.getFileInfo(fileId)
            if not fileData:
                continue
            try:
                content = fileData.decode('utf-8')
            except UnicodeDecodeError:
                # Not valid text -- reference the document without content.
                contextParts.append(f"Document: {doc.filename} [Binary content]")
                continue
            metadataInfo = ""
            if fileInfo and includeMetadata:
                metadataInfo = (f" (Size: {fileInfo.get('fileSize', 'unknown')}, "
                                f"Type: {fileInfo.get('mimeType', 'unknown')})")
            # Fix: only append an ellipsis when content was actually truncated
            # (previously "..." was added unconditionally, which misrepresented
            # short documents as truncated).
            snippet = content[:maxLength]
            suffix = "..." if len(content) > maxLength else ""
            contextParts.append(
                f"Document: {doc.filename}{metadataInfo}\nContent:\n{snippet}{suffix}"
            )

        if not contextParts:
            return ""
        logger.info("Included %d documents in AI context", len(chatDocuments))
        return "\n\n".join(contextParts)

    @staticmethod
    def _resolveOutputFormat(expectedDocumentFormats: List[Dict[str, Any]]) -> tuple:
        """Return (extension, mimeType) from the first expected format.

        Defaults to plain text when no format is requested.
        """
        if expectedDocumentFormats:
            expectedFormat = expectedDocumentFormats[0]
            extension = expectedFormat.get("extension", ".txt")
            mimeType = expectedFormat.get("mimeType", "text/plain")
            logger.info("Using expected format: %s (%s)", extension, mimeType)
            return extension, mimeType
        return ".txt", "text/plain"

    @staticmethod
    def _buildPrompt(aiPrompt: str, processingMode: str, customInstructions: str,
                     outputExtension: str) -> str:
        """Assemble the final prompt: base prompt + mode hint + custom
        instructions + strict output-format directive (non-text formats only)."""
        prompt = aiPrompt

        # Mode instructions are generic, not analysis-specific.
        if processingMode == "detailed":
            prompt += "\n\nPlease provide a detailed response with comprehensive information."
        elif processingMode == "advanced":
            prompt += "\n\nPlease provide an advanced response with deep insights."

        if customInstructions:
            prompt += f"\n\nAdditional Instructions: {customInstructions}"

        if outputExtension != ".txt":
            if outputExtension == ".csv":
                prompt += ("\n\nCRITICAL: Deliver the result as pure CSV data without any "
                           "markdown formatting, code blocks, or additional text. Output only "
                           "the CSV content with proper headers and data rows.")
            elif outputExtension == ".json":
                prompt += ("\n\nCRITICAL: Deliver the result as pure JSON data without any "
                           "markdown formatting, code blocks, or additional text. Output only "
                           "the JSON content.")
            elif outputExtension == ".xml":
                prompt += ("\n\nCRITICAL: Deliver the result as pure XML data without any "
                           "markdown formatting, code blocks, or additional text. Output only "
                           "the XML content.")
            else:
                prompt += (f"\n\nCRITICAL: Deliver the result as pure {outputExtension.upper()} "
                           "data without any markdown formatting, code blocks, or additional text.")
        return prompt

    @action
    async def process(self, parameters: Dict[str, Any]) -> ActionResult:
        """
        Perform an AI call for any type of task with optional document references.

        Parameters:
            aiPrompt (str): The AI prompt for processing (required).
            documentList (list, optional): Document references to include in context.
            expectedDocumentFormats (list, optional): Expected output formats with
                extension, mimeType, description.
            processingMode (str, optional): 'basic', 'advanced', or 'detailed'
                (default 'basic'); 'advanced'/'detailed' route to the advanced
                AI service and widen the document-context limit.
            includeMetadata (bool, optional): Include file size/type metadata in
                the document context (default True).
            customInstructions (str, optional): Additional instructions appended
                to the prompt.

        Returns:
            ActionResult with the AI output, the created result document, and
            call metadata; on any failure a failed ActionResult with an error
            message (never raises).
        """
        try:
            aiPrompt = parameters.get("aiPrompt")
            documentList = parameters.get("documentList", [])
            expectedDocumentFormats = parameters.get("expectedDocumentFormats", [])
            processingMode = parameters.get("processingMode", "basic")
            includeMetadata = parameters.get("includeMetadata", True)
            customInstructions = parameters.get("customInstructions", "")

            if not aiPrompt:
                return self._createResult(
                    success=False,
                    data={},
                    error="AI prompt is required"
                )

            context = ""
            if documentList:
                context = self._buildDocumentContext(
                    documentList, processingMode, includeMetadata
                )

            outputExtension, outputMimeType = self._resolveOutputFormat(
                expectedDocumentFormats
            )
            enhancedPrompt = self._buildPrompt(
                aiPrompt, processingMode, customInstructions, outputExtension
            )

            logger.info("Executing AI call with mode: %s, prompt length: %d",
                        processingMode, len(enhancedPrompt))
            if context:
                logger.info("Including context from %d documents", len(documentList))

            # Advanced/detailed modes use the heavier AI service.
            if processingMode in ("advanced", "detailed"):
                result = await self.service.callAiTextAdvanced(enhancedPrompt, context)
            else:
                result = await self.service.callAiTextBasic(enhancedPrompt, context)

            # Create result document; the calling layer decides whether to
            # attach it to a workflow.
            timestamp = datetime.now(UTC).strftime('%Y%m%d_%H%M%S')
            filename = f"ai_{processingMode}_{timestamp}{outputExtension}"
            document = self.service.createDocument(
                fileName=filename,
                mimeType=outputMimeType,
                content=result,
                base64encoded=False
            )

            return self._createResult(
                success=True,
                data={
                    "result": result,
                    "filename": filename,
                    "documentId": document.id if hasattr(document, 'id') else None,
                    "processedDocuments": len(documentList) if documentList else 0,
                    "processingMode": processingMode,
                    "document": document  # Created document included for the caller
                },
                metadata={
                    "method": "ai.process",
                    "promptLength": len(aiPrompt),
                    "contextLength": len(context),
                    "outputFormat": outputExtension,
                    "includeMetadata": includeMetadata,
                    "processingMode": processingMode,
                    "hasCustomInstructions": bool(customInstructions)
                }
            )
        except Exception as e:
            # logger.exception keeps the traceback (logger.error dropped it).
            logger.exception("Error in ai.process: %s", e)
            return self._createResult(
                success=False,
                data={},
                error=f"AI processing failed: {str(e)}"
            )