"""
|
|
ServiceCenter integration with Smart AI Engine
|
|
"""
|
|
|
|
import logging
|
|
from typing import List, Dict, Any, Optional
|
|
from modules.interfaces.interfaceChatModel import ChatDocument
|
|
from modules.interfaces.interfaceAiEngine import (
|
|
AIRequest, AIResponse, AIModelType, ProcessingStrategy,
|
|
ContentReductionStrategy
|
|
)
|
|
from modules.engines.aiEngine import SmartAIEngine
|
|
|
|
logger = logging.getLogger(__name__)
|
|
|
|
|
|
class ServiceCenterAIEngine:
    """Facade that routes ServiceCenter AI calls through the Smart AI Engine.

    Wraps a SmartAIEngine and exposes operation-specific helpers (task
    planning, action definition, document extraction, report generation,
    email composition, chat summarization, image analysis) plus
    backward-compatibility shims for the older callAiText*/callAiImage* API.
    """

    def __init__(self, service_center):
        # service_center supplies document creation, file access and the
        # legacy fallback helpers used by the compatibility methods below.
        self.service_center = service_center
        self.ai_engine = SmartAIEngine(service_center)

    async def callAiWithDocuments(
        self,
        prompt: str,
        documents: List[ChatDocument] = None,
        context: str = None,
        preferred_model: AIModelType = None,
        operation_type: str = "general",
        processing_strategy: ProcessingStrategy = None,
        reduction_strategy: ContentReductionStrategy = None,
        **kwargs
    ) -> str:
        """
        Unified AI call method that handles documents and prompts separately.

        Args:
            prompt: The AI prompt.
            documents: List of documents to process (None means no documents).
            context: Additional context passed to the engine.
            preferred_model: Preferred AI model, or None for the engine default.
            operation_type: Type of operation (for strategy selection).
            processing_strategy: Explicit processing strategy override.
            reduction_strategy: Explicit content reduction strategy override.
            **kwargs: Additional parameters, forwarded via request metadata.

        Returns:
            AI response content.

        Raises:
            Exception: If the engine reports an unsuccessful response, or if
                request processing itself raises.
        """
        try:
            # Create AI request; extra kwargs travel in metadata alongside
            # the operation type used for strategy selection downstream.
            request = AIRequest(
                prompt=prompt,
                documents=documents or [],
                context=context,
                preferred_model=preferred_model,
                processing_strategy=processing_strategy,
                reduction_strategy=reduction_strategy,
                metadata={
                    "operation_type": operation_type,
                    **kwargs,
                },
            )

            # Process request
            response = await self.ai_engine.process_request(request)

            if response.success:
                return response.content
            raise Exception(f"AI processing failed: {response.error}")

        except Exception as e:
            # Lazy %-formatting: the message is only built if the level fires.
            logger.error("Error in AI call with documents: %s", e)
            # Bare raise preserves the original traceback ("raise e" resets it).
            raise

    # Convenience methods for different operation types

    async def callAiForTaskPlanning(
        self,
        prompt: str,
        documents: List[ChatDocument] = None,
        context: str = None
    ) -> str:
        """AI call optimized for task planning (Claude handles complex planning well)."""
        return await self.callAiWithDocuments(
            prompt=prompt,
            documents=documents,
            context=context,
            operation_type="task_planning",
            preferred_model=AIModelType.ANTHROPIC_CLAUDE  # Better for complex planning
        )

    async def callAiForActionDefinition(
        self,
        prompt: str,
        documents: List[ChatDocument] = None,
        context: str = None
    ) -> str:
        """AI call optimized for action definition."""
        return await self.callAiWithDocuments(
            prompt=prompt,
            documents=documents,
            context=context,
            operation_type="action_definition",
            preferred_model=AIModelType.ANTHROPIC_CLAUDE
        )

    async def callAiForDocumentExtraction(
        self,
        prompt: str,
        documents: List[ChatDocument],
        context: str = None
    ) -> str:
        """AI call optimized for document extraction (processes documents one by one)."""
        return await self.callAiWithDocuments(
            prompt=prompt,
            documents=documents,
            context=context,
            operation_type="document_extraction",
            processing_strategy=ProcessingStrategy.DOCUMENT_BY_DOCUMENT
        )

    async def callAiForReportGeneration(
        self,
        prompt: str,
        documents: List[ChatDocument],
        context: str = None
    ) -> str:
        """AI call optimized for report generation (chunked content processing)."""
        return await self.callAiWithDocuments(
            prompt=prompt,
            documents=documents,
            context=context,
            operation_type="report_generation",
            processing_strategy=ProcessingStrategy.CHUNKED_PROCESSING
        )

    async def callAiForEmailComposition(
        self,
        prompt: str,
        documents: List[ChatDocument] = None,
        context: str = None
    ) -> str:
        """AI call optimized for email composition."""
        return await self.callAiWithDocuments(
            prompt=prompt,
            documents=documents,
            context=context,
            operation_type="email_composition",
            preferred_model=AIModelType.OPENAI_GPT4  # Better for creative writing
        )

    async def callAiForChatSummarization(
        self,
        prompt: str,
        documents: List[ChatDocument] = None,
        context: str = None
    ) -> str:
        """AI call optimized for chat summarization (summarized-content strategy)."""
        return await self.callAiWithDocuments(
            prompt=prompt,
            documents=documents,
            context=context,
            operation_type="chat_summarization",
            processing_strategy=ProcessingStrategy.SUMMARIZED_CONTENT
        )

    async def callAiForImageAnalysis(
        self,
        prompt: str,
        documents: List[ChatDocument],
        context: str = None
    ) -> str:
        """AI call optimized for image analysis (vision-capable model required)."""
        return await self.callAiWithDocuments(
            prompt=prompt,
            documents=documents,
            context=context,
            operation_type="image_analysis",
            preferred_model=AIModelType.OPENAI_VISION,
            requires_vision=True
        )

    # Backward compatibility methods

    async def callAiTextAdvanced(self, prompt: str, context: str = None) -> str:
        """Backward compatibility method: general-purpose call on the advanced model."""
        return await self.callAiWithDocuments(
            prompt=prompt,
            context=context,
            operation_type="general",
            preferred_model=AIModelType.ANTHROPIC_CLAUDE
        )

    async def callAiTextBasic(self, prompt: str, context: str = None) -> str:
        """Backward compatibility method: general-purpose call on the basic model."""
        return await self.callAiWithDocuments(
            prompt=prompt,
            context=context,
            operation_type="general",
            preferred_model=AIModelType.OPENAI_GPT35
        )

    async def callAiImageBasic(self, prompt: str, image_data: str, mime_type: str) -> str:
        """Backward compatibility method for image processing.

        Wraps the raw base64 image data in a ChatDocument and delegates to
        the vision-optimized path.
        """
        # Create a document from image data
        image_doc = self.service_center.createDocument(
            "image_analysis.jpg",
            mime_type,
            image_data,
            base64encoded=True
        )

        return await self.callAiForImageAnalysis(
            prompt=prompt,
            documents=[image_doc]
        )

    async def extractContentFromDocument(self, prompt: str, document: ChatDocument) -> str:
        """Enhanced document extraction using the AI engine.

        Falls back to the service center's document processor if the engine
        path fails; returns "" when the fallback yields no content.
        """
        try:
            return await self.callAiForDocumentExtraction(
                prompt=prompt,
                documents=[document]
            )
        except Exception as e:
            logger.error("Error in enhanced document extraction: %s", e)
            # Fall back to the original document-processor pipeline.
            extracted = await self.service_center.documentProcessor.processFileData(
                fileData=self.service_center.getFileData(document.fileId),
                fileName=document.fileName,
                mimeType=document.mimeType,
                prompt=prompt,
                documentId=document.id
            )
            if extracted and extracted.contents:
                return "\n".join(item.data for item in extracted.contents)
            return ""

    async def summarizeChat(self, messages: List) -> str:
        """Enhanced chat summarization using the AI engine.

        Falls back to the legacy basic text call if the engine path fails.
        """
        # Build the transcript BEFORE the try block so the fallback below can
        # always reference it (previously a failure before this assignment
        # caused a NameError in the except handler instead of falling back).
        chat_content = "\n".join(
            f"{msg.role}: {msg.message}" for msg in messages if hasattr(msg, 'message')
        )
        try:
            # Create a document from chat content
            chat_doc = self.service_center.createDocument(
                "chat_history.txt",
                "text/plain",
                chat_content,
                base64encoded=False
            )

            return await self.callAiForChatSummarization(
                prompt="Summarize this chat conversation, focusing on key decisions, outcomes, and next steps.",
                documents=[chat_doc]
            )
        except Exception as e:
            logger.error("Error in enhanced chat summarization: %s", e)
            # Fall back to original method
            return await self.service_center.callAiTextBasic(
                f"Summarize this chat conversation: {chat_content}"
            )