complete refactory of all dependencies
This commit is contained in:
parent
07a2d279df
commit
8be9211b28
65 changed files with 2768 additions and 2223 deletions
4
app.py
4
app.py
|
|
@ -291,9 +291,5 @@ app.include_router(googleRouter)
|
|||
from modules.routes.routeVoiceGoogle import router as voiceGoogleRouter
|
||||
app.include_router(voiceGoogleRouter)
|
||||
|
||||
from modules.routes.routeVoiceStreaming import router as voiceStreamingRouter
|
||||
app.include_router(voiceStreamingRouter)
|
||||
|
||||
# Admin security routes (token listing and revocation, logs, db tools)
|
||||
from modules.routes.routeSecurityAdmin import router as adminSecurityRouter
|
||||
app.include_router(adminSecurityRouter)
|
||||
|
|
@ -64,6 +64,13 @@ Connector_AiAnthropic_MODEL_NAME = claude-3-5-sonnet-20241022
|
|||
Connector_AiAnthropic_TEMPERATURE = 0.2
|
||||
Connector_AiAnthropic_MAX_TOKENS = 2000
|
||||
|
||||
# LangDoc configuration
|
||||
Connector_AiLangdoc_API_URL = https://api.langdock.com/v1/chat/completions
|
||||
Connector_AiLangdoc_API_SECRET = sk-9KaNH1FfEx7SkTijsFpXeTIc9_xOmoo7e0hW6SqrYavFq_bgjcULa7PXp3kWQpp4gfk8-U0B4L91CP6YpAJxZg
|
||||
Connector_AiLangdoc_MODEL_NAME = gpt-4o
|
||||
Connector_AiLangdoc_TEMPERATURE = 0.2
|
||||
Connector_AiLangdoc_MAX_TOKENS = 2000
|
||||
|
||||
# Agent Mail configuration
|
||||
Service_MSFT_CLIENT_ID = c7e7112d-61dc-4f3a-8cd3-08cc4cd7504c
|
||||
Service_MSFT_CLIENT_SECRET = DEV_ENC:Z0FBQUFBQm8xSUpEQk4xYnpmbnItUEU3dHU4eHB5dzVYay1WT012RTRLUWJDTlBILVY5dC1FX3VMNjZmLThrbDRFNWFSNGprY3RRTlpYNGlubVBpNnY3MjNJcGtzVk9PMzRacl9LUlM2RU5vTVVZWHJvaUhWSHVfc1pNR0pfQmI5SEprOG5KdlB1QnQ=
|
||||
|
|
|
|||
|
|
@ -64,6 +64,13 @@ Connector_AiAnthropic_MODEL_NAME = claude-3-5-sonnet-20241022
|
|||
Connector_AiAnthropic_TEMPERATURE = 0.2
|
||||
Connector_AiAnthropic_MAX_TOKENS = 2000
|
||||
|
||||
# LangDoc configuration
|
||||
Connector_AiLangdoc_API_URL = https://api.langdock.com/v1/chat/completions
|
||||
Connector_AiLangdoc_API_SECRET = sk-9KaNH1FfEx7SkTijsFpXeTIc9_xOmoo7e0hW6SqrYavFq_bgjcULa7PXp3kWQpp4gfk8-U0B4L91CP6YpAJxZg
|
||||
Connector_AiLangdoc_MODEL_NAME = gpt-4o
|
||||
Connector_AiLangdoc_TEMPERATURE = 0.2
|
||||
Connector_AiLangdoc_MAX_TOKENS = 2000
|
||||
|
||||
# Agent Mail configuration
|
||||
Service_MSFT_CLIENT_ID = c7e7112d-61dc-4f3a-8cd3-08cc4cd7504c
|
||||
Service_MSFT_CLIENT_SECRET = INT_ENC:Z0FBQUFBQm8xSVRjNzB2M3ZjaE1SVE9ON2FKam9yVURxcHl1Ym5VNVUtS0MyWUpNVXVlaWpWS2U3VVd3em9vQl9lcnVYay03bS04YjNBbDZZNTB4eUtjT3ppQjJjY3dOT0FNLW9LeDhIUU5iaTNqNURUWE5La3kzaHNGcU9yNVI0YjhWZTZRRFktcTk=
|
||||
|
|
|
|||
|
|
@ -64,6 +64,13 @@ Connector_AiAnthropic_MODEL_NAME = claude-3-5-sonnet-20241022
|
|||
Connector_AiAnthropic_TEMPERATURE = 0.2
|
||||
Connector_AiAnthropic_MAX_TOKENS = 2000
|
||||
|
||||
# LangDoc configuration
|
||||
Connector_AiLangdoc_API_URL = https://api.langdock.com/v1/chat/completions
|
||||
Connector_AiLangdoc_API_SECRET = sk-9KaNH1FfEx7SkTijsFpXeTIc9_xOmoo7e0hW6SqrYavFq_bgjcULa7PXp3kWQpp4gfk8-U0B4L91CP6YpAJxZg
|
||||
Connector_AiLangdoc_MODEL_NAME = gpt-4o
|
||||
Connector_AiLangdoc_TEMPERATURE = 0.2
|
||||
Connector_AiLangdoc_MAX_TOKENS = 2000
|
||||
|
||||
# Agent Mail configuration
|
||||
Service_MSFT_CLIENT_ID = c7e7112d-61dc-4f3a-8cd3-08cc4cd7504c
|
||||
Service_MSFT_CLIENT_SECRET = PROD_ENC:Z0FBQUFBQm8xSU5pVEhHdlZHU3FNMmhuRGVwaGc3YzIxSjlZNzBCQjlOV2pSYVNXb0t1ZnVwQzZsQzY4cHMtVlZtNF85OEVaV1BMTzdXMmpzaGZpaG1DalJ0bkNPMHA5ZUcwZjNDdGk1TFdxYTJSZnVrVmhhZ2VRUEZxbjJOOGFhWk9EYlY3dmRVTnI=
|
||||
|
|
|
|||
406
modules/connectors/connectorAiLangdoc.py
Normal file
406
modules/connectors/connectorAiLangdoc.py
Normal file
|
|
@ -0,0 +1,406 @@
|
|||
import logging
|
||||
import httpx
|
||||
import asyncio
|
||||
import re
|
||||
from typing import Dict, Any, List, Union, Optional
|
||||
from fastapi import HTTPException
|
||||
from modules.shared.configuration import APP_CONFIG
|
||||
|
||||
# Configure logger
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
def loadConfigData():
|
||||
"""Load configuration data for LangDoc connector"""
|
||||
return {
|
||||
"apiKey": APP_CONFIG.get('Connector_AiLangdoc_API_SECRET'),
|
||||
"apiUrl": APP_CONFIG.get('Connector_AiLangdoc_API_URL'),
|
||||
"modelName": APP_CONFIG.get('Connector_AiLangdoc_MODEL_NAME'),
|
||||
"temperature": float(APP_CONFIG.get('Connector_AiLangdoc_TEMPERATURE')),
|
||||
"maxTokens": int(APP_CONFIG.get('Connector_AiLangdoc_MAX_TOKENS'))
|
||||
}
|
||||
|
||||
class AiLangdoc:
|
||||
"""Connector for communication with the LangDoc API (OpenAI-compatible)."""
|
||||
|
||||
def __init__(self):
|
||||
# Load configuration
|
||||
self.config = loadConfigData()
|
||||
self.apiKey = self.config["apiKey"]
|
||||
self.apiUrl = self.config["apiUrl"]
|
||||
self.modelName = self.config["modelName"]
|
||||
|
||||
# HttpClient for API calls
|
||||
self.httpClient = httpx.AsyncClient(
|
||||
timeout=120.0, # Longer timeout for complex requests
|
||||
headers={
|
||||
"Authorization": f"Bearer {self.apiKey}",
|
||||
"Content-Type": "application/json"
|
||||
}
|
||||
)
|
||||
|
||||
logger.info(f"LangDoc Connector initialized with model: {self.modelName}")
|
||||
|
||||
async def callAiBasic(self, messages: List[Dict[str, Any]], temperature: float = None, maxTokens: int = None) -> str:
|
||||
"""
|
||||
Calls the LangDoc API with the given messages.
|
||||
|
||||
Args:
|
||||
messages: List of messages in OpenAI format (role, content)
|
||||
temperature: Temperature for response generation (0.0-1.0)
|
||||
maxTokens: Maximum number of tokens in the response
|
||||
|
||||
Returns:
|
||||
The response from the LangDoc API
|
||||
|
||||
Raises:
|
||||
HTTPException: For errors in API communication
|
||||
"""
|
||||
try:
|
||||
# Use parameters from configuration if none were overridden
|
||||
if temperature is None:
|
||||
temperature = self.config.get("temperature", 0.2)
|
||||
|
||||
if maxTokens is None:
|
||||
maxTokens = self.config.get("maxTokens", 2000)
|
||||
|
||||
payload = {
|
||||
"model": self.modelName,
|
||||
"messages": messages,
|
||||
"temperature": temperature,
|
||||
"max_tokens": maxTokens
|
||||
}
|
||||
|
||||
response = await self.httpClient.post(
|
||||
self.apiUrl,
|
||||
json=payload
|
||||
)
|
||||
|
||||
if response.status_code != 200:
|
||||
error_detail = f"LangDoc API error: {response.status_code} - {response.text}"
|
||||
logger.error(error_detail)
|
||||
|
||||
# Provide more specific error messages based on status code
|
||||
if response.status_code == 429:
|
||||
error_message = "Rate limit exceeded. Please wait before making another request."
|
||||
elif response.status_code == 401:
|
||||
error_message = "Invalid API key. Please check your LangDoc API configuration."
|
||||
elif response.status_code == 400:
|
||||
error_message = f"Invalid request to LangDoc API: {response.text}"
|
||||
else:
|
||||
error_message = f"LangDoc API error ({response.status_code}): {response.text}"
|
||||
|
||||
raise HTTPException(status_code=500, detail=error_message)
|
||||
|
||||
responseJson = response.json()
|
||||
content = responseJson["choices"][0]["message"]["content"]
|
||||
return content
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error calling LangDoc API: {str(e)}")
|
||||
raise HTTPException(status_code=500, detail=f"Error calling LangDoc API: {str(e)}")
|
||||
|
||||
async def callAiImage(self, prompt: str, imageData: Union[str, bytes], mimeType: str = None) -> str:
|
||||
"""
|
||||
Analyzes an image using LangDoc's vision capabilities.
|
||||
|
||||
Args:
|
||||
imageData: Either a file path (str) or image data (bytes)
|
||||
mimeType: The MIME type of the image (optional, only for binary data)
|
||||
prompt: The prompt for analysis
|
||||
|
||||
Returns:
|
||||
The analysis response as text
|
||||
"""
|
||||
try:
|
||||
# Distinguish between file path and binary data
|
||||
if isinstance(imageData, str):
|
||||
# It's a file path - import filehandling only when needed
|
||||
from modules import agentserviceFilemanager as fileHandler
|
||||
base64Data, autoMimeType = fileHandler.encodeFileToBase64(imageData)
|
||||
mimeType = mimeType or autoMimeType
|
||||
else:
|
||||
# It's binary data
|
||||
import base64
|
||||
base64Data = base64.b64encode(imageData).decode('utf-8')
|
||||
# MIME type must be specified for binary data
|
||||
if not mimeType:
|
||||
# Fallback to generic image type
|
||||
mimeType = "image/png"
|
||||
|
||||
# Prepare the payload for the Vision API
|
||||
messages = [
|
||||
{
|
||||
"role": "user",
|
||||
"content": [
|
||||
{"type": "text", "text": prompt},
|
||||
{
|
||||
"type": "image_url",
|
||||
"image_url": {
|
||||
"url": f"data:{mimeType};base64,{base64Data}"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
|
||||
# Use the existing callAiBasic function
|
||||
response = await self.callAiBasic(messages)
|
||||
|
||||
return response
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error during image analysis: {str(e)}", exc_info=True)
|
||||
return f"[Error during image analysis: {str(e)}]"
|
||||
|
||||
async def listModels(self) -> List[Dict[str, Any]]:
|
||||
"""
|
||||
Lists available models from the LangDoc API.
|
||||
|
||||
Returns:
|
||||
List of available models with their details
|
||||
"""
|
||||
try:
|
||||
# LangDoc uses OpenAI-compatible endpoints
|
||||
modelsUrl = self.apiUrl.replace("/chat/completions", "/models")
|
||||
|
||||
response = await self.httpClient.get(modelsUrl)
|
||||
|
||||
if response.status_code != 200:
|
||||
error_detail = f"LangDoc API error listing models: {response.status_code} - {response.text}"
|
||||
logger.error(error_detail)
|
||||
raise HTTPException(status_code=500, detail=error_detail)
|
||||
|
||||
responseJson = response.json()
|
||||
return responseJson.get("data", [])
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error listing LangDoc models: {str(e)}")
|
||||
raise HTTPException(status_code=500, detail=f"Error listing LangDoc models: {str(e)}")
|
||||
|
||||
async def getModelInfo(self, modelName: str = None) -> Dict[str, Any]:
|
||||
"""
|
||||
Gets information about a specific model.
|
||||
|
||||
Args:
|
||||
modelName: Name of the model to get info for (uses default if None)
|
||||
|
||||
Returns:
|
||||
Model information dictionary
|
||||
"""
|
||||
try:
|
||||
if modelName is None:
|
||||
modelName = self.modelName
|
||||
|
||||
models = await self.listModels()
|
||||
|
||||
for model in models:
|
||||
if model.get("id") == modelName:
|
||||
return model
|
||||
|
||||
raise HTTPException(status_code=404, detail=f"Model {modelName} not found")
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error getting LangDoc model info: {str(e)}")
|
||||
raise HTTPException(status_code=500, detail=f"Error getting LangDoc model info: {str(e)}")
|
||||
|
||||
async def generateImage(self, prompt: str, size: str = "1024x1024", quality: str = "standard", style: str = "vivid") -> Dict[str, Any]:
|
||||
"""
|
||||
Generates an image using LangDoc's DALL-E 3 integration.
|
||||
|
||||
Args:
|
||||
prompt: Text description of the image to generate
|
||||
size: Image size - "1024x1024", "1792x1024", or "1024x1792"
|
||||
quality: Image quality - "standard" or "hd"
|
||||
style: Image style - "vivid" or "natural"
|
||||
|
||||
Returns:
|
||||
Dictionary containing the generated image data and metadata
|
||||
|
||||
Raises:
|
||||
HTTPException: For errors in API communication
|
||||
"""
|
||||
try:
|
||||
# Use OpenAI-compatible images endpoint
|
||||
imagesUrl = self.apiUrl.replace("/chat/completions", "/images/generations")
|
||||
|
||||
payload = {
|
||||
"model": "dall-e-3",
|
||||
"prompt": prompt,
|
||||
"size": size,
|
||||
"quality": quality,
|
||||
"style": style,
|
||||
"n": 1
|
||||
}
|
||||
|
||||
response = await self.httpClient.post(
|
||||
imagesUrl,
|
||||
json=payload
|
||||
)
|
||||
|
||||
if response.status_code != 200:
|
||||
error_detail = f"LangDoc Image Generation API error: {response.status_code} - {response.text}"
|
||||
logger.error(error_detail)
|
||||
|
||||
# Provide more specific error messages
|
||||
if response.status_code == 429:
|
||||
error_message = "Rate limit exceeded for image generation. Please wait before making another request."
|
||||
elif response.status_code == 401:
|
||||
error_message = "Invalid API key for image generation. Please check your LangDoc API configuration."
|
||||
elif response.status_code == 400:
|
||||
error_message = f"Invalid request to LangDoc Image API: {response.text}"
|
||||
else:
|
||||
error_message = f"LangDoc Image API error ({response.status_code}): {response.text}"
|
||||
|
||||
raise HTTPException(status_code=500, detail=error_message)
|
||||
|
||||
responseJson = response.json()
|
||||
|
||||
# Extract image data
|
||||
imageData = responseJson.get("data", [])
|
||||
if not imageData:
|
||||
raise HTTPException(status_code=500, detail="No image data returned from LangDoc API")
|
||||
|
||||
imageInfo = imageData[0]
|
||||
|
||||
return {
|
||||
"success": True,
|
||||
"image_url": imageInfo.get("url"),
|
||||
"revised_prompt": imageInfo.get("revised_prompt"),
|
||||
"size": size,
|
||||
"quality": quality,
|
||||
"style": style,
|
||||
"model": "dall-e-3",
|
||||
"created": responseJson.get("created"),
|
||||
"raw_response": responseJson
|
||||
}
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error generating image with LangDoc: {str(e)}")
|
||||
raise HTTPException(status_code=500, detail=f"Error generating image with LangDoc: {str(e)}")
|
||||
|
||||
async def generateImageWithVariations(self, prompt: str, variations: int = 1, size: str = "1024x1024", quality: str = "standard", style: str = "vivid") -> List[Dict[str, Any]]:
|
||||
"""
|
||||
Generates multiple image variations using LangDoc's DALL-E 3 integration.
|
||||
|
||||
Args:
|
||||
prompt: Text description of the image to generate
|
||||
variations: Number of variations to generate (1-4)
|
||||
size: Image size - "1024x1024", "1792x1024", or "1024x1792"
|
||||
quality: Image quality - "standard" or "hd"
|
||||
style: Image style - "vivid" or "natural"
|
||||
|
||||
Returns:
|
||||
List of dictionaries containing generated image data and metadata
|
||||
|
||||
Raises:
|
||||
HTTPException: For errors in API communication
|
||||
"""
|
||||
try:
|
||||
# Limit variations to reasonable number
|
||||
variations = min(max(variations, 1), 4)
|
||||
|
||||
# Use OpenAI-compatible images endpoint
|
||||
imagesUrl = self.apiUrl.replace("/chat/completions", "/images/generations")
|
||||
|
||||
results = []
|
||||
|
||||
# Generate multiple variations by making multiple API calls
|
||||
for i in range(variations):
|
||||
# Add variation to prompt to get different results
|
||||
variationPrompt = f"{prompt} (variation {i+1})"
|
||||
|
||||
payload = {
|
||||
"model": "dall-e-3",
|
||||
"prompt": variationPrompt,
|
||||
"size": size,
|
||||
"quality": quality,
|
||||
"style": style,
|
||||
"n": 1
|
||||
}
|
||||
|
||||
response = await self.httpClient.post(
|
||||
imagesUrl,
|
||||
json=payload
|
||||
)
|
||||
|
||||
if response.status_code != 200:
|
||||
logger.warning(f"Failed to generate variation {i+1}: {response.status_code} - {response.text}")
|
||||
continue
|
||||
|
||||
responseJson = response.json()
|
||||
imageData = responseJson.get("data", [])
|
||||
|
||||
if imageData:
|
||||
imageInfo = imageData[0]
|
||||
results.append({
|
||||
"variation": i + 1,
|
||||
"image_url": imageInfo.get("url"),
|
||||
"revised_prompt": imageInfo.get("revised_prompt"),
|
||||
"size": size,
|
||||
"quality": quality,
|
||||
"style": style,
|
||||
"model": "dall-e-3",
|
||||
"created": responseJson.get("created")
|
||||
})
|
||||
|
||||
# Add small delay between requests to avoid rate limiting
|
||||
if i < variations - 1:
|
||||
await asyncio.sleep(1)
|
||||
|
||||
return results
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error generating image variations with LangDoc: {str(e)}")
|
||||
raise HTTPException(status_code=500, detail=f"Error generating image variations with LangDoc: {str(e)}")
|
||||
|
||||
async def generateImageWithChat(self, prompt: str, size: str = "1024x1024", quality: str = "standard", style: str = "vivid") -> str:
|
||||
"""
|
||||
Generates an image using LangDoc's chat interface with image generation tools.
|
||||
This method uses the chat completions endpoint with image generation capabilities.
|
||||
|
||||
Args:
|
||||
prompt: Text description of the image to generate
|
||||
size: Image size - "1024x1024", "1792x1024", or "1024x1792"
|
||||
quality: Image quality - "standard" or "hd"
|
||||
style: Image style - "vivid" or "natural"
|
||||
|
||||
Returns:
|
||||
Response text from the chat model (may include image references)
|
||||
|
||||
Raises:
|
||||
HTTPException: For errors in API communication
|
||||
"""
|
||||
try:
|
||||
# Create a prompt that requests image generation
|
||||
imagePrompt = f"Please generate an image with the following description: {prompt}. Size: {size}, Quality: {quality}, Style: {style}"
|
||||
|
||||
messages = [
|
||||
{
|
||||
"role": "user",
|
||||
"content": imagePrompt
|
||||
}
|
||||
]
|
||||
|
||||
# Use the chat completions endpoint
|
||||
response = await self.callAiBasic(messages)
|
||||
|
||||
return response
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error generating image with chat: {str(e)}")
|
||||
raise HTTPException(status_code=500, detail=f"Error generating image with chat: {str(e)}")
|
||||
|
||||
async def _testConnection(self) -> bool:
|
||||
"""
|
||||
Tests the connection to the LangDoc API.
|
||||
|
||||
Returns:
|
||||
True if connection is successful, False otherwise
|
||||
"""
|
||||
try:
|
||||
# Try to list models as a simple connection test
|
||||
await self.listModels()
|
||||
return True
|
||||
except Exception as e:
|
||||
logger.error(f"LangDoc connection test failed: {str(e)}")
|
||||
return False
|
||||
|
|
@ -665,6 +665,13 @@ class ConnectorGoogleSpeech:
|
|||
|
||||
# Build the voice request
|
||||
selected_voice = voice_name or self._get_default_voice(language_code)
|
||||
|
||||
if not selected_voice:
|
||||
return {
|
||||
"success": False,
|
||||
"error": f"No voice specified for language {language_code}. Please select a voice."
|
||||
}
|
||||
|
||||
logger.info(f"Using TTS voice: {selected_voice} for language: {language_code}")
|
||||
|
||||
voice = texttospeech.VoiceSelectionParams(
|
||||
|
|
@ -704,121 +711,108 @@ class ConnectorGoogleSpeech:
|
|||
def _get_default_voice(self, language_code: str) -> str:
|
||||
"""
|
||||
Get default voice name for a language code.
|
||||
Uses female voices as default for better user experience.
|
||||
Returns None - no defaults, let the frontend handle voice selection.
|
||||
"""
|
||||
voice_mapping = {
|
||||
# European Languages
|
||||
'de-DE': 'de-DE-Wavenet-B', # German, female
|
||||
'en-US': 'en-US-Wavenet-B', # English US, female
|
||||
'en-GB': 'en-GB-Wavenet-B', # English UK, female
|
||||
'en-AU': 'en-AU-Wavenet-B', # English Australia, female
|
||||
'en-CA': 'en-CA-Wavenet-B', # English Canada, female
|
||||
'en-IN': 'en-IN-Wavenet-B', # English India, female
|
||||
'fr-FR': 'fr-FR-Wavenet-B', # French, female
|
||||
'fr-CA': 'fr-CA-Wavenet-B', # French Canada, female
|
||||
'es-ES': 'es-ES-Wavenet-B', # Spanish Spain, female
|
||||
'es-MX': 'es-MX-Wavenet-B', # Spanish Mexico, female
|
||||
'es-AR': 'es-AR-Wavenet-B', # Spanish Argentina, female
|
||||
'es-CO': 'es-CO-Wavenet-B', # Spanish Colombia, female
|
||||
'es-PE': 'es-PE-Wavenet-B', # Spanish Peru, female
|
||||
'es-VE': 'es-VE-Wavenet-B', # Spanish Venezuela, female
|
||||
'es-CL': 'es-CL-Wavenet-B', # Spanish Chile, female
|
||||
'es-UY': 'es-UY-Wavenet-B', # Spanish Uruguay, female
|
||||
'es-BO': 'es-BO-Wavenet-B', # Spanish Bolivia, female
|
||||
'es-CR': 'es-CR-Wavenet-B', # Spanish Costa Rica, female
|
||||
'es-EC': 'es-EC-Wavenet-B', # Spanish Ecuador, female
|
||||
'es-GT': 'es-GT-Wavenet-B', # Spanish Guatemala, female
|
||||
'es-HN': 'es-HN-Wavenet-B', # Spanish Honduras, female
|
||||
'es-NI': 'es-NI-Wavenet-B', # Spanish Nicaragua, female
|
||||
'es-PA': 'es-PA-Wavenet-B', # Spanish Panama, female
|
||||
'es-PY': 'es-PY-Wavenet-B', # Spanish Paraguay, female
|
||||
'es-PR': 'es-PR-Wavenet-B', # Spanish Puerto Rico, female
|
||||
'es-DO': 'es-DO-Wavenet-B', # Spanish Dominican Republic, female
|
||||
'es-SV': 'es-SV-Wavenet-B', # Spanish El Salvador, female
|
||||
'it-IT': 'it-IT-Wavenet-B', # Italian, female
|
||||
'pt-PT': 'pt-PT-Wavenet-B', # Portuguese Portugal, female
|
||||
'pt-BR': 'pt-BR-Wavenet-B', # Portuguese Brazil, female
|
||||
'nl-NL': 'nl-NL-Wavenet-B', # Dutch, female
|
||||
'pl-PL': 'pl-PL-Wavenet-B', # Polish, female
|
||||
'ru-RU': 'ru-RU-Wavenet-B', # Russian, female
|
||||
'uk-UA': 'uk-UA-Wavenet-B', # Ukrainian, female
|
||||
'cs-CZ': 'cs-CZ-Wavenet-B', # Czech, female
|
||||
'sk-SK': 'sk-SK-Wavenet-B', # Slovak, female
|
||||
'hu-HU': 'hu-HU-Wavenet-B', # Hungarian, female
|
||||
'ro-RO': 'ro-RO-Wavenet-B', # Romanian, female
|
||||
'bg-BG': 'bg-BG-Wavenet-B', # Bulgarian, female
|
||||
'hr-HR': 'hr-HR-Wavenet-B', # Croatian, female
|
||||
'sr-RS': 'sr-RS-Wavenet-B', # Serbian, female
|
||||
'sl-SI': 'sl-SI-Wavenet-B', # Slovenian, female
|
||||
'et-EE': 'et-EE-Wavenet-B', # Estonian, female
|
||||
'lv-LV': 'lv-LV-Wavenet-B', # Latvian, female
|
||||
'lt-LT': 'lt-LT-Wavenet-B', # Lithuanian, female
|
||||
'fi-FI': 'fi-FI-Wavenet-B', # Finnish, female
|
||||
'sv-SE': 'sv-SE-Wavenet-B', # Swedish, female
|
||||
'no-NO': 'no-NO-Wavenet-B', # Norwegian, female
|
||||
'da-DK': 'da-DK-Wavenet-B', # Danish, female
|
||||
'is-IS': 'is-IS-Wavenet-B', # Icelandic, female
|
||||
'el-GR': 'el-GR-Wavenet-B', # Greek, female
|
||||
'ca-ES': 'ca-ES-Wavenet-B', # Catalan, female
|
||||
'eu-ES': 'eu-ES-Wavenet-B', # Basque, female
|
||||
'gl-ES': 'gl-ES-Wavenet-B', # Galician, female
|
||||
'cy-GB': 'cy-GB-Wavenet-B', # Welsh, female
|
||||
'ga-IE': 'ga-IE-Wavenet-B', # Irish, female
|
||||
'mt-MT': 'mt-MT-Wavenet-B', # Maltese, female
|
||||
return None
|
||||
|
||||
async def get_available_languages(self) -> Dict[str, Any]:
|
||||
"""
|
||||
Get available languages from Google Cloud Text-to-Speech.
|
||||
|
||||
Returns:
|
||||
Dict containing success status and list of available languages
|
||||
"""
|
||||
try:
|
||||
logger.info("🌐 Getting available languages from Google Cloud TTS")
|
||||
|
||||
# Asian Languages
|
||||
'ja-JP': 'ja-JP-Wavenet-B', # Japanese, female
|
||||
'ko-KR': 'ko-KR-Wavenet-B', # Korean, female
|
||||
'zh-CN': 'cmn-CN-Wavenet-B', # Chinese Mandarin, female
|
||||
'zh-TW': 'cmn-TW-Wavenet-B', # Chinese Traditional, female
|
||||
'zh-HK': 'cmn-HK-Wavenet-B', # Chinese Hong Kong, female
|
||||
'hi-IN': 'hi-IN-Wavenet-B', # Hindi, female
|
||||
'bn-IN': 'bn-IN-Wavenet-B', # Bengali, female
|
||||
'te-IN': 'te-IN-Wavenet-B', # Telugu, female
|
||||
'ta-IN': 'ta-IN-Wavenet-B', # Tamil, female
|
||||
'gu-IN': 'gu-IN-Wavenet-B', # Gujarati, female
|
||||
'kn-IN': 'kn-IN-Wavenet-B', # Kannada, female
|
||||
'ml-IN': 'ml-IN-Wavenet-B', # Malayalam, female
|
||||
'pa-IN': 'pa-IN-Wavenet-B', # Punjabi, female
|
||||
'or-IN': 'or-IN-Wavenet-B', # Odia, female
|
||||
'as-IN': 'as-IN-Wavenet-B', # Assamese, female
|
||||
'ne-NP': 'ne-NP-Wavenet-B', # Nepali, female
|
||||
'si-LK': 'si-LK-Wavenet-B', # Sinhala, female
|
||||
'th-TH': 'th-TH-Wavenet-B', # Thai, female
|
||||
'vi-VN': 'vi-VN-Wavenet-B', # Vietnamese, female
|
||||
'id-ID': 'id-ID-Wavenet-B', # Indonesian, female
|
||||
'ms-MY': 'ms-MY-Wavenet-B', # Malay, female
|
||||
'tl-PH': 'fil-PH-Wavenet-B', # Filipino, female
|
||||
'tr-TR': 'tr-TR-Wavenet-B', # Turkish, female
|
||||
# List voices from Google Cloud TTS
|
||||
voices = self.tts_client.list_voices()
|
||||
|
||||
# Middle Eastern & African Languages
|
||||
'ar-SA': 'ar-SA-Wavenet-B', # Arabic Saudi Arabia, female
|
||||
'ar-EG': 'ar-EG-Wavenet-B', # Arabic Egypt, female
|
||||
'ar-AE': 'ar-AE-Wavenet-B', # Arabic UAE, female
|
||||
'ar-JO': 'ar-JO-Wavenet-B', # Arabic Jordan, female
|
||||
'ar-KW': 'ar-KW-Wavenet-B', # Arabic Kuwait, female
|
||||
'ar-LB': 'ar-LB-Wavenet-B', # Arabic Lebanon, female
|
||||
'ar-QA': 'ar-QA-Wavenet-B', # Arabic Qatar, female
|
||||
'ar-BH': 'ar-BH-Wavenet-B', # Arabic Bahrain, female
|
||||
'ar-OM': 'ar-OM-Wavenet-B', # Arabic Oman, female
|
||||
'ar-IQ': 'ar-IQ-Wavenet-B', # Arabic Iraq, female
|
||||
'ar-PS': 'ar-PS-Wavenet-B', # Arabic Palestine, female
|
||||
'ar-SY': 'ar-SY-Wavenet-B', # Arabic Syria, female
|
||||
'ar-YE': 'ar-YE-Wavenet-B', # Arabic Yemen, female
|
||||
'ar-MA': 'ar-MA-Wavenet-B', # Arabic Morocco, female
|
||||
'ar-DZ': 'ar-DZ-Wavenet-B', # Arabic Algeria, female
|
||||
'ar-TN': 'ar-TN-Wavenet-B', # Arabic Tunisia, female
|
||||
'ar-LY': 'ar-LY-Wavenet-B', # Arabic Libya, female
|
||||
'ar-SD': 'ar-SD-Wavenet-B', # Arabic Sudan, female
|
||||
'he-IL': 'he-IL-Wavenet-B', # Hebrew, female
|
||||
'fa-IR': 'fa-IR-Wavenet-B', # Persian, female
|
||||
'ur-PK': 'ur-PK-Wavenet-B', # Urdu, female
|
||||
'af-ZA': 'af-ZA-Wavenet-B', # Afrikaans, female
|
||||
'sw-KE': 'sw-KE-Wavenet-B', # Swahili Kenya, female
|
||||
'am-ET': 'am-ET-Wavenet-B', # Amharic, female
|
||||
'sw-TZ': 'sw-TZ-Wavenet-B', # Swahili Tanzania, female
|
||||
'zu-ZA': 'zu-ZA-Wavenet-B', # Zulu, female
|
||||
'xh-ZA': 'xh-ZA-Wavenet-B', # Xhosa, female
|
||||
}
|
||||
return voice_mapping.get(language_code, 'en-US-Wavenet-B')
|
||||
# Extract unique language codes
|
||||
language_codes = set()
|
||||
for voice in voices:
|
||||
if voice.language_codes:
|
||||
language_codes.update(voice.language_codes)
|
||||
|
||||
# Convert to sorted list
|
||||
available_languages = sorted(list(language_codes))
|
||||
|
||||
logger.info(f"✅ Found {len(available_languages)} available languages")
|
||||
|
||||
return {
|
||||
"success": True,
|
||||
"languages": available_languages
|
||||
}
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"❌ Failed to get available languages: {e}")
|
||||
return {
|
||||
"success": False,
|
||||
"error": str(e),
|
||||
"languages": []
|
||||
}
|
||||
|
||||
async def get_available_voices(self, language_code: Optional[str] = None) -> Dict[str, Any]:
|
||||
"""
|
||||
Get available voices from Google Cloud Text-to-Speech.
|
||||
|
||||
Args:
|
||||
language_code: Optional language code to filter voices (e.g., 'de-DE', 'en-US')
|
||||
|
||||
Returns:
|
||||
Dict containing success status and list of available voices
|
||||
"""
|
||||
try:
|
||||
logger.info(f"🎤 Getting available voices from Google Cloud TTS, language filter: {language_code}")
|
||||
|
||||
# List voices from Google Cloud TTS
|
||||
voices = self.tts_client.list_voices()
|
||||
|
||||
available_voices = []
|
||||
|
||||
for voice in voices:
|
||||
# Extract language code from voice name (e.g., 'de-DE-Wavenet-A' -> 'de-DE')
|
||||
voice_language = voice.language_codes[0] if voice.language_codes else None
|
||||
|
||||
# Filter by language if specified
|
||||
if language_code and voice_language != language_code:
|
||||
continue
|
||||
|
||||
# Determine gender from voice name (A/C = male, B/D = female)
|
||||
gender = "Unknown"
|
||||
if voice.name:
|
||||
if voice.name.endswith(('-A', '-C')):
|
||||
gender = "Male"
|
||||
elif voice.name.endswith(('-B', '-D')):
|
||||
gender = "Female"
|
||||
|
||||
# Create voice info
|
||||
voice_info = {
|
||||
"name": voice.name,
|
||||
"language_code": voice_language,
|
||||
"gender": gender,
|
||||
"ssml_gender": voice.ssml_gender.name if voice.ssml_gender else "NEUTRAL",
|
||||
"natural_sample_rate_hertz": voice.natural_sample_rate_hertz
|
||||
}
|
||||
|
||||
available_voices.append(voice_info)
|
||||
|
||||
# Sort by language code, then by gender, then by name
|
||||
available_voices.sort(key=lambda x: (x["language_code"], x["gender"], x["name"]))
|
||||
|
||||
logger.info(f"✅ Found {len(available_voices)} voices for language filter: {language_code}")
|
||||
|
||||
return {
|
||||
"success": True,
|
||||
"voices": available_voices,
|
||||
"total_count": len(available_voices)
|
||||
}
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"❌ Failed to get available voices: {e}")
|
||||
return {
|
||||
"success": False,
|
||||
"error": str(e),
|
||||
"voices": []
|
||||
}
|
||||
|
||||
|
|
@ -1,16 +1,98 @@
|
|||
from typing import Optional
|
||||
from typing import Optional, List
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
|
||||
class AiCallOptions(BaseModel):
|
||||
"""Options for centralized AI processing (no document extraction here)."""
|
||||
# Operation Types
|
||||
class OperationType:
|
||||
GENERAL = "general"
|
||||
GENERATE_PLAN = "generate_plan"
|
||||
ANALYSE_CONTENT = "analyse_content"
|
||||
GENERATE_CONTENT = "generate_content"
|
||||
WEB_RESEARCH = "web_research"
|
||||
IMAGE_ANALYSIS = "image_analysis"
|
||||
IMAGE_GENERATION = "image_generation"
|
||||
|
||||
operationType: str = Field(default="general", description="Type of operation")
|
||||
|
||||
# Processing Modes
|
||||
class ProcessingMode:
|
||||
BASIC = "basic"
|
||||
ADVANCED = "advanced"
|
||||
DETAILED = "detailed"
|
||||
|
||||
|
||||
# Priority Levels
|
||||
class Priority:
|
||||
SPEED = "speed"
|
||||
QUALITY = "quality"
|
||||
COST = "cost"
|
||||
BALANCED = "balanced"
|
||||
|
||||
|
||||
# Model Tags
|
||||
class ModelTags:
|
||||
# Core capabilities
|
||||
TEXT = "text"
|
||||
CHAT = "chat"
|
||||
REASONING = "reasoning"
|
||||
ANALYSIS = "analysis"
|
||||
IMAGE = "image"
|
||||
VISION = "vision"
|
||||
MULTIMODAL = "multimodal"
|
||||
WEB = "web"
|
||||
SEARCH = "search"
|
||||
CRAWL = "crawl"
|
||||
EXTRACT = "extract"
|
||||
CONTENT = "content"
|
||||
INFORMATION = "information"
|
||||
|
||||
# Quality indicators
|
||||
HIGH_QUALITY = "high_quality"
|
||||
FAST = "fast"
|
||||
COST_EFFECTIVE = "cost_effective"
|
||||
GENERAL = "general"
|
||||
|
||||
# Specialized capabilities
|
||||
IMAGE_GENERATION = "image_generation"
|
||||
ART = "art"
|
||||
VISUAL = "visual"
|
||||
VARIATIONS = "variations"
|
||||
API = "api"
|
||||
INFO = "info"
|
||||
MODELS = "models"
|
||||
|
||||
|
||||
# Operation Type to Required Tags Mapping
|
||||
OPERATION_TAG_MAPPING = {
|
||||
OperationType.GENERAL: [ModelTags.TEXT, ModelTags.CHAT, ModelTags.REASONING],
|
||||
OperationType.GENERATE_PLAN: [ModelTags.TEXT, ModelTags.REASONING, ModelTags.ANALYSIS],
|
||||
OperationType.ANALYSE_CONTENT: [ModelTags.TEXT, ModelTags.ANALYSIS, ModelTags.REASONING],
|
||||
OperationType.GENERATE_CONTENT: [ModelTags.TEXT, ModelTags.CHAT, ModelTags.REASONING],
|
||||
OperationType.WEB_RESEARCH: [ModelTags.TEXT, ModelTags.ANALYSIS, ModelTags.REASONING],
|
||||
OperationType.IMAGE_ANALYSIS: [ModelTags.IMAGE, ModelTags.VISION, ModelTags.MULTIMODAL],
|
||||
OperationType.IMAGE_GENERATION: [ModelTags.IMAGE_GENERATION, ModelTags.ART, ModelTags.VISUAL],
|
||||
}
|
||||
|
||||
|
||||
# Processing Mode to Priority Mapping
|
||||
PROCESSING_MODE_PRIORITY_MAPPING = {
|
||||
ProcessingMode.BASIC: Priority.SPEED,
|
||||
ProcessingMode.ADVANCED: Priority.BALANCED,
|
||||
ProcessingMode.DETAILED: Priority.QUALITY,
|
||||
}
|
||||
|
||||
|
||||
class AiCallOptions(BaseModel):
|
||||
"""Options for centralized AI processing with clear operation types and tags."""
|
||||
|
||||
operationType: str = Field(default="general", description="Type of operation: general, generate_plan, analyse_content, generate_content, web_research")
|
||||
priority: str = Field(default="balanced", description="speed|quality|cost|balanced")
|
||||
compressPrompt: bool = Field(default=True, description="Whether to compress the prompt")
|
||||
compressContext: bool = Field(default=True, description="Whether to compress optional context")
|
||||
maxCost: Optional[float] = Field(default=None, description="Max cost budget")
|
||||
maxProcessingTime: Optional[int] = Field(default=None, description="Max processing time in seconds")
|
||||
requiredTags: Optional[List[str]] = Field(default=None, description="Required model tags for selection")
|
||||
processingMode: str = Field(default="basic", description="Processing mode: basic, advanced, detailed")
|
||||
resultFormat: Optional[str] = Field(default=None, description="Expected result format: txt, json, csv, xml, etc.")
|
||||
|
||||
|
||||
class AiCallRequest(BaseModel):
|
||||
|
|
|
|||
|
|
@ -260,3 +260,45 @@ register_model_labels(
|
|||
)
|
||||
|
||||
|
||||
class WorkflowResult(BaseModel, ModelMixin):
|
||||
status: str
|
||||
completed_tasks: int
|
||||
total_tasks: int
|
||||
execution_time: float
|
||||
final_results_count: int
|
||||
error: Optional[str] = None
|
||||
phase: Optional[str] = None
|
||||
|
||||
|
||||
register_model_labels(
|
||||
"WorkflowResult",
|
||||
{"en": "Workflow Result", "fr": "Résultat du workflow"},
|
||||
{
|
||||
"status": {"en": "Status", "fr": "Statut"},
|
||||
"completed_tasks": {"en": "Completed Tasks", "fr": "Tâches terminées"},
|
||||
"total_tasks": {"en": "Total Tasks", "fr": "Total des tâches"},
|
||||
"execution_time": {"en": "Execution Time", "fr": "Temps d'exécution"},
|
||||
"final_results_count": {"en": "Final Results Count", "fr": "Nombre de résultats finaux"},
|
||||
"error": {"en": "Error", "fr": "Erreur"},
|
||||
"phase": {"en": "Phase", "fr": "Phase"},
|
||||
},
|
||||
)
|
||||
|
||||
|
||||
class UserInputRequest(BaseModel, ModelMixin):
|
||||
prompt: str = Field(description="Prompt for the user")
|
||||
listFileId: List[str] = Field(default_factory=list, description="List of file IDs")
|
||||
userLanguage: str = Field(default="en", description="User's preferred language")
|
||||
|
||||
|
||||
register_model_labels(
|
||||
"UserInputRequest",
|
||||
{"en": "User Input Request", "fr": "Demande de saisie utilisateur"},
|
||||
{
|
||||
"prompt": {"en": "Prompt", "fr": "Invite"},
|
||||
"listFileId": {"en": "File IDs", "fr": "IDs des fichiers"},
|
||||
"userLanguage": {"en": "User Language", "fr": "Langue de l'utilisateur"},
|
||||
},
|
||||
)
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -1,4 +1,4 @@
|
|||
"""Web-related modules.datamodels (search, crawl, scrape)."""
|
||||
"""Web-related modules"""
|
||||
|
||||
from abc import ABC, abstractmethod
|
||||
from pydantic import BaseModel, Field, HttpUrl
|
||||
|
|
|
|||
|
|
@ -10,8 +10,6 @@ class ActionDocument(BaseModel, ModelMixin):
|
|||
documentName: str = Field(description="Name of the document")
|
||||
documentData: Any = Field(description="Content/data of the document")
|
||||
mimeType: str = Field(description="MIME type of the document")
|
||||
|
||||
|
||||
register_model_labels(
|
||||
"ActionDocument",
|
||||
{"en": "Action Document", "fr": "Document d'action"},
|
||||
|
|
@ -43,8 +41,6 @@ class ActionResult(BaseModel, ModelMixin):
|
|||
@classmethod
|
||||
def isFailure(cls, error: str, documents: List[ActionDocument] = None) -> "ActionResult":
|
||||
return cls(success=False, documents=documents or [], error=error)
|
||||
|
||||
|
||||
register_model_labels(
|
||||
"ActionResult",
|
||||
{"en": "Action Result", "fr": "Résultat de l'action"},
|
||||
|
|
@ -57,9 +53,6 @@ register_model_labels(
|
|||
)
|
||||
|
||||
|
||||
# ===== Additional workflow models migrated from interfaceChatModel =====
|
||||
|
||||
|
||||
class ActionSelection(BaseModel, ModelMixin):
|
||||
method: str = Field(description="Method to execute (e.g., web, document, ai)")
|
||||
name: str = Field(description="Action name within the method (e.g., search, extract)")
|
||||
|
|
@ -402,45 +395,5 @@ register_model_labels(
|
|||
)
|
||||
|
||||
|
||||
class WorkflowResult(BaseModel, ModelMixin):
|
||||
status: str
|
||||
completed_tasks: int
|
||||
total_tasks: int
|
||||
execution_time: float
|
||||
final_results_count: int
|
||||
error: Optional[str] = None
|
||||
phase: Optional[str] = None
|
||||
|
||||
|
||||
register_model_labels(
|
||||
"WorkflowResult",
|
||||
{"en": "Workflow Result", "fr": "Résultat du workflow"},
|
||||
{
|
||||
"status": {"en": "Status", "fr": "Statut"},
|
||||
"completed_tasks": {"en": "Completed Tasks", "fr": "Tâches terminées"},
|
||||
"total_tasks": {"en": "Total Tasks", "fr": "Total des tâches"},
|
||||
"execution_time": {"en": "Execution Time", "fr": "Temps d'exécution"},
|
||||
"final_results_count": {"en": "Final Results Count", "fr": "Nombre de résultats finaux"},
|
||||
"error": {"en": "Error", "fr": "Erreur"},
|
||||
"phase": {"en": "Phase", "fr": "Phase"},
|
||||
},
|
||||
)
|
||||
|
||||
|
||||
class UserInputRequest(BaseModel, ModelMixin):
|
||||
prompt: str = Field(description="Prompt for the user")
|
||||
listFileId: List[str] = Field(default_factory=list, description="List of file IDs")
|
||||
userLanguage: str = Field(default="en", description="User's preferred language")
|
||||
|
||||
|
||||
register_model_labels(
|
||||
"UserInputRequest",
|
||||
{"en": "User Input Request", "fr": "Demande de saisie utilisateur"},
|
||||
{
|
||||
"prompt": {"en": "Prompt", "fr": "Invite"},
|
||||
"listFileId": {"en": "File IDs", "fr": "IDs des fichiers"},
|
||||
"userLanguage": {"en": "User Language", "fr": "Langue de l'utilisateur"},
|
||||
},
|
||||
)
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -3,29 +3,27 @@ import asyncio
|
|||
from typing import Optional
|
||||
|
||||
from modules.datamodels.datamodelUam import User
|
||||
from modules.datamodels.datamodelChat import ChatWorkflow
|
||||
from modules.datamodels.datamodelWorkflow import UserInputRequest
|
||||
from modules.shared.timezoneUtils import get_utc_timestamp
|
||||
from modules.datamodels.datamodelChat import ChatWorkflow, UserInputRequest
|
||||
from modules.workflows.workflowManager import WorkflowManager
|
||||
from modules.services import getInterface as getServices
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
async def chatStart(interfaceChat, currentUser: User, userInput: UserInputRequest, workflowId: Optional[str] = None, workflowMode: str = "Actionplan") -> ChatWorkflow:
|
||||
async def chatStart(interfaceDbChat, currentUser: User, userInput: UserInputRequest, workflowId: Optional[str] = None, workflowMode: str = "Actionplan") -> ChatWorkflow:
|
||||
"""
|
||||
Starts a new chat or continues an existing one, then launches processing asynchronously.
|
||||
|
||||
Args:
|
||||
interfaceChat: Chat interface instance
|
||||
interfaceDbChat: Chat interface instance
|
||||
currentUser: Current user
|
||||
userInput: User input request
|
||||
workflowId: Optional workflow ID to continue existing workflow
|
||||
workflowMode: "Actionplan" for traditional task planning, "React" for iterative react-style processing
|
||||
|
||||
Example usage for React mode:
|
||||
workflow = await chatStart(interfaceChat, currentUser, userInput, workflowMode="React")
|
||||
workflow = await chatStart(interfaceDbChat, currentUser, userInput, workflowMode="React")
|
||||
"""
|
||||
try:
|
||||
from modules.workflows.workflowManager import WorkflowManager
|
||||
from modules.services import getInterface as getServices
|
||||
services = getServices(currentUser, None)
|
||||
workflowManager = WorkflowManager(services)
|
||||
workflow = await workflowManager.workflowStart(userInput, workflowId, workflowMode)
|
||||
|
|
@ -34,11 +32,9 @@ async def chatStart(interfaceChat, currentUser: User, userInput: UserInputReques
|
|||
logger.error(f"Error starting chat: {str(e)}")
|
||||
raise
|
||||
|
||||
async def chatStop(interfaceChat, currentUser: User, workflowId: str) -> ChatWorkflow:
|
||||
async def chatStop(interfaceDbChat, currentUser: User, workflowId: str) -> ChatWorkflow:
|
||||
"""Stops a running chat."""
|
||||
try:
|
||||
from modules.workflows.workflowManager import WorkflowManager
|
||||
from modules.services import getInterface as getServices
|
||||
services = getServices(currentUser, None)
|
||||
workflowManager = WorkflowManager(services)
|
||||
return await workflowManager.workflowStop(workflowId)
|
||||
|
|
|
|||
|
|
@ -2,8 +2,13 @@
|
|||
|
||||
import asyncio
|
||||
import logging
|
||||
from modules.interfaces.interfaceDbAppObjects import getRootInterface
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
rootInterface = getRootInterface()
|
||||
eventUser = rootInterface.getUserByUsername("event")
|
||||
|
||||
# Custom features launch
|
||||
|
||||
# Import the syncDelta module to initialize it (this will register the scheduler)
|
||||
from modules.features.syncDelta import mainSyncDelta
|
||||
mainSyncDelta.startSyncManager(eventUser)
|
||||
|
|
|
|||
|
|
@ -1,9 +1,11 @@
|
|||
import logging
|
||||
import asyncio
|
||||
from typing import Any, Dict, List, Optional
|
||||
from urllib.parse import urlparse, unquote
|
||||
|
||||
from modules.datamodels.datamodelUam import User
|
||||
from modules.datamodels.datamodelNeutralizer import DataNeutralizerAttributes, DataNeutraliserConfig
|
||||
from modules.services.serviceNeutralization.mainServiceNeutralization import NeutralizationService
|
||||
from modules.services import getInterface as getServices
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
|
@ -13,17 +15,17 @@ class NeutralizationPlayground:
|
|||
|
||||
def __init__(self, currentUser: User):
|
||||
self.currentUser = currentUser
|
||||
self.service = NeutralizationService(currentUser)
|
||||
self.services = getServices(currentUser, None)
|
||||
|
||||
def processText(self, text: str) -> Dict[str, Any]:
|
||||
return self.service.processText(text)
|
||||
return self.services.neutralization.processText(text)
|
||||
|
||||
def processFiles(self, fileIds: List[str]) -> Dict[str, Any]:
|
||||
results: List[Dict[str, Any]] = []
|
||||
errors: List[str] = []
|
||||
for fileId in fileIds:
|
||||
try:
|
||||
res = self.service.processFile(fileId)
|
||||
res = self.services.neutralization.processFile(fileId)
|
||||
results.append({
|
||||
'file_id': fileId,
|
||||
'neutralized_file_name': res.get('neutralized_file_name'),
|
||||
|
|
@ -42,20 +44,18 @@ class NeutralizationPlayground:
|
|||
}
|
||||
|
||||
async def processSharepointFiles(self, sourcePath: str, targetPath: str) -> Dict[str, Any]:
|
||||
from modules.features.neutralizePlayground.sharepoint import SharepointProcessor
|
||||
processor = SharepointProcessor(self.currentUser, self.service)
|
||||
from modules.services.serviceSharepoint.mainServiceSharepoint import SharepointService
|
||||
processor = SharepointProcessor(self.currentUser, self.services)
|
||||
return await processor.processSharepointFiles(sourcePath, targetPath)
|
||||
|
||||
# Cleanup attributes
|
||||
def cleanAttributes(self, fileId: str) -> bool:
|
||||
if not self.service.app_interface:
|
||||
return False
|
||||
return self.service.app_interface.deleteNeutralizationAttributes(fileId)
|
||||
return self.services.neutralization.deleteNeutralizationAttributes(fileId)
|
||||
|
||||
# Stats
|
||||
def getStats(self) -> Dict[str, Any]:
|
||||
try:
|
||||
allAttributes = self.service._getAttributes()
|
||||
allAttributes = self.services.neutralization.getAttributes()
|
||||
patternCounts: Dict[str, int] = {}
|
||||
for attr in allAttributes:
|
||||
patternType = attr.patternType
|
||||
|
|
@ -79,26 +79,24 @@ class NeutralizationPlayground:
|
|||
# Additional methods needed by the route
|
||||
def get_config(self) -> Optional[DataNeutraliserConfig]:
|
||||
"""Get neutralization configuration"""
|
||||
return self.service.getConfig()
|
||||
return self.services.neutralization.getConfig()
|
||||
|
||||
def save_config(self, config_data: Dict[str, Any]) -> DataNeutraliserConfig:
|
||||
"""Save neutralization configuration"""
|
||||
return self.service.saveConfig(config_data)
|
||||
return self.services.neutralization.saveConfig(config_data)
|
||||
|
||||
def neutralize_text(self, text: str, file_id: str = None) -> Dict[str, Any]:
|
||||
"""Neutralize text content"""
|
||||
return self.service.processText(text)
|
||||
return self.services.neutralization.processText(text)
|
||||
|
||||
def resolve_text(self, text: str) -> str:
|
||||
"""Resolve UIDs in neutralized text back to original text"""
|
||||
return self.service.resolveText(text)
|
||||
return self.services.neutralization.resolveText(text)
|
||||
|
||||
def get_attributes(self, file_id: str = None) -> List[DataNeutralizerAttributes]:
|
||||
"""Get neutralization attributes, optionally filtered by file ID"""
|
||||
if not self.service.app_interface:
|
||||
return []
|
||||
try:
|
||||
all_attributes = self.service._getAttributes()
|
||||
all_attributes = self.services.neutralization.getAttributes()
|
||||
if file_id:
|
||||
return [attr for attr in all_attributes if attr.fileId == file_id]
|
||||
return all_attributes
|
||||
|
|
@ -126,13 +124,15 @@ class NeutralizationPlayground:
|
|||
|
||||
# Internal SharePoint helper module separated to keep feature logic tidy
|
||||
class SharepointProcessor:
|
||||
def __init__(self, currentUser: User, service: NeutralizationService):
|
||||
def __init__(self, currentUser: User, services):
|
||||
self.currentUser = currentUser
|
||||
self.service = service
|
||||
self.services = services
|
||||
|
||||
async def processSharepointFiles(self, sourcePath: str, targetPath: str) -> Dict[str, Any]:
|
||||
try:
|
||||
logger.info(f"Processing SharePoint files from {sourcePath} to {targetPath}")
|
||||
|
||||
# Get SharePoint connection
|
||||
connection = await self._getSharepointConnection(sourcePath)
|
||||
if not connection:
|
||||
return {
|
||||
|
|
@ -141,16 +141,17 @@ class SharepointProcessor:
|
|||
'processed_files': 0,
|
||||
'errors': ['No SharePoint connection found'],
|
||||
}
|
||||
from modules.security.tokenManager import TokenManager
|
||||
token = TokenManager().getFreshToken(self.service.app_interface, connection['id'])
|
||||
if not token:
|
||||
|
||||
# Set access token for SharePoint service
|
||||
if not self.services.sharepoint.setAccessTokenFromConnection(connection):
|
||||
return {
|
||||
'success': False,
|
||||
'message': 'No SharePoint access token found',
|
||||
'message': 'Failed to set SharePoint access token',
|
||||
'processed_files': 0,
|
||||
'errors': ['No SharePoint access token found'],
|
||||
'errors': ['Failed to set SharePoint access token'],
|
||||
}
|
||||
return await self._processSharepointFilesAsync(sourcePath, targetPath, token.tokenAccess)
|
||||
|
||||
return await self._processSharepointFilesAsync(sourcePath, targetPath)
|
||||
except Exception as e:
|
||||
logger.error(f"Error processing SharePoint files: {str(e)}")
|
||||
return {
|
||||
|
|
@ -163,9 +164,9 @@ class SharepointProcessor:
|
|||
async def _getSharepointConnection(self, sharepointPath: str = None):
|
||||
try:
|
||||
from modules.datamodels.datamodelUam import UserConnection
|
||||
connections = self.service.app_interface.db.getRecordset(
|
||||
connections = self.services.interfaceDbApp.db.getRecordset(
|
||||
UserConnection,
|
||||
recordFilter={"userId": self.service.app_interface.userId}
|
||||
recordFilter={"userId": self.services.interfaceDbApp.userId}
|
||||
)
|
||||
msftConnections = [c for c in connections if c.get('authority') == 'msft']
|
||||
if not msftConnections:
|
||||
|
|
@ -184,16 +185,14 @@ class SharepointProcessor:
|
|||
|
||||
async def _matchConnectionToPath(self, connections: list, sharepointPath: str):
|
||||
try:
|
||||
from urllib.parse import urlparse
|
||||
targetDomain = urlparse(sharepointPath).netloc.lower()
|
||||
logger.info(f"Looking for connection matching domain: {targetDomain}")
|
||||
from modules.security.tokenManager import TokenManager
|
||||
|
||||
for connection in connections:
|
||||
try:
|
||||
token = TokenManager().getFreshToken(self.service.app_interface, connection['id'])
|
||||
if not token:
|
||||
if not self.services.sharepoint.setAccessTokenFromConnection(connection):
|
||||
continue
|
||||
if await self._testSharepointAccess(token.tokenAccess, sharepointPath):
|
||||
if await self._testSharepointAccess(sharepointPath):
|
||||
logger.info(f"Found matching connection for domain {targetDomain}: {connection.get('id')}")
|
||||
return connection
|
||||
except Exception:
|
||||
|
|
@ -204,44 +203,33 @@ class SharepointProcessor:
|
|||
logger.error('Error matching connection to path')
|
||||
return connections[0] if connections else None
|
||||
|
||||
async def _testSharepointAccess(self, accessToken: str, sharepointPath: str) -> bool:
|
||||
async def _testSharepointAccess(self, sharepointPath: str) -> bool:
|
||||
try:
|
||||
return await self._testSharepointAccessAsync(accessToken, sharepointPath)
|
||||
except Exception:
|
||||
return False
|
||||
|
||||
async def _testSharepointAccessAsync(self, accessToken: str, sharepointPath: str) -> bool:
|
||||
try:
|
||||
from modules.services.serviceSharepoint.mainSharepoint import SharepointService
|
||||
connector = SharepointService(access_token=accessToken)
|
||||
siteUrl, _ = self._parseSharepointPath(sharepointPath)
|
||||
if not siteUrl:
|
||||
return False
|
||||
siteInfo = await connector.find_site_by_web_url(siteUrl)
|
||||
siteInfo = await self.services.sharepoint.find_site_by_web_url(siteUrl)
|
||||
return siteInfo is not None
|
||||
except Exception:
|
||||
return False
|
||||
|
||||
async def _processSharepointFilesAsync(self, sourcePath: str, targetPath: str, accessToken: str) -> Dict[str, Any]:
|
||||
async def _processSharepointFilesAsync(self, sourcePath: str, targetPath: str) -> Dict[str, Any]:
|
||||
try:
|
||||
import asyncio
|
||||
from modules.services.serviceSharepoint.mainSharepoint import SharepointService
|
||||
connector = SharepointService(access_token=accessToken)
|
||||
sourceSite, sourceFolder = self._parseSharepointPath(sourcePath)
|
||||
targetSite, targetFolder = self._parseSharepointPath(targetPath)
|
||||
if not sourceSite or not targetSite:
|
||||
return {'success': False, 'message': 'Invalid SharePoint path format', 'processed_files': 0, 'errors': ['Invalid SharePoint path format']}
|
||||
sourceSiteInfo = await connector.find_site_by_web_url(sourceSite)
|
||||
sourceSiteInfo = await self.services.sharepoint.find_site_by_web_url(sourceSite)
|
||||
if not sourceSiteInfo:
|
||||
return {'success': False, 'message': f'Source site not found: {sourceSite}', 'processed_files': 0, 'errors': [f'Source site not found: {sourceSite}']}
|
||||
targetSiteInfo = await connector.find_site_by_web_url(targetSite)
|
||||
targetSiteInfo = await self.services.sharepoint.find_site_by_web_url(targetSite)
|
||||
if not targetSiteInfo:
|
||||
return {'success': False, 'message': f'Target site not found: {targetSite}', 'processed_files': 0, 'errors': [f'Target site not found: {targetSite}']}
|
||||
logger.info(f"Listing files in folder: {sourceFolder} for site: {sourceSiteInfo['id']}")
|
||||
files = await connector.list_folder_contents(sourceSiteInfo['id'], sourceFolder)
|
||||
files = await self.services.sharepoint.list_folder_contents(sourceSiteInfo['id'], sourceFolder)
|
||||
if not files:
|
||||
logger.warning(f"No files found in folder '{sourceFolder}', trying root folder")
|
||||
files = await connector.list_folder_contents(sourceSiteInfo['id'], '')
|
||||
files = await self.services.sharepoint.list_folder_contents(sourceSiteInfo['id'], '')
|
||||
if files:
|
||||
folders = [f for f in files if f.get('type') == 'folder']
|
||||
folderNames = [f.get('name') for f in folders]
|
||||
|
|
@ -263,16 +251,16 @@ class SharepointProcessor:
|
|||
|
||||
async def _processSingle(fileInfo: Dict[str, Any]):
|
||||
try:
|
||||
fileContent = await connector.download_file(sourceSiteInfo['id'], fileInfo['id'])
|
||||
fileContent = await self.services.sharepoint.download_file(sourceSiteInfo['id'], fileInfo['id'])
|
||||
if not fileContent:
|
||||
return {'error': f"Failed to download file: {fileInfo['name']}"}
|
||||
try:
|
||||
textContent = fileContent.decode('utf-8')
|
||||
except UnicodeDecodeError:
|
||||
textContent = fileContent.decode('latin-1')
|
||||
result = self.service._neutralizeText(textContent, 'text')
|
||||
result = self.services.neutralization.processText(textContent)
|
||||
neutralizedFilename = f"neutralized_{fileInfo['name']}"
|
||||
uploadResult = await connector.upload_file(targetSiteInfo['id'], targetFolder, neutralizedFilename, result['neutralized_text'].encode('utf-8'))
|
||||
uploadResult = await self.services.sharepoint.upload_file(targetSiteInfo['id'], targetFolder, neutralizedFilename, result['neutralized_text'].encode('utf-8'))
|
||||
if 'error' in uploadResult:
|
||||
return {'error': f"Failed to upload neutralized file: {neutralizedFilename} - {uploadResult['error']}"}
|
||||
return {
|
||||
|
|
@ -325,7 +313,6 @@ class SharepointProcessor:
|
|||
siteName = parts[1].split('/')[0]
|
||||
siteUrl = f"https://{domain}/sites/{siteName}"
|
||||
folderParts = parts[1].split('/')[1:]
|
||||
from urllib.parse import unquote
|
||||
folderPath = unquote('/'.join(folderParts) if folderParts else '')
|
||||
return siteUrl, folderPath
|
||||
except Exception:
|
||||
|
|
|
|||
|
|
@ -9,18 +9,15 @@ import asyncio
|
|||
import logging
|
||||
import os
|
||||
import io
|
||||
import pandas as pd
|
||||
import csv as csv_module
|
||||
from io import StringIO, BytesIO
|
||||
from datetime import datetime, UTC
|
||||
from typing import Dict, Any, List, Optional
|
||||
from modules.services import getInterface as getServices
|
||||
# Removed direct import - now using services.ticket
|
||||
from modules.shared.configuration import APP_CONFIG
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Get environment type from configuration
|
||||
APP_ENV_TYPE = APP_CONFIG.get("APP_ENV_TYPE", "dev")
|
||||
|
||||
|
||||
class ManagerSyncDelta:
|
||||
"""Manages Tickets to SharePoint synchronization for Delta Group.
|
||||
|
||||
|
|
@ -46,7 +43,7 @@ class ManagerSyncDelta:
|
|||
|
||||
# Tickets connection parameters
|
||||
JIRA_USERNAME = "p.motsch@valueon.ch"
|
||||
JIRA_API_TOKEN = APP_CONFIG.get("Feature_SyncDelta_JIRA_DELTA_TOKEN_SECRET", "")
|
||||
JIRA_API_TOKEN = "" # Will be set in __init__
|
||||
JIRA_URL = "https://deltasecurity.atlassian.net"
|
||||
JIRA_PROJECT_CODE = "DCS"
|
||||
JIRA_ISSUE_TYPE = "Task"
|
||||
|
|
@ -84,6 +81,9 @@ class ManagerSyncDelta:
|
|||
self._log_audit_event("SYNC_INIT", "FAILED", "Event user not found")
|
||||
else:
|
||||
self.services = getServices(eventUser, None)
|
||||
# Read config values using services
|
||||
self.APP_ENV_TYPE = self.services.utils.configGet("APP_ENV_TYPE", "dev")
|
||||
self.JIRA_API_TOKEN = self.services.utils.configGet("Feature_SyncDelta_JIRA_DELTA_TOKEN_SECRET", "")
|
||||
# Resolve SharePoint connection for the configured user id
|
||||
self.sharepointConnection = self.services.workflow.getUserConnectionByExternalUsername("msft", self.SHAREPOINT_USER_ID)
|
||||
if not self.sharepointConnection:
|
||||
|
|
@ -93,8 +93,8 @@ class ManagerSyncDelta:
|
|||
self._log_audit_event("SYNC_INIT", "FAILED", f"No SharePoint connection for user: {self.SHAREPOINT_USER_ID}")
|
||||
else:
|
||||
# Configure SharePoint service token and set connector reference
|
||||
if not self.services.sharepoint.setAccessToken(
|
||||
self.sharepointConnection, self.services.interfaceApp
|
||||
if not self.services.sharepoint.setAccessTokenFromConnection(
|
||||
self.sharepointConnection
|
||||
):
|
||||
logger.error("Failed to set SharePoint token from UserConnection")
|
||||
self._log_audit_event("SYNC_INIT", "FAILED", "Failed to set SharePoint token")
|
||||
|
|
@ -110,7 +110,7 @@ class ManagerSyncDelta:
|
|||
def _log_audit_event(self, action: str, status: str, details: str):
|
||||
"""Log audit events for sync operations to memory."""
|
||||
try:
|
||||
timestamp = datetime.now(UTC).strftime("%Y-%m-%d %H:%M:%S UTC")
|
||||
timestamp = datetime.fromtimestamp(self.services.utils.getUtcTimestamp(), UTC).strftime("%Y-%m-%d %H:%M:%S UTC")
|
||||
user_id = str(self.eventUser.id) if self.eventUser else "system"
|
||||
log_entry = f"{timestamp} | {user_id} | {action} | {status} | {details}"
|
||||
self.sync_audit_log.append(log_entry)
|
||||
|
|
@ -146,7 +146,7 @@ class ManagerSyncDelta:
|
|||
return False
|
||||
|
||||
# Generate log filename with current timestamp
|
||||
timestamp = datetime.now(UTC).strftime("%Y%m%d_%H%M%S")
|
||||
timestamp = datetime.fromtimestamp(self.services.utils.getUtcTimestamp(), UTC).strftime("%Y%m%d_%H%M%S")
|
||||
log_filename = f"log_{timestamp}.log"
|
||||
|
||||
# Create log content
|
||||
|
|
@ -273,7 +273,7 @@ class ManagerSyncDelta:
|
|||
logger.info(f"Using sync file: {sync_file_name}")
|
||||
|
||||
# Create list-based ticket interface (initialize connector by type)
|
||||
sync_interface = await self.services.ticket.createTicketInterfaceByType(
|
||||
sync_interface = await self.services.ticket._createTicketInterfaceByType(
|
||||
taskSyncDefinition=self.TASK_SYNC_DEFINITION,
|
||||
connectorType="Jira",
|
||||
connectorParams={
|
||||
|
|
@ -343,7 +343,6 @@ class ManagerSyncDelta:
|
|||
existing_headers["header1"] = csv_lines[0].rstrip('\r\n')
|
||||
existing_headers["header2"] = csv_lines[1].rstrip('\r\n')
|
||||
# Parse existing CSV rows after the two header lines
|
||||
import pandas as pd
|
||||
df_existing = pd.read_csv(io.BytesIO(csv_content), skiprows=2, quoting=1, escapechar='\\', on_bad_lines='skip', engine='python')
|
||||
existing_data = df_existing.to_dict('records')
|
||||
except Exception:
|
||||
|
|
@@ -362,7 +361,6 @@ class ManagerSyncDelta:
|
|||
csv_content = await self.services.sharepoint.download_file_by_path(
|
||||
site_id=self.targetSite['id'], file_path=file_path
|
||||
)
|
||||
import pandas as pd
|
||||
df = pd.read_csv(io.BytesIO(csv_content), skiprows=2, quoting=1, escapechar='\\', on_bad_lines='skip', engine='python')
|
||||
csv_rows = df.to_dict('records')
|
||||
self._log_audit_event("SYNC_IMPORT", "INFO", f"Importing {len(csv_rows)} CSV rows back to tickets")
|
||||
|
|
@@ -390,7 +388,7 @@ class ManagerSyncDelta:
|
|||
|
||||
async def backupSharepointFile(self, *, filename: str) -> bool:
|
||||
try:
|
||||
timestamp = datetime.now(UTC).strftime("%Y%m%d_%H%M%S")
|
||||
timestamp = datetime.fromtimestamp(self.services.utils.getUtcTimestamp(), UTC).strftime("%Y%m%d_%H%M%S")
|
||||
backup_filename = f"backup_{timestamp}_{filename}"
|
||||
await self.services.sharepoint.copy_file_async(
|
||||
site_id=self.targetSite['id'],
|
||||
|
|
@@ -454,9 +452,7 @@ class ManagerSyncDelta:
|
|||
return merged_data, details
|
||||
|
||||
def createCsvContent(self, data: list[dict], existing_headers: dict | None = None) -> bytes:
|
||||
import pandas as pd
|
||||
from io import StringIO
|
||||
timestamp = datetime.now(UTC).strftime("%Y-%m-%d %H:%M:%S UTC")
|
||||
timestamp = datetime.fromtimestamp(self.services.utils.getUtcTimestamp(), UTC).strftime("%Y-%m-%d %H:%M:%S UTC")
|
||||
if existing_headers is None:
|
||||
existing_headers = {"header1": "Header 1", "header2": "Header 2"}
|
||||
if not data:
|
||||
|
|
@@ -467,7 +463,6 @@ class ManagerSyncDelta:
|
|||
for column in df.columns:
|
||||
df[column] = df[column].astype("object").fillna("")
|
||||
df[column] = df[column].astype(str).str.replace('\n', '\\n', regex=False).str.replace('"', '""', regex=False)
|
||||
import csv as csv_module
|
||||
header1_row = next(csv_module.reader([existing_headers.get("header1", "Header 1")]), [])
|
||||
header2_row = next(csv_module.reader([existing_headers.get("header2", "Header 2")]), [])
|
||||
if len(header2_row) > 1:
|
||||
|
|
@@ -481,9 +476,7 @@ class ManagerSyncDelta:
|
|||
return out.getvalue().encode('utf-8')
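# Sketch of the resulting file layout (sample values, not from the diff): two preserved
# header lines followed by escaped data rows, which matches the skiprows=2 reads above.
#   Header 1
#   Header 2
#   colA,colB
#   "line one\nline two","He said ""hi"""
# Newlines are stored as literal \n and double quotes are doubled, as applied in the column cleanup.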
|
||||
|
||||
def createExcelContent(self, data: list[dict], existing_headers: dict | None = None) -> bytes:
|
||||
import pandas as pd
|
||||
from io import BytesIO
|
||||
timestamp = datetime.now(UTC).strftime("%Y-%m-%d %H:%M:%S UTC")
|
||||
timestamp = datetime.fromtimestamp(self.services.utils.getUtcTimestamp(), UTC).strftime("%Y-%m-%d %H:%M:%S UTC")
|
||||
if existing_headers is None:
|
||||
existing_headers = {"header1": "Header 1", "header2": "Header 2"}
|
||||
if not data:
|
||||
|
|
@@ -494,7 +487,6 @@ class ManagerSyncDelta:
|
|||
for column in df.columns:
|
||||
df[column] = df[column].astype("object").fillna("")
|
||||
df[column] = df[column].astype(str).str.replace('\n', '\\n', regex=False).str.replace('"', '""', regex=False)
|
||||
import csv as csv_module
|
||||
header1_row = next(csv_module.reader([existing_headers.get("header1", "Header 1")]), [])
|
||||
header2_row = next(csv_module.reader([existing_headers.get("header2", "Header 2")]), [])
|
||||
if len(header2_row) > 1:
|
||||
|
|
@@ -508,8 +500,6 @@ class ManagerSyncDelta:
|
|||
return buf.getvalue()
|
||||
|
||||
def parseExcelContent(self, excel_content: bytes) -> tuple[list[dict], dict]:
|
||||
import pandas as pd
|
||||
from io import BytesIO
|
||||
df = pd.read_excel(BytesIO(excel_content), engine='openpyxl', header=None)
|
||||
header_row1 = df.iloc[0:1].copy()
|
||||
header_row2 = df.iloc[1:2].copy()
|
||||
|
|
@@ -674,96 +664,97 @@ class ManagerSyncDelta:
|
|||
result = extract_text_from_content(content)
|
||||
return result.strip()
|
||||
|
||||
# Utility: dump all ticket fields (name -> field id) to a text file (generic)
|
||||
async def dumpTicketFieldsToFile(
|
||||
*,
|
||||
filepath: str = "ticket_sync_fields.txt",
|
||||
connectorType: str = "Jira",
|
||||
connectorParams: dict | None = None,
|
||||
taskSyncDefinition: dict | None = None,
|
||||
) -> bool:
|
||||
"""Write available ticket fields (name -> field id) to a text file (generic)."""
|
||||
try:
|
||||
connectorParams = connectorParams or {}
|
||||
taskSyncDefinition = taskSyncDefinition or ManagerSyncDelta.TASK_SYNC_DEFINITION
|
||||
ticket_interface = await createTicketInterfaceByType(
|
||||
taskSyncDefinition=taskSyncDefinition,
|
||||
connectorType=connectorType,
|
||||
connectorParams=connectorParams,
|
||||
)
|
||||
attributes = await ticket_interface.connector_ticket.read_attributes()
|
||||
if not attributes:
|
||||
logger.warning("No ticket attributes returned; nothing to write.")
|
||||
# Utility: dump all ticket fields (name -> field id) to a text file (generic)
|
||||
async def dumpTicketFieldsToFile(self,
|
||||
*,
|
||||
filepath: str = "ticket_sync_fields.txt",
|
||||
connectorType: str = "Jira",
|
||||
connectorParams: dict | None = None,
|
||||
taskSyncDefinition: dict | None = None,
|
||||
) -> bool:
|
||||
"""Write available ticket fields (name -> field id) to a text file (generic)."""
|
||||
try:
|
||||
connectorParams = connectorParams or {}
|
||||
taskSyncDefinition = taskSyncDefinition or self.TASK_SYNC_DEFINITION
|
||||
ticket_interface = await self.services.ticket._createTicketInterfaceByType(
|
||||
taskSyncDefinition=taskSyncDefinition,
|
||||
connectorType=connectorType,
|
||||
connectorParams=connectorParams,
|
||||
)
|
||||
attributes = await ticket_interface.connector_ticket.read_attributes()
|
||||
if not attributes:
|
||||
logger.warning("No ticket attributes returned; nothing to write.")
|
||||
return False
|
||||
dir_name = os.path.dirname(filepath)
|
||||
if dir_name:
|
||||
os.makedirs(dir_name, exist_ok=True)
|
||||
with open(filepath, "w", encoding="utf-8") as f:
|
||||
for attr in attributes:
|
||||
f.write(f"'{attr.field_name}': ['get', ['fields', '{attr.field}']]\n")
|
||||
logger.info(f"Wrote {len(attributes)} ticket fields to {filepath}")
|
||||
return True
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to dump ticket fields: {str(e)}")
|
||||
return False
|
||||
dir_name = os.path.dirname(filepath)
|
||||
if dir_name:
|
||||
os.makedirs(dir_name, exist_ok=True)
|
||||
with open(filepath, "w", encoding="utf-8") as f:
|
||||
for attr in attributes:
|
||||
f.write(f"'{attr.field_name}': ['get', ['fields', '{attr.field}']]\n")
|
||||
logger.info(f"Wrote {len(attributes)} ticket fields to {filepath}")
|
||||
return True
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to dump ticket fields: {str(e)}")
|
||||
return False
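# Each written line pairs a human-readable field name with its ticket field id, e.g.
# (hypothetical values, not from the diff): 'Summary': ['get', ['fields', 'summary']]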
|
||||
|
||||
# Utility: dump actual ticket data for debugging (generic)
|
||||
async def dumpTicketDataToFile(
|
||||
*,
|
||||
filepath: str = "ticket_sync_data.txt",
|
||||
connectorType: str = "Jira",
|
||||
connectorParams: dict | None = None,
|
||||
taskSyncDefinition: dict | None = None,
|
||||
sampleLimit: int = 5,
|
||||
) -> bool:
|
||||
"""Write actual ticket data to a text file for debugging field mapping (generic)."""
|
||||
try:
|
||||
connectorParams = connectorParams or {}
|
||||
taskSyncDefinition = taskSyncDefinition or ManagerSyncDelta.TASK_SYNC_DEFINITION
|
||||
ticket_interface = await createTicketInterfaceByType(
|
||||
taskSyncDefinition=taskSyncDefinition,
|
||||
connectorType=connectorType,
|
||||
connectorParams=connectorParams,
|
||||
)
|
||||
tickets = await ticket_interface.connector_ticket.read_tasks(limit=sampleLimit)
|
||||
if not tickets:
|
||||
logger.warning("No tickets returned; nothing to write.")
|
||||
# Utility: dump actual ticket data for debugging (generic)
|
||||
async def dumpTicketDataToFile(self,
|
||||
*,
|
||||
filepath: str = "ticket_sync_data.txt",
|
||||
connectorType: str = "Jira",
|
||||
connectorParams: dict | None = None,
|
||||
taskSyncDefinition: dict | None = None,
|
||||
sampleLimit: int = 5,
|
||||
) -> bool:
|
||||
"""Write actual ticket data to a text file for debugging field mapping (generic)."""
|
||||
try:
|
||||
connectorParams = connectorParams or {}
|
||||
taskSyncDefinition = taskSyncDefinition or self.TASK_SYNC_DEFINITION
|
||||
ticket_interface = await self.services.ticket._createTicketInterfaceByType(
|
||||
taskSyncDefinition=taskSyncDefinition,
|
||||
connectorType=connectorType,
|
||||
connectorParams=connectorParams,
|
||||
)
|
||||
tickets = await ticket_interface.connector_ticket.read_tasks(limit=sampleLimit)
|
||||
if not tickets:
|
||||
logger.warning("No tickets returned; nothing to write.")
|
||||
return False
|
||||
dir_name = os.path.dirname(filepath)
|
||||
if dir_name:
|
||||
os.makedirs(dir_name, exist_ok=True)
|
||||
with open(filepath, "w", encoding="utf-8") as f:
|
||||
f.write("=== TICKET DATA DEBUG ===\n\n")
|
||||
for i, ticket in enumerate(tickets):
|
||||
f.write(f"--- TICKET {i+1} ---\n")
|
||||
f.write("Raw ticket data:\n")
|
||||
f.write(f"{ticket.data}\n\n")
|
||||
f.write("Field mapping analysis:\n")
|
||||
for fieldName, fieldPath in taskSyncDefinition.items():
|
||||
if fieldPath[0] == 'get':
|
||||
try:
|
||||
value = ticket.data
|
||||
for key in fieldPath[1]:
|
||||
if isinstance(value, dict) and key in value:
|
||||
value = value[key]
|
||||
else:
|
||||
value = f"KEY_NOT_FOUND: {key}"
|
||||
break
|
||||
if isinstance(value, dict) and value.get("type") == "doc":
|
||||
pass # value = self.convertAdfToText(value)
|
||||
elif value is None:
|
||||
value = ""
|
||||
f.write(f" {fieldName}: {value}\n")
|
||||
except Exception as e:
|
||||
f.write(f" {fieldName}: ERROR - {str(e)}\n")
|
||||
f.write("\n" + "="*50 + "\n\n")
|
||||
logger.info(f"Wrote ticket data for {len(tickets)} tickets to {filepath}")
|
||||
return True
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to dump ticket data: {str(e)}")
|
||||
return False
|
||||
dir_name = os.path.dirname(filepath)
|
||||
if dir_name:
|
||||
os.makedirs(dir_name, exist_ok=True)
|
||||
with open(filepath, "w", encoding="utf-8") as f:
|
||||
f.write("=== TICKET DATA DEBUG ===\n\n")
|
||||
for i, ticket in enumerate(tickets):
|
||||
f.write(f"--- TICKET {i+1} ---\n")
|
||||
f.write("Raw ticket data:\n")
|
||||
f.write(f"{ticket.data}\n\n")
|
||||
f.write("Field mapping analysis:\n")
|
||||
for fieldName, fieldPath in taskSyncDefinition.items():
|
||||
if fieldPath[0] == 'get':
|
||||
try:
|
||||
value = ticket.data
|
||||
for key in fieldPath[1]:
|
||||
if isinstance(value, dict) and key in value:
|
||||
value = value[key]
|
||||
else:
|
||||
value = f"KEY_NOT_FOUND: {key}"
|
||||
break
|
||||
if isinstance(value, dict) and value.get("type") == "doc":
|
||||
pass # value = self.convertAdfToText(value)
|
||||
elif value is None:
|
||||
value = ""
|
||||
f.write(f" {fieldName}: {value}\n")
|
||||
except Exception as e:
|
||||
f.write(f" {fieldName}: ERROR - {str(e)}\n")
|
||||
f.write("\n" + "="*50 + "\n\n")
|
||||
logger.info(f"Wrote ticket data for {len(tickets)} tickets to {filepath}")
|
||||
return True
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to dump ticket data: {str(e)}")
|
||||
return False
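# Illustrative field-path walk over a hypothetical payload (names not from the diff):
#   fieldPath = ['get', ['fields', 'summary']]
#   ticket.data = {"fields": {"summary": "Fix login redirect"}}
# Walking the keys yields value == "Fix login redirect"; a missing key yields
# "KEY_NOT_FOUND: <key>", as handled above.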
|
||||
|
||||
# Main part of the module
|
||||
|
||||
async def performSync(eventUser) -> bool:
|
"""Perform ticket-to-SharePoint synchronization
|
||||
|
||||
|
|
@@ -800,12 +791,29 @@ async def performSync(eventUser) -> bool:
|
|||
# Create a global instance of ManagerSyncDelta to use for scheduled runs
|
||||
_sync_manager = None
|
||||
|
||||
def initialize_sync_manager(eventUser):
|
||||
def startSyncManager(eventUser):
|
||||
"""Initialize the global sync manager with the eventUser."""
|
||||
global _sync_manager
|
||||
if _sync_manager is None:
|
||||
_sync_manager = ManagerSyncDelta(eventUser)
|
||||
logger.info("Global sync manager initialized with eventUser")
|
||||
try:
|
||||
# Register scheduled job based on environment using the manager's services
|
||||
if _sync_manager.APP_ENV_TYPE == "prod":
|
||||
_sync_manager.services.utils.eventRegisterCron(
|
||||
job_id="syncDelta.syncTicket",
|
||||
func=scheduled_sync,
|
||||
cron_kwargs={"minute": "0,20,40"},
|
||||
replace_existing=True,
|
||||
coalesce=True,
|
||||
max_instances=1,
|
||||
misfire_grace_time=1800,
|
||||
)
|
||||
logger.info("Registered DG scheduler (every 20 minutes)")
|
||||
else:
|
||||
logger.info(f"Skipping DG scheduler registration for ticket sync in env: {_sync_manager.APP_ENV_TYPE}")
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to register scheduler for DG sync: {str(e)}")
|
||||
return _sync_manager
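# Usage sketch (assumes the caller resolves the shared "event" user, as the removed
# module-level bootstrap below did):
#   eventUser = getRootInterface().getUserByUsername("event")
#   startSyncManager(eventUser)  # builds the manager and, in prod, registers the 20-minute cron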
|
||||
|
||||
async def scheduled_sync():
|
||||
|
|
@@ -821,42 +829,4 @@ async def scheduled_sync():
|
|||
logger.error(f"Error in scheduled sync: {str(e)}")
|
||||
return False
|
||||
|
||||
# Initialize sync manager with eventUser
|
||||
try:
|
||||
from modules.interfaces.interfaceAppObjects import getRootInterface
|
||||
eventUser = getRootInterface().getUserByUsername("event")
|
||||
if eventUser:
|
||||
initialize_sync_manager(eventUser)
|
||||
logger.info("Sync manager initialized with eventUser")
|
||||
else:
|
||||
logger.error("Event user not found - cannot initialize sync manager")
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to initialize sync manager: {str(e)}")
|
||||
|
||||
# Register scheduled job on import using the shared event manager
|
||||
try:
|
||||
from modules.shared.eventManagement import eventManager
|
||||
|
||||
# Register scheduler only in production
|
||||
if APP_ENV_TYPE == "prod":
|
||||
# Schedule sync every 20 minutes (at minutes 00, 20, 40)
|
||||
eventManager.register_cron(
|
||||
job_id="dgsync",
|
||||
func=scheduled_sync,
|
||||
cron_kwargs={"minute": "0,20,40"},
|
||||
replace_existing=True,
|
||||
coalesce=True,
|
||||
max_instances=1,
|
||||
misfire_grace_time=1800,
|
||||
)
|
||||
logger.info("Registered DG ticket sync via EventManagement (every 20 minutes)")
|
||||
else:
|
||||
logger.info(f"Skipping DG scheduler registration for ticket sync in env: {APP_ENV_TYPE}")
|
||||
|
||||
# Run initial sync
|
||||
#import asyncio
|
||||
#asyncio.create_task(scheduled_sync())
|
||||
#logger.info("Initial sync scheduled")
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to register DG ticket sync: {str(e)}")
|
||||
# Scheduler registration and initialization are triggered by startSyncManager(eventUser)
|
||||
|
|
|
|||
|
|
@@ -1,49 +1,282 @@
|
|||
import logging
|
||||
from typing import Dict, Any, List
|
||||
from typing import Dict, Any, List, Union
|
||||
from dataclasses import dataclass
|
||||
|
||||
from modules.connectors.connectorAiOpenai import AiOpenai
|
||||
from modules.connectors.connectorAiAnthropic import AiAnthropic
|
||||
from modules.datamodels.datamodelAi import AiCallOptions, AiCallRequest, AiCallResponse
|
||||
from modules.connectors.connectorAiLangdoc import AiLangdoc
|
||||
from modules.connectors.connectorAiTavily import ConnectorWeb
|
||||
from modules.datamodels.datamodelAi import (
|
||||
AiCallOptions,
|
||||
AiCallRequest,
|
||||
AiCallResponse,
|
||||
OperationType,
|
||||
ProcessingMode,
|
||||
Priority,
|
||||
ModelTags,
|
||||
OPERATION_TAG_MAPPING,
|
||||
PROCESSING_MODE_PRIORITY_MAPPING
|
||||
)
|
||||
from modules.datamodels.datamodelWeb import (
|
||||
WebCrawlActionResult,
|
||||
WebCrawlActionDocument,
|
||||
WebCrawlDocumentData,
|
||||
WebCrawlRequest,
|
||||
WebCrawlResultItem,
|
||||
WebScrapeActionResult,
|
||||
WebScrapeActionDocument,
|
||||
WebSearchDocumentData as WebScrapeDocumentData,
|
||||
WebScrapeRequest,
|
||||
WebScrapeResultItem,
|
||||
WebSearchActionResult,
|
||||
WebSearchActionDocument,
|
||||
WebSearchDocumentData,
|
||||
WebSearchRequest,
|
||||
WebSearchResultItem,
|
||||
)
|
||||
from modules.datamodels.datamodelWorkflow import ActionDocument
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# Local model registry (connectors specification) belongs in interface layer, not service
|
||||
# Comprehensive model registry with capability tags and function mapping
|
||||
aiModels: Dict[str, Dict[str, Any]] = {
|
||||
"openai_gpt4o": {
|
||||
# OpenAI Models
|
||||
"openai_callAiBasic": {
|
||||
"connector": "openai",
|
||||
"function": "callAiBasic",
|
||||
"llmName": "gpt-4o",
|
||||
"contextLength": 128000,
|
||||
"costPer1kTokens": 0.03,
|
||||
"costPer1kTokensOutput": 0.06,
|
||||
"speedRating": 8,
|
||||
"qualityRating": 9,
|
||||
"capabilities": ["text_generation", "chat", "reasoning"],
|
||||
"tags": ["text", "chat", "reasoning", "general"]
|
||||
},
|
||||
"openai_gpt35": {
|
||||
"openai_callAiBasic_gpt35": {
|
||||
"connector": "openai",
|
||||
"function": "callAiBasic",
|
||||
"llmName": "gpt-3.5-turbo",
|
||||
"contextLength": 16000,
|
||||
"costPer1kTokens": 0.0015,
|
||||
"costPer1kTokensOutput": 0.002,
|
||||
"speedRating": 9,
|
||||
"qualityRating": 7,
|
||||
"capabilities": ["text_generation", "chat", "reasoning"],
|
||||
"tags": ["text", "chat", "reasoning", "general", "fast"]
|
||||
},
|
||||
"anthropic_claude": {
|
||||
"openai_callAiImage": {
|
||||
"connector": "openai",
|
||||
"function": "callAiImage",
|
||||
"llmName": "gpt-4o",
|
||||
"contextLength": 128000,
|
||||
"costPer1kTokens": 0.03,
|
||||
"costPer1kTokensOutput": 0.06,
|
||||
"speedRating": 7,
|
||||
"qualityRating": 9,
|
||||
"capabilities": ["image_analysis", "vision", "multimodal"],
|
||||
"tags": ["image", "vision", "multimodal"]
|
||||
},
|
||||
"openai_generateImage": {
|
||||
"connector": "openai",
|
||||
"function": "generateImage",
|
||||
"llmName": "dall-e-3",
|
||||
"contextLength": 0,
|
||||
"costPer1kTokens": 0.04,
|
||||
"costPer1kTokensOutput": 0.0,
|
||||
"speedRating": 6,
|
||||
"qualityRating": 9,
|
||||
"capabilities": ["image_generation", "art", "visual_creation"],
|
||||
"tags": ["image_generation", "art", "visual"]
|
||||
},
|
||||
|
||||
# Anthropic Models
|
||||
"anthropic_callAiBasic": {
|
||||
"connector": "anthropic",
|
||||
"function": "callAiBasic",
|
||||
"llmName": "claude-3-5-sonnet-20241022",
|
||||
"contextLength": 200000,
|
||||
"costPer1kTokens": 0.015,
|
||||
"costPer1kTokensOutput": 0.075,
|
||||
"speedRating": 7,
|
||||
"qualityRating": 10,
|
||||
"capabilities": ["text_generation", "chat", "reasoning", "analysis"],
|
||||
"tags": ["text", "chat", "reasoning", "analysis", "high_quality"]
|
||||
},
|
||||
"anthropic_callAiImage": {
|
||||
"connector": "anthropic",
|
||||
"function": "callAiImage",
|
||||
"llmName": "claude-3-5-sonnet-20241022",
|
||||
"contextLength": 200000,
|
||||
"costPer1kTokens": 0.015,
|
||||
"costPer1kTokensOutput": 0.075,
|
||||
"speedRating": 7,
|
||||
"qualityRating": 10,
|
||||
"capabilities": ["image_analysis", "vision", "multimodal"],
|
||||
"tags": ["image", "vision", "multimodal", "high_quality"]
|
||||
},
|
||||
|
||||
# LangDoc Models
|
||||
"langdoc_callAiBasic": {
|
||||
"connector": "langdoc",
|
||||
"function": "callAiBasic",
|
||||
"llmName": "gpt-4o",
|
||||
"contextLength": 128000,
|
||||
"costPer1kTokens": 0.02,
|
||||
"costPer1kTokensOutput": 0.04,
|
||||
"speedRating": 8,
|
||||
"qualityRating": 9,
|
||||
"capabilities": ["text_generation", "chat", "reasoning"],
|
||||
"tags": ["text", "chat", "reasoning", "general", "cost_effective"]
|
||||
},
|
||||
"langdoc_callAiImage": {
|
||||
"connector": "langdoc",
|
||||
"function": "callAiImage",
|
||||
"llmName": "gpt-4o",
|
||||
"contextLength": 128000,
|
||||
"costPer1kTokens": 0.02,
|
||||
"costPer1kTokensOutput": 0.04,
|
||||
"speedRating": 7,
|
||||
"qualityRating": 9,
|
||||
"capabilities": ["image_analysis", "vision", "multimodal"],
|
||||
"tags": ["image", "vision", "multimodal", "cost_effective"]
|
||||
},
|
||||
"langdoc_generateImage": {
|
||||
"connector": "langdoc",
|
||||
"function": "generateImage",
|
||||
"llmName": "dall-e-3",
|
||||
"contextLength": 0,
|
||||
"costPer1kTokens": 0.04,
|
||||
"costPer1kTokensOutput": 0.0,
|
||||
"speedRating": 6,
|
||||
"qualityRating": 9,
|
||||
"capabilities": ["image_generation", "art", "visual_creation"],
|
||||
"tags": ["image_generation", "art", "visual", "cost_effective"]
|
||||
},
|
||||
"langdoc_generateImageWithVariations": {
|
||||
"connector": "langdoc",
|
||||
"function": "generateImageWithVariations",
|
||||
"llmName": "dall-e-3",
|
||||
"contextLength": 0,
|
||||
"costPer1kTokens": 0.04,
|
||||
"costPer1kTokensOutput": 0.0,
|
||||
"speedRating": 5,
|
||||
"qualityRating": 9,
|
||||
"capabilities": ["image_generation", "art", "visual_creation", "variations"],
|
||||
"tags": ["image_generation", "art", "visual", "variations", "cost_effective"]
|
||||
},
|
||||
"langdoc_generateImageWithChat": {
|
||||
"connector": "langdoc",
|
||||
"function": "generateImageWithChat",
|
||||
"llmName": "gpt-4o",
|
||||
"contextLength": 128000,
|
||||
"costPer1kTokens": 0.02,
|
||||
"costPer1kTokensOutput": 0.04,
|
||||
"speedRating": 6,
|
||||
"qualityRating": 8,
|
||||
"capabilities": ["image_generation", "chat", "visual_creation"],
|
||||
"tags": ["image_generation", "chat", "visual", "cost_effective"]
|
||||
},
|
||||
"langdoc_listModels": {
|
||||
"connector": "langdoc",
|
||||
"function": "listModels",
|
||||
"llmName": "api",
|
||||
"contextLength": 0,
|
||||
"costPer1kTokens": 0.0,
|
||||
"costPer1kTokensOutput": 0.0,
|
||||
"speedRating": 9,
|
||||
"qualityRating": 5,
|
||||
"capabilities": ["model_listing", "api_info"],
|
||||
"tags": ["api", "info", "models"]
|
||||
},
|
||||
"langdoc_getModelInfo": {
|
||||
"connector": "langdoc",
|
||||
"function": "getModelInfo",
|
||||
"llmName": "api",
|
||||
"contextLength": 0,
|
||||
"costPer1kTokens": 0.0,
|
||||
"costPer1kTokensOutput": 0.0,
|
||||
"speedRating": 9,
|
||||
"qualityRating": 5,
|
||||
"capabilities": ["model_info", "api_info"],
|
||||
"tags": ["api", "info", "models"]
|
||||
},
|
||||
|
||||
# Tavily Web Models
|
||||
"tavily_search": {
|
||||
"connector": "tavily",
|
||||
"function": "search",
|
||||
"llmName": "tavily-search",
|
||||
"contextLength": 0,
|
||||
"costPer1kTokens": 0.0,
|
||||
"costPer1kTokensOutput": 0.0,
|
||||
"speedRating": 8,
|
||||
"qualityRating": 8,
|
||||
"capabilities": ["web_search", "information_retrieval", "url_discovery"],
|
||||
"tags": ["web", "search", "urls", "information"]
|
||||
},
|
||||
"tavily_crawl": {
|
||||
"connector": "tavily",
|
||||
"function": "crawl",
|
||||
"llmName": "tavily-extract",
|
||||
"contextLength": 0,
|
||||
"costPer1kTokens": 0.0,
|
||||
"costPer1kTokensOutput": 0.0,
|
||||
"speedRating": 6,
|
||||
"qualityRating": 8,
|
||||
"capabilities": ["web_crawling", "content_extraction", "text_extraction"],
|
||||
"tags": ["web", "crawl", "extract", "content"]
|
||||
},
|
||||
"tavily_scrape": {
|
||||
"connector": "tavily",
|
||||
"function": "scrape",
|
||||
"llmName": "tavily-search-extract",
|
||||
"contextLength": 0,
|
||||
"costPer1kTokens": 0.0,
|
||||
"costPer1kTokensOutput": 0.0,
|
||||
"speedRating": 6,
|
||||
"qualityRating": 8,
|
||||
"capabilities": ["web_search", "web_crawling", "content_extraction", "information_retrieval"],
|
||||
"tags": ["web", "search", "crawl", "extract", "content", "information"]
|
||||
}
|
||||
}
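# Illustrative lookup against the registry above (values as defined there):
#   info = aiModels["anthropic_callAiBasic"]
#   info["connector"] == "anthropic"; info["function"] == "callAiBasic"
#   info["llmName"] == "claude-3-5-sonnet-20241022"; its tags feed the selection logic below.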
|
||||
|
||||
|
||||
@dataclass(slots=True)
|
||||
class AiObjects:
|
||||
"""Centralized AI interface: selects model and calls connector. No document handling."""
|
||||
"""Centralized AI interface: selects model and calls connector. Includes web functionality."""
|
||||
|
||||
def __init__(self):
|
||||
self.openaiService = AiOpenai()
|
||||
self.anthropicService = AiAnthropic()
|
||||
openaiService: AiOpenai
|
||||
anthropicService: AiAnthropic
|
||||
langdocService: AiLangdoc
|
||||
tavilyService: ConnectorWeb
|
||||
|
||||
def __post_init__(self) -> None:
|
||||
if self.openaiService is None:
|
||||
raise TypeError("openaiService must be provided")
|
||||
if self.anthropicService is None:
|
||||
raise TypeError("anthropicService must be provided")
|
||||
if self.langdocService is None:
|
||||
raise TypeError("langdocService must be provided")
|
||||
if self.tavilyService is None:
|
||||
raise TypeError("tavilyService must be provided")
|
||||
|
||||
@classmethod
|
||||
async def create(cls) -> "AiObjects":
|
||||
"""Create AiObjects instance with all connectors initialized."""
|
||||
openaiService = AiOpenai()
|
||||
anthropicService = AiAnthropic()
|
||||
langdocService = AiLangdoc()
|
||||
tavilyService = await ConnectorWeb.create()
|
||||
|
||||
return cls(
|
||||
openaiService=openaiService,
|
||||
anthropicService=anthropicService,
|
||||
langdocService=langdocService,
|
||||
tavilyService=tavilyService
|
||||
)
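# Usage sketch (argument values illustrative, not from the diff):
#   aiObjects = await AiObjects.create()
#   response = await aiObjects.call(AiCallRequest(prompt="Summarise ...", options=AiCallOptions(...)))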
|
||||
|
||||
def _estimateCost(self, modelInfo: Dict[str, Any], contentSize: int) -> float:
|
||||
estimatedTokens = contentSize / 4
|
||||
|
|
@@ -52,32 +285,85 @@ class AiObjects:
|
|||
return inputCost + outputCost
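# Rough worked example (assumes the omitted lines compute cost as tokens/1000 times the
# per-1k rates, with input and output token counts taken as equal): an 8 000-byte payload
# is ~2 000 tokens, so for gpt-4o (0.03 / 0.06 per 1k) the estimate is 2*0.03 + 2*0.06 = 0.18.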
|
||||
|
||||
def _selectModel(self, prompt: str, context: str, options: AiCallOptions) -> str:
|
||||
"""Select the best model based on operation type, tags, and requirements."""
|
||||
totalSize = len(prompt.encode("utf-8")) + len(context.encode("utf-8"))
|
||||
candidates: Dict[str, Dict[str, Any]] = {}
|
||||
|
||||
# Determine required tags from operation type
|
||||
requiredTags = options.requiredTags
|
||||
if not requiredTags:
|
||||
requiredTags = OPERATION_TAG_MAPPING.get(options.operationType, [ModelTags.TEXT, ModelTags.CHAT])
|
||||
|
||||
# Override priority based on processing mode if not explicitly set
|
||||
effectivePriority = options.priority
|
||||
if options.priority == Priority.BALANCED:
|
||||
effectivePriority = PROCESSING_MODE_PRIORITY_MAPPING.get(options.processingMode, Priority.BALANCED)
|
||||
|
||||
logger.info(f"Model selection - Operation: {options.operationType}, Required tags: {requiredTags}, Priority: {effectivePriority}")
|
||||
|
||||
for name, info in aiModels.items():
|
||||
if totalSize > info["contextLength"] * 0.8:
|
||||
# Check context length
|
||||
if info["contextLength"] > 0 and totalSize > info["contextLength"] * 0.8:
|
||||
continue
|
||||
|
||||
# Check cost constraints
|
||||
if options.maxCost is not None:
|
||||
if self._estimateCost(info, totalSize) > options.maxCost:
|
||||
continue
|
||||
|
||||
# Check required tags/capabilities
|
||||
modelTags = info.get("tags", [])
|
||||
if requiredTags and not any(tag in modelTags for tag in requiredTags):
|
||||
continue
|
||||
|
||||
# Check processing mode requirements
|
||||
if options.processingMode == ProcessingMode.DETAILED and ModelTags.FAST in modelTags:
|
||||
# Skip fast models for detailed processing
|
||||
continue
|
||||
|
||||
candidates[name] = info
|
||||
|
||||
if not candidates:
|
||||
return "openai_gpt35"
|
||||
if options.priority == "speed":
|
||||
# Fallback based on operation type
|
||||
if options.operationType == OperationType.IMAGE_ANALYSIS:
|
||||
return "openai_callAiImage"
|
||||
elif options.operationType == OperationType.IMAGE_GENERATION:
|
||||
return "openai_generateImage"
|
||||
elif options.operationType == OperationType.WEB_RESEARCH:
|
||||
return "langdoc_callAiBasic"
|
||||
else:
|
||||
return "openai_callAiBasic_gpt35"
|
||||
|
||||
# Select based on priority
|
||||
if effectivePriority == Priority.SPEED:
|
||||
return max(candidates, key=lambda k: candidates[k]["speedRating"])
|
||||
if options.priority == "quality":
|
||||
elif effectivePriority == Priority.QUALITY:
|
||||
return max(candidates, key=lambda k: candidates[k]["qualityRating"])
|
||||
if options.priority == "cost":
|
||||
return min(candidates, key=lambda k: candidates[k]["costPer1kTokens"])
|
||||
def balancedScore(name: str) -> float:
|
||||
info = candidates[name]
|
||||
return info["qualityRating"] * 0.4 + info["speedRating"] * 0.3 + (10 - info["costPer1kTokens"] * 1000) * 0.3
|
||||
return max(candidates, key=balancedScore)
|
||||
elif effectivePriority == Priority.COST:
|
||||
return min(candidates, key=lambda k: candidates[k]["costPer1kTokens"])
|
||||
else: # BALANCED
|
||||
def balancedScore(name: str) -> float:
|
||||
info = candidates[name]
|
||||
return info["qualityRating"] * 0.4 + info["speedRating"] * 0.3 + (10 - info["costPer1kTokens"] * 1000) * 0.3
|
||||
|
||||
return max(candidates, key=balancedScore)
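# Worked example of the balanced score using registry values above (illustrative):
# for "openai_callAiBasic_gpt35", quality 7, speed 9 and cost 0.0015 per 1k give
# 7*0.4 + 9*0.3 + (10 - 1.5)*0.3 = 8.05.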
|
||||
|
||||
def _connectorFor(self, modelName: str):
|
||||
return self.openaiService if aiModels[modelName]["connector"] == "openai" else self.anthropicService
|
||||
"""Get the appropriate connector for the model."""
|
||||
connectorType = aiModels[modelName]["connector"]
|
||||
if connectorType == "openai":
|
||||
return self.openaiService
|
||||
elif connectorType == "anthropic":
|
||||
return self.anthropicService
|
||||
elif connectorType == "langdoc":
|
||||
return self.langdocService
|
||||
elif connectorType == "tavily":
|
||||
return self.tavilyService
|
||||
else:
|
||||
raise ValueError(f"Unknown connector type: {connectorType}")
|
||||
|
||||
async def call(self, request: AiCallRequest) -> AiCallResponse:
|
||||
"""Call AI model for text generation."""
|
||||
prompt = request.prompt
|
||||
context = request.context or ""
|
||||
options = request.options
|
||||
|
|
@@ -94,6 +380,7 @@ class AiObjects:
|
|||
if options.compressContext and len(context.encode("utf-8")) > 70000:
|
||||
context = maybeTruncate(context, 70000)
|
||||
|
||||
# Select model for text generation
|
||||
modelName = self._selectModel(prompt, context, options)
|
||||
|
||||
messages: List[Dict[str, Any]] = []
|
||||
|
|
@@ -102,11 +389,17 @@ class AiObjects:
|
|||
messages.append({"role": "user", "content": prompt})
|
||||
|
||||
connector = self._connectorFor(modelName)
|
||||
if aiModels[modelName]["connector"] == "openai":
|
||||
content = await connector.callAiBasic(messages)
|
||||
functionName = aiModels[modelName]["function"]
|
||||
|
||||
# Call the appropriate function
|
||||
if functionName == "callAiBasic":
|
||||
if aiModels[modelName]["connector"] == "openai":
|
||||
content = await connector.callAiBasic(messages)
|
||||
else:
|
||||
response = await connector.callAiBasic(messages)
|
||||
content = response["choices"][0]["message"]["content"]
|
||||
else:
|
||||
response = await connector.callAiBasic(messages)
|
||||
content = response["choices"][0]["message"]["content"]
|
||||
raise ValueError(f"Function {functionName} not supported for text generation")
|
||||
|
||||
# Estimate cost/tokens
|
||||
totalSize = len((prompt + context).encode("utf-8"))
|
||||
|
|
@@ -115,3 +408,106 @@ class AiObjects:
|
|||
|
||||
return AiCallResponse(content=content, modelName=modelName, usedTokens=usedTokens, costEstimate=cost)
|
||||
|
||||
async def callImage(self, prompt: str, imageData: Union[str, bytes], mimeType: str = None, options: AiCallOptions = None) -> str:
|
||||
"""Call AI model for image analysis."""
|
||||
if options is None:
|
||||
options = AiCallOptions(operationType=OperationType.IMAGE_ANALYSIS)
|
||||
|
||||
# Select model for image analysis
|
||||
modelName = self._selectModel(prompt, "", options)
|
||||
|
||||
connector = self._connectorFor(modelName)
|
||||
functionName = aiModels[modelName]["function"]
|
||||
|
||||
if functionName == "callAiImage":
|
||||
return await connector.callAiImage(prompt, imageData, mimeType)
|
||||
else:
|
||||
raise ValueError(f"Function {functionName} not supported for image analysis")
|
||||
|
||||
async def generateImage(self, prompt: str, size: str = "1024x1024", quality: str = "standard", style: str = "vivid", options: AiCallOptions = None) -> Dict[str, Any]:
|
||||
"""Generate an image using AI."""
|
||||
if options is None:
|
||||
options = AiCallOptions(operationType=OperationType.IMAGE_GENERATION)
|
||||
|
||||
# Select model for image generation
|
||||
modelName = self._selectModel(prompt, "", options)
|
||||
|
||||
connector = self._connectorFor(modelName)
|
||||
functionName = aiModels[modelName]["function"]
|
||||
|
||||
if functionName == "generateImage":
|
||||
return await connector.generateImage(prompt, size, quality, style)
|
||||
elif functionName == "generateImageWithVariations":
|
||||
results = await connector.generateImageWithVariations(prompt, 1, size, quality, style)
|
||||
return results[0] if results else {}
|
||||
elif functionName == "generateImageWithChat":
|
||||
content = await connector.generateImageWithChat(prompt, size, quality, style)
|
||||
return {"content": content, "success": True}
|
||||
else:
|
||||
raise ValueError(f"Function {functionName} not supported for image generation")
|
||||
|
||||
# Web functionality methods
|
||||
async def webSearch(self, web_search_request: WebSearchRequest) -> WebSearchActionResult:
|
||||
"""Perform web search using Tavily."""
|
||||
return await self.tavilyService.search(web_search_request)
|
||||
|
||||
async def webCrawl(self, web_crawl_request: WebCrawlRequest) -> WebCrawlActionResult:
|
||||
"""Crawl web pages using Tavily."""
|
||||
return await self.tavilyService.crawl(web_crawl_request)
|
||||
|
||||
async def webScrape(self, web_scrape_request: WebScrapeRequest) -> WebScrapeActionResult:
|
||||
"""Scrape web content using Tavily."""
|
||||
return await self.tavilyService.scrape(web_scrape_request)
|
||||
|
||||
async def webQuery(self, query: str, context: str = "", options: AiCallOptions = None) -> str:
|
"""Use LangDoc AI to answer web-related research queries."""
|
||||
if options is None:
|
||||
options = AiCallOptions(operationType=OperationType.WEB_RESEARCH)
|
||||
|
||||
# Create a comprehensive prompt for web queries
|
||||
webPrompt = f"""You are an expert web researcher and information analyst. Please provide a comprehensive and accurate answer to the following web-related query.
|
||||
|
||||
Query: {query}
|
||||
|
||||
{f"Additional Context: {context}" if context else ""}
|
||||
|
||||
Please provide:
|
||||
1. A clear, well-structured answer to the query
|
||||
2. Key points and important details
|
||||
3. Relevant insights and analysis
|
||||
4. Any important considerations or caveats
|
||||
5. Suggestions for further research if applicable
|
||||
|
||||
Format your response in a clear, professional manner that would be helpful for someone researching this topic."""
|
||||
|
||||
messages = [{"role": "user", "content": webPrompt}]
|
||||
|
||||
try:
|
||||
# Use LangDoc to generate the answer
|
||||
response = await self.langdocService.callAiBasic(messages)
|
||||
return response
|
||||
except Exception as e:
|
||||
logger.error(f"LangDoc web query failed: {str(e)}")
|
||||
raise Exception(f"Failed to process web query: {str(e)}")
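# Usage sketch (request fields illustrative; see datamodelWeb for the actual schema):
#   results = await aiObjects.webSearch(WebSearchRequest(...))
#   answer  = await aiObjects.webQuery("What changed in release X?", context=str(results))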
|
||||
|
||||
# Utility methods
|
||||
async def listAvailableModels(self, connectorType: str = None) -> List[Dict[str, Any]]:
|
||||
"""List available models, optionally filtered by connector type."""
|
||||
if connectorType:
|
||||
return [info for name, info in aiModels.items() if info["connector"] == connectorType]
|
||||
return list(aiModels.values())
|
||||
|
||||
async def getModelInfo(self, modelName: str) -> Dict[str, Any]:
|
||||
"""Get information about a specific model."""
|
||||
if modelName not in aiModels:
|
||||
raise ValueError(f"Model {modelName} not found")
|
||||
return aiModels[modelName]
|
||||
|
||||
async def getModelsByCapability(self, capability: str) -> List[str]:
|
||||
"""Get model names that support a specific capability."""
|
||||
return [name for name, info in aiModels.items() if capability in info.get("capabilities", [])]
|
||||
|
||||
async def getModelsByTag(self, tag: str) -> List[str]:
|
||||
"""Get model names that have a specific tag."""
|
||||
return [name for name, info in aiModels.items() if tag in info.get("tags", [])]
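# Usage sketch (results follow from the registry defined above):
#   await aiObjects.listAvailableModels("langdoc")       # the langdoc_* entries
#   await aiObjects.getModelsByCapability("web_search")  # ["tavily_search", "tavily_scrape"]
#   await aiObjects.getModelsByTag("fast")                # ["openai_callAiBasic_gpt35"]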
|
||||
|
||||
|
|
|
|||
|
|
@@ -1,981 +0,0 @@
|
|||
"""
|
||||
Chat model classes for the chat system.
|
||||
"""
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
from typing import List, Dict, Any, Optional
|
||||
from datetime import datetime, UTC
|
||||
import uuid
|
||||
from enum import Enum
|
||||
|
||||
from modules.shared.attributeUtils import register_model_labels, ModelMixin
|
||||
from modules.shared.timezoneUtils import get_utc_timestamp
|
||||
|
||||
# ===== Method Models =====
|
||||
|
||||
class ActionDocument(BaseModel, ModelMixin):
|
||||
"""Clear document structure for action results"""
|
||||
documentName: str = Field(description="Name of the document")
|
||||
documentData: Any = Field(description="Content/data of the document")
|
||||
mimeType: str = Field(description="MIME type of the document")
|
||||
|
||||
# Register labels for ActionDocument
|
||||
register_model_labels(
|
||||
"ActionDocument",
|
||||
{"en": "Action Document", "fr": "Document d'action"},
|
||||
{
|
||||
"documentName": {"en": "Document Name", "fr": "Nom du document"},
|
||||
"documentData": {"en": "Document Data", "fr": "Données du document"},
|
||||
"mimeType": {"en": "MIME Type", "fr": "Type MIME"}
|
||||
}
|
||||
)
|
||||
|
||||
class ActionResult(BaseModel, ModelMixin):
|
||||
"""Clean action result with documents as primary output
|
||||
|
||||
IMPORTANT: Action methods should NOT set resultLabel in their return value.
|
||||
The resultLabel is managed by the action handler using the action's execResultLabel
|
||||
from the action plan. This ensures consistent document routing throughout the workflow.
|
||||
"""
|
||||
# Core result
|
||||
success: bool = Field(description="Whether execution succeeded")
|
||||
error: Optional[str] = Field(None, description="Error message if failed")
|
||||
|
||||
# Primary output - documents
|
||||
documents: List[ActionDocument] = Field(default_factory=list, description="Document outputs")
|
||||
resultLabel: Optional[str] = Field(None, description="Label for document routing (set by action handler, not by action methods)")
|
||||
|
||||
@classmethod
|
||||
def isSuccess(cls, documents: List[ActionDocument] = None) -> 'ActionResult':
|
||||
"""Create a successful action result
|
||||
|
||||
Note: Do not set resultLabel - this is managed by the action handler
|
||||
"""
|
||||
return cls(
|
||||
success=True,
|
||||
documents=documents or []
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def isFailure(cls, error: str, documents: List[ActionDocument] = None) -> 'ActionResult':
|
||||
"""Create a failed action result
|
||||
|
||||
Note: Do not set resultLabel - this is managed by the action handler
|
||||
"""
|
||||
return cls(
|
||||
success=False,
|
||||
documents=documents or [],
|
||||
error=error
|
||||
)
|
||||
|
||||
# Register labels for ActionResult
|
||||
register_model_labels(
|
||||
"ActionResult",
|
||||
{"en": "Action Result", "fr": "Résultat de l'action"},
|
||||
{
|
||||
"success": {"en": "Success", "fr": "Succès"},
|
||||
"error": {"en": "Error", "fr": "Erreur"},
|
||||
"documents": {"en": "Documents", "fr": "Documents"},
|
||||
"resultLabel": {"en": "Result Label", "fr": "Étiquette du résultat"}
|
||||
}
|
||||
)
|
||||
|
||||
# ===== Minimal ReAct-style Workflow Models =====
|
||||
|
||||
class ActionSelection(BaseModel, ModelMixin):
|
||||
"""Model for selecting exactly one action in a step"""
|
||||
method: str = Field(description="Method to execute (e.g., web, document, ai)")
|
||||
name: str = Field(description="Action name within the method (e.g., search, extract)")
|
||||
|
||||
register_model_labels(
|
||||
"ActionSelection",
|
||||
{"en": "Action Selection", "fr": "Sélection d'action"},
|
||||
{
|
||||
"method": {"en": "Method", "fr": "Méthode"},
|
||||
"name": {"en": "Action Name", "fr": "Nom de l'action"}
|
||||
}
|
||||
)
|
||||
|
||||
class ActionParameters(BaseModel, ModelMixin):
|
||||
"""Model for specifying only the parameters for the selected action"""
|
||||
parameters: Dict[str, Any] = Field(default_factory=dict, description="Parameters to execute the selected action")
|
||||
|
||||
register_model_labels(
|
||||
"ActionParameters",
|
||||
{"en": "Action Parameters", "fr": "Paramètres d'action"},
|
||||
{
|
||||
"parameters": {"en": "Parameters", "fr": "Paramètres"}
|
||||
}
|
||||
)
|
||||
|
||||
class ObservationPreview(BaseModel, ModelMixin):
|
||||
"""Compact preview item for observations"""
|
||||
name: str = Field(description="Document name or URL label")
|
||||
mime: str = Field(description="MIME type or kind")
|
||||
snippet: str = Field(description="Short snippet or summary")
|
||||
|
||||
register_model_labels(
|
||||
"ObservationPreview",
|
||||
{"en": "Observation Preview", "fr": "Aperçu d'observation"},
|
||||
{
|
||||
"name": {"en": "Name", "fr": "Nom"},
|
||||
"mime": {"en": "MIME", "fr": "MIME"},
|
||||
"snippet": {"en": "Snippet", "fr": "Extrait"}
|
||||
}
|
||||
)
|
||||
|
||||
class Observation(BaseModel, ModelMixin):
|
||||
"""Compact observation returned to the model after each action"""
|
||||
success: bool = Field(description="Action execution success flag")
|
||||
resultLabel: str = Field(description="Deterministic label for produced documents")
|
||||
documentsCount: int = Field(description="Number of produced documents")
|
||||
previews: List[ObservationPreview] = Field(default_factory=list, description="Compact previews of outputs")
|
||||
notes: List[str] = Field(default_factory=list, description="Short notes or key facts")
|
||||
|
||||
register_model_labels(
|
||||
"Observation",
|
||||
{"en": "Observation", "fr": "Observation"},
|
||||
{
|
||||
"success": {"en": "Success", "fr": "Succès"},
|
||||
"resultLabel": {"en": "Result Label", "fr": "Étiquette du résultat"},
|
||||
"documentsCount": {"en": "Documents Count", "fr": "Nombre de documents"},
|
||||
"previews": {"en": "Previews", "fr": "Aperçus"},
|
||||
"notes": {"en": "Notes", "fr": "Notes"}
|
||||
}
|
||||
)
|
||||
|
||||
# ===== Base Enums and Simple Models =====
|
||||
|
||||
class TaskStatus(str, Enum):
|
||||
"""Task status enumeration"""
|
||||
PENDING = "pending"
|
||||
RUNNING = "running"
|
||||
COMPLETED = "completed"
|
||||
FAILED = "failed"
|
||||
CANCELLED = "cancelled"
|
||||
|
||||
# Register labels for TaskStatus
|
||||
register_model_labels(
|
||||
"TaskStatus",
|
||||
{"en": "Task Status", "fr": "Statut de la tâche"},
|
||||
{
|
||||
"PENDING": {"en": "Pending", "fr": "En attente"},
|
||||
"RUNNING": {"en": "Running", "fr": "En cours"},
|
||||
"COMPLETED": {"en": "Completed", "fr": "Terminé"},
|
||||
"FAILED": {"en": "Failed", "fr": "Échec"},
|
||||
"CANCELLED": {"en": "Cancelled", "fr": "Annulé"},
|
||||
"ROLLED_BACK": {"en": "Rolled Back", "fr": "Annulé"}
|
||||
}
|
||||
)
|
||||
|
||||
class UserInputRequest(BaseModel, ModelMixin):
|
||||
"""Data model for a user input request"""
|
||||
prompt: str = Field(description="Prompt for the user")
|
||||
listFileId: List[str] = Field(default_factory=list, description="List of file IDs")
|
||||
userLanguage: str = Field(default="en", description="User's preferred language")
|
||||
|
||||
# Register labels for UserInputRequest
|
||||
register_model_labels(
|
||||
"UserInputRequest",
|
||||
{"en": "User Input Request", "fr": "Demande de saisie utilisateur"},
|
||||
{
|
||||
"prompt": {"en": "Prompt", "fr": "Invite"},
|
||||
"listFileId": {"en": "File IDs", "fr": "IDs des fichiers"},
|
||||
"userLanguage": {"en": "User Language", "fr": "Langue de l'utilisateur"}
|
||||
}
|
||||
)
|
||||
|
||||
# ===== Content Models =====
|
||||
|
||||
class ContentMetadata(BaseModel, ModelMixin):
|
||||
"""Metadata for content items"""
|
||||
size: int = Field(description="Content size in bytes")
|
||||
pages: Optional[int] = Field(None, description="Number of pages for multi-page content")
|
||||
error: Optional[str] = Field(None, description="Processing error if any")
|
||||
width: Optional[int] = Field(None, description="Width in pixels for images/videos")
|
||||
height: Optional[int] = Field(None, description="Height in pixels for images/videos")
|
||||
colorMode: Optional[str] = Field(None, description="Color mode (e.g., RGB, CMYK, grayscale)")
|
||||
fps: Optional[float] = Field(None, description="Frames per second for videos")
|
||||
durationSec: Optional[float] = Field(None, description="Duration in seconds for videos/audio")
|
||||
mimeType: str = Field(description="MIME type of the content")
|
||||
base64Encoded: bool = Field(description="Whether the data is base64 encoded")
|
||||
|
||||
# Register labels for ContentMetadata
|
||||
register_model_labels(
|
||||
"ContentMetadata",
|
||||
{"en": "Content Metadata", "fr": "Métadonnées du contenu"},
|
||||
{
|
||||
"size": {"en": "Size", "fr": "Taille"},
|
||||
"pages": {"en": "Pages", "fr": "Pages"},
|
||||
"error": {"en": "Error", "fr": "Erreur"},
|
||||
"width": {"en": "Width", "fr": "Largeur"},
|
||||
"height": {"en": "Height", "fr": "Hauteur"},
|
||||
"colorMode": {"en": "Color Mode", "fr": "Mode de couleur"},
|
||||
"fps": {"en": "FPS", "fr": "IPS"},
|
||||
"durationSec": {"en": "Duration", "fr": "Durée"},
|
||||
"mimeType": {"en": "MIME Type", "fr": "Type MIME"},
|
||||
"base64Encoded": {"en": "Base64 Encoded", "fr": "Encodé en Base64"}
|
||||
}
|
||||
)
|
||||
|
||||
class ContentItem(BaseModel, ModelMixin):
|
||||
"""Individual content item from a document"""
|
||||
label: str = Field(description="Content label (e.g., tab name, tag name)")
|
||||
data: str = Field(description="Extracted text content")
|
||||
metadata: ContentMetadata = Field(description="Content metadata")
|
||||
|
||||
# Register labels for ContentItem
|
||||
register_model_labels(
|
||||
"ContentItem",
|
||||
{"en": "Content Item", "fr": "Élément de contenu"},
|
||||
{
|
||||
"label": {"en": "Label", "fr": "Étiquette"},
|
||||
"data": {"en": "Data", "fr": "Données"},
|
||||
"metadata": {"en": "Metadata", "fr": "Métadonnées"}
|
||||
}
|
||||
)
|
||||
|
||||
class ChatDocument(BaseModel, ModelMixin):
|
||||
"""Data model for a chat document"""
|
||||
id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Primary key")
|
||||
messageId: str = Field(description="Foreign key to message")
|
||||
fileId: str = Field(description="Foreign key to file")
|
||||
|
||||
# Direct file attributes (copied from file object)
|
||||
fileName: str = Field(description="Name of the file")
|
||||
fileSize: int = Field(description="Size of the file")
|
||||
mimeType: str = Field(description="MIME type of the file")
|
||||
|
||||
# Workflow context fields
|
||||
roundNumber: Optional[int] = Field(None, description="Round number in workflow")
|
||||
taskNumber: Optional[int] = Field(None, description="Task number within round")
|
||||
actionNumber: Optional[int] = Field(None, description="Action number within task")
|
||||
|
||||
# Reference to action that created this document
|
||||
actionId: Optional[str] = Field(None, description="ID of the action that created this document")
|
||||
|
||||
|
||||
|
||||
# Register labels for ChatDocument
|
||||
register_model_labels(
|
||||
"ChatDocument",
|
||||
{"en": "Chat Document", "fr": "Document de chat"},
|
||||
{
|
||||
"id": {"en": "ID", "fr": "ID"},
|
||||
"messageId": {"en": "Message ID", "fr": "ID du message"},
|
||||
"fileId": {"en": "File ID", "fr": "ID du fichier"},
|
||||
"fileName": {"en": "File Name", "fr": "Nom du fichier"},
|
||||
"fileSize": {"en": "File Size", "fr": "Taille du fichier"},
|
||||
"mimeType": {"en": "MIME Type", "fr": "Type MIME"},
|
||||
"roundNumber": {"en": "Round Number", "fr": "Numéro de tour"},
|
||||
"taskNumber": {"en": "Task Number", "fr": "Numéro de tâche"},
|
||||
"actionNumber": {"en": "Action Number", "fr": "Numéro d'action"},
|
||||
"actionId": {"en": "Action ID", "fr": "ID de l'action"}
|
||||
}
|
||||
)
|
||||
|
||||
class DocumentExchange(BaseModel, ModelMixin):
|
||||
"""Data model for document exchange between AI actions"""
|
||||
documentsLabel: str = Field(description="Label for the set of documents")
|
||||
documents: List[str] = Field(default_factory=list, description="List of document references")
|
||||
|
||||
# Register labels for DocumentExchange
|
||||
register_model_labels(
|
||||
"DocumentExchange",
|
||||
{"en": "Document Exchange", "fr": "Échange de documents"},
|
||||
{
|
||||
"documentsLabel": {"en": "Documents Label", "fr": "Label des documents"},
|
||||
"documents": {"en": "Documents", "fr": "Documents"}
|
||||
}
|
||||
)
|
||||
|
||||
class ExtractedContent(BaseModel, ModelMixin):
|
||||
"""Data model for extracted content"""
|
||||
id: str = Field(description="Reference to source ChatDocument")
|
||||
contents: List[ContentItem] = Field(default_factory=list, description="List of content items")
|
||||
|
||||
# Register labels for ExtractedContent
|
||||
register_model_labels(
|
||||
"ExtractedContent",
|
||||
{"en": "Extracted Content", "fr": "Contenu extrait"},
|
||||
{
|
||||
"objectId": {"en": "Object ID", "fr": "ID de l'objet"},
|
||||
"objectType": {"en": "Object Type", "fr": "Type d'objet"},
|
||||
"contents": {"en": "Contents", "fr": "Contenus"}
|
||||
}
|
||||
)
|
||||
|
||||
# ===== Task Models =====
|
||||
|
||||
class TaskAction(BaseModel, ModelMixin):
|
||||
"""Model for task actions"""
|
||||
id: str = Field(..., description="Action ID")
|
||||
execMethod: str = Field(..., description="Method to execute")
|
||||
execAction: str = Field(..., description="Action to perform")
|
||||
execParameters: Dict[str, Any] = Field(default_factory=dict, description="Action parameters")
|
||||
execResultLabel: Optional[str] = Field(None, description="Label for the set of result documents")
|
||||
# NEW: Optional document format specification
|
||||
expectedDocumentFormats: Optional[List[Dict[str, str]]] = Field(None, description="Expected document formats (optional)")
|
||||
|
||||
# User message in user's language
|
||||
userMessage: Optional[str] = Field(None, description="User-friendly message in user's language")
|
||||
|
||||
status: TaskStatus = Field(default=TaskStatus.PENDING, description="Action status")
|
||||
error: Optional[str] = Field(None, description="Error message if action failed")
|
||||
retryCount: int = Field(default=0, description="Number of retries attempted")
|
||||
retryMax: int = Field(default=3, description="Maximum number of retries")
|
||||
processingTime: Optional[float] = Field(None, description="Processing time in seconds")
|
||||
timestamp: float = Field(default_factory=get_utc_timestamp, description="When the action was executed (UTC timestamp in seconds)")
|
||||
result: Optional[str] = Field(None, description="Result of the action")
|
||||
resultDocuments: Optional[List[ChatDocument]] = Field(None, description="Result documents from the action")
|
||||
|
||||
def isSuccessful(self) -> bool:
|
||||
"""Check if action was successful"""
|
||||
return self.status == TaskStatus.COMPLETED
|
||||
|
||||
def hasError(self) -> bool:
|
||||
"""Check if action has an error"""
|
||||
return self.status == TaskStatus.FAILED
|
||||
|
||||
def getErrorMessage(self) -> Optional[str]:
|
||||
"""Get error message if any"""
|
||||
return self.error if self.hasError() else None
|
||||
|
||||
def setError(self, error: str) -> None:
|
||||
"""Set action error"""
|
||||
self.error = error
|
||||
self.status = TaskStatus.FAILED
|
||||
|
||||
def setSuccess(self) -> None:
|
||||
"""Set action as successful"""
|
||||
self.status = TaskStatus.COMPLETED
|
||||
self.error = None
|
||||
|
||||
# Register labels for TaskAction
|
||||
register_model_labels(
|
||||
"TaskAction",
|
||||
{"en": "Task Action", "fr": "Action de tâche"},
|
||||
{
|
||||
"id": {"en": "Action ID", "fr": "ID de l'action"},
|
||||
"execMethod": {"en": "Method", "fr": "Méthode"},
|
||||
"execAction": {"en": "Action", "fr": "Action"},
|
||||
"execParameters": {"en": "Parameters", "fr": "Paramètres"},
|
||||
"execResultLabel": {"en": "Result Label", "fr": "Label du résultat"},
|
||||
"expectedDocumentFormats": {"en": "Expected Document Formats", "fr": "Formats de documents attendus"},
|
||||
"userMessage": {"en": "User Message", "fr": "Message utilisateur"},
|
||||
"status": {"en": "Status", "fr": "Statut"},
|
||||
"error": {"en": "Error", "fr": "Erreur"},
|
||||
"retryCount": {"en": "Retry Count", "fr": "Nombre de tentatives"},
|
||||
"retryMax": {"en": "Max Retries", "fr": "Tentatives max"},
|
||||
"processingTime": {"en": "Processing Time", "fr": "Temps de traitement"},
|
||||
"timestamp": {"en": "Timestamp", "fr": "Horodatage"},
|
||||
"result": {"en": "Result", "fr": "Résultat"},
|
||||
"resultDocuments": {"en": "Result Documents", "fr": "Documents de résultat"}
|
||||
}
|
||||
)
|
||||
|
||||
class TaskResult(BaseModel, ModelMixin):
|
||||
"""Model for task results"""
|
||||
taskId: str = Field(..., description="Task ID")
|
||||
status: TaskStatus = Field(default=TaskStatus.PENDING, description="Task status")
|
||||
success: bool = Field(..., description="Whether the task was successful")
|
||||
feedback: Optional[str] = Field(None, description="Task feedback message")
|
||||
error: Optional[str] = Field(None, description="Error message if task failed")
|
||||
|
||||
# Register labels for TaskResult
|
||||
register_model_labels(
|
||||
"TaskResult",
|
||||
{"en": "Task Result", "fr": "Résultat de tâche"},
|
||||
{
|
||||
"taskId": {"en": "Task ID", "fr": "ID de la tâche"},
|
||||
"status": {"en": "Status", "fr": "Statut"},
|
||||
"success": {"en": "Success", "fr": "Succès"},
|
||||
"feedback": {"en": "Feedback", "fr": "Retour"},
|
||||
"error": {"en": "Error", "fr": "Erreur"}
|
||||
}
|
||||
)
|
||||
|
||||
class TaskItem(BaseModel, ModelMixin):
|
||||
"""Model for workflow tasks"""
|
||||
id: str = Field(..., description="Task ID")
|
||||
workflowId: str = Field(..., description="Workflow ID")
|
||||
userInput: str = Field(..., description="User input that triggered the task")
|
||||
status: TaskStatus = Field(default=TaskStatus.PENDING, description="Task status")
|
||||
error: Optional[str] = Field(None, description="Error message if task failed")
|
||||
startedAt: Optional[float] = Field(None, description="When the task started (UTC timestamp in seconds)")
|
||||
finishedAt: Optional[float] = Field(None, description="When the task finished (UTC timestamp in seconds)")
|
||||
actionList: List[TaskAction] = Field(default_factory=list, description="List of actions to execute")
|
||||
retryCount: int = Field(default=0, description="Number of retries attempted")
|
||||
retryMax: int = Field(default=3, description="Maximum number of retries")
|
||||
rollbackOnFailure: bool = Field(default=True, description="Whether to rollback on failure")
|
||||
dependencies: List[str] = Field(default_factory=list, description="List of task IDs this task depends on")
|
||||
feedback: Optional[str] = Field(None, description="Task feedback message")
|
||||
processingTime: Optional[float] = Field(None, description="Total processing time in seconds")
|
||||
resultLabels: Optional[Dict[str, Any]] = Field(default_factory=dict, description="Map of result labels to their values")
|
||||
|
||||
def isSuccessful(self) -> bool:
|
||||
"""Check if task was successful"""
|
||||
return self.status == TaskStatus.COMPLETED
|
||||
|
||||
def hasError(self) -> bool:
|
||||
"""Check if task has an error"""
|
||||
return self.status == TaskStatus.FAILED
|
||||
|
||||
def getErrorMessage(self) -> Optional[str]:
|
||||
"""Get error message if any"""
|
||||
return self.error if self.hasError() else None
|
||||
|
||||
def getResultDocuments(self) -> List[ChatDocument]:
|
||||
"""Get all documents from all successful actions"""
|
||||
documents = []
|
||||
for action in self.actionList:
|
||||
if action.isSuccessful() and action.resultDocuments:
|
||||
documents.extend(action.resultDocuments)
|
||||
return documents
|
||||
|
||||
def getResultDocumentLabel(self) -> Optional[str]:
|
||||
"""Get the label for the result documents"""
|
||||
for action in self.actionList:
|
||||
if action.isSuccessful() and action.execResultLabel:
|
||||
return action.execResultLabel
|
||||
return None
|
||||
|
||||
def getResultLabel(self, label: str) -> Optional[Any]:
|
||||
"""Get value for a specific result label"""
|
||||
return self.resultLabels.get(label) if self.resultLabels else None
|
||||
|
||||
# Register labels for TaskItem
|
||||
register_model_labels(
|
||||
"TaskItem",
|
||||
{"en": "Task", "fr": "Tâche"},
|
||||
{
|
||||
"id": {"en": "Task ID", "fr": "ID de la tâche"},
|
||||
"workflowId": {"en": "Workflow ID", "fr": "ID du workflow"},
|
||||
"userInput": {"en": "User Input", "fr": "Entrée utilisateur"},
|
||||
"status": {"en": "Status", "fr": "Statut"},
|
||||
"error": {"en": "Error", "fr": "Erreur"},
|
||||
"startedAt": {"en": "Started At", "fr": "Démarré à"},
|
||||
"finishedAt": {"en": "Finished At", "fr": "Terminé à"},
|
||||
"actionList": {"en": "Actions", "fr": "Actions"},
|
||||
"retryCount": {"en": "Retry Count", "fr": "Nombre de tentatives"},
|
||||
"retryMax": {"en": "Max Retries", "fr": "Tentatives max"},
|
||||
"rollbackOnFailure": {"en": "Rollback On Failure", "fr": "Annuler en cas d'échec"},
|
||||
"dependencies": {"en": "Dependencies", "fr": "Dépendances"},
|
||||
"feedback": {"en": "Feedback", "fr": "Retour"},
|
||||
"processingTime": {"en": "Processing Time", "fr": "Temps de traitement"}
|
||||
}
|
||||
)
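
A minimal usage sketch of how a TaskItem might be inspected after execution; it assumes the import path used further down in this diff (modules.datamodels.datamodelWorkflow):

from modules.datamodels.datamodelWorkflow import TaskItem, TaskStatus

task = TaskItem(id="t1", workflowId="wf1", userInput="Summarize the uploaded report")
task.status = TaskStatus.COMPLETED  # normally set by the workflow engine

if task.isSuccessful():
    documents = task.getResultDocuments()    # documents from all successful actions
    label = task.getResultDocumentLabel()    # label of the producing action, if any
elif task.hasError():
    print(task.getErrorMessage())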
|
||||
|
||||
class ChatStat(BaseModel, ModelMixin):
|
||||
"""Data model for chat statistics - ONLY statistics, not workflow progress"""
|
||||
id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Primary key")
|
||||
workflowId: Optional[str] = Field(None, description="Foreign key to workflow (for workflow stats)")
|
||||
messageId: Optional[str] = Field(None, description="Foreign key to message (for message stats)")
|
||||
processingTime: Optional[float] = Field(None, description="Processing time in seconds")
|
||||
tokenCount: Optional[int] = Field(None, description="Number of tokens processed")
|
||||
bytesSent: Optional[int] = Field(None, description="Number of bytes sent")
|
||||
bytesReceived: Optional[int] = Field(None, description="Number of bytes received")
|
||||
successRate: Optional[float] = Field(None, description="Success rate of operations")
|
||||
errorCount: Optional[int] = Field(None, description="Number of errors encountered")
|
||||
|
||||
# Register labels for ChatStat
|
||||
register_model_labels(
|
||||
"ChatStat",
|
||||
{"en": "Chat Statistics", "fr": "Statistiques de chat"},
|
||||
{
|
||||
"id": {"en": "ID", "fr": "ID"},
|
||||
"workflowId": {"en": "Workflow ID", "fr": "ID du workflow"},
|
||||
"messageId": {"en": "Message ID", "fr": "ID du message"},
|
||||
"processingTime": {"en": "Processing Time", "fr": "Temps de traitement"},
|
||||
"tokenCount": {"en": "Token Count", "fr": "Nombre de tokens"},
|
||||
"bytesSent": {"en": "Bytes Sent", "fr": "Octets envoyés"},
|
||||
"bytesReceived": {"en": "Bytes Received", "fr": "Octets reçus"},
|
||||
"successRate": {"en": "Success Rate", "fr": "Taux de succès"},
|
||||
"errorCount": {"en": "Error Count", "fr": "Nombre d'erreurs"}
|
||||
}
|
||||
)
|
||||
|
||||
class ChatLog(BaseModel, ModelMixin):
|
||||
"""Data model for chat logs"""
|
||||
id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Primary key")
|
||||
workflowId: str = Field(description="Foreign key to workflow")
|
||||
message: str = Field(description="Log message")
|
||||
type: str = Field(description="Log type (info, warning, error, etc.)")
|
||||
timestamp: float = Field(default_factory=get_utc_timestamp, description="When the log entry was created (UTC timestamp in seconds)")
|
||||
status: Optional[str] = Field(None, description="Status of the log entry")
|
||||
progress: Optional[float] = Field(None, description="Progress indicator (0.0 to 1.0)")
|
||||
performance: Optional[Dict[str, Any]] = Field(None, description="Performance metrics")
|
||||
|
||||
# Register labels for ChatLog
|
||||
register_model_labels(
|
||||
"ChatLog",
|
||||
{"en": "Chat Log", "fr": "Journal de chat"},
|
||||
{
|
||||
"id": {"en": "ID", "fr": "ID"},
|
||||
"workflowId": {"en": "Workflow ID", "fr": "ID du flux de travail"},
|
||||
"message": {"en": "Message", "fr": "Message"},
|
||||
"type": {"en": "Type", "fr": "Type"},
|
||||
"timestamp": {"en": "Timestamp", "fr": "Horodatage"},
|
||||
"status": {"en": "Status", "fr": "Statut"},
|
||||
"progress": {"en": "Progress", "fr": "Progression"},
|
||||
"performance": {"en": "Performance", "fr": "Performance"}
|
||||
}
|
||||
)
|
||||
|
||||
class ChatMessage(BaseModel, ModelMixin):
|
||||
"""Data model for a chat message"""
|
||||
id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Primary key")
|
||||
workflowId: str = Field(description="Foreign key to workflow")
|
||||
parentMessageId: Optional[str] = Field(None, description="Parent message ID for threading")
|
||||
documents: List[ChatDocument] = Field(default_factory=list, description="Associated documents")
|
||||
documentsLabel: Optional[str] = Field(None, description="Label for the set of documents")
|
||||
message: Optional[str] = Field(None, description="Message content")
|
||||
role: str = Field(description="Role of the message sender")
|
||||
status: str = Field(description="Status of the message (first, step, last)")
|
||||
sequenceNr: int = Field(description="Sequence number of the message (set automatically)")
|
||||
publishedAt: float = Field(default_factory=get_utc_timestamp, description="When the message was published (UTC timestamp in seconds)")
|
||||
stats: Optional[ChatStat] = Field(None, description="Statistics for this message")
|
||||
success: Optional[bool] = Field(None, description="Whether the message processing was successful")
|
||||
actionId: Optional[str] = Field(None, description="ID of the action that produced this message")
|
||||
actionMethod: Optional[str] = Field(None, description="Method of the action that produced this message")
|
||||
actionName: Optional[str] = Field(None, description="Name of the action that produced this message")
|
||||
|
||||
# New workflow context fields:
|
||||
roundNumber: Optional[int] = Field(None, description="Round number in workflow")
|
||||
taskNumber: Optional[int] = Field(None, description="Task number within round")
|
||||
actionNumber: Optional[int] = Field(None, description="Action number within task")
|
||||
|
||||
# New workflow progress fields:
|
||||
taskProgress: Optional[str] = Field(
|
||||
None,
|
||||
description="Task progress status: pending, running, success, fail, retry"
|
||||
)
|
||||
|
||||
actionProgress: Optional[str] = Field(
|
||||
None,
|
||||
description="Action progress status: pending, running, success, fail"
|
||||
)
|
||||
|
||||
# Register labels for ChatMessage
|
||||
register_model_labels(
|
||||
"ChatMessage",
|
||||
{"en": "Chat Message", "fr": "Message de chat"},
|
||||
{
|
||||
"id": {"en": "ID", "fr": "ID"},
|
||||
"workflowId": {"en": "Workflow ID", "fr": "ID du flux de travail"},
|
||||
"parentMessageId": {"en": "Parent Message ID", "fr": "ID du message parent"},
|
||||
"documents": {"en": "Documents", "fr": "Documents"},
|
||||
"documentsLabel": {"en": "Documents Label", "fr": "Label des documents"},
|
||||
"message": {"en": "Message", "fr": "Message"},
|
||||
"role": {"en": "Role", "fr": "Rôle"},
|
||||
"status": {"en": "Status", "fr": "Statut"},
|
||||
"sequenceNr": {"en": "Sequence Number", "fr": "Numéro de séquence"},
|
||||
"publishedAt": {"en": "Published At", "fr": "Publié le"},
|
||||
"stats": {"en": "Statistics", "fr": "Statistiques"},
|
||||
"success": {"en": "Success", "fr": "Succès"},
|
||||
"actionId": {"en": "Action ID", "fr": "ID de l'action"},
|
||||
"actionMethod": {"en": "Action Method", "fr": "Méthode de l'action"},
|
||||
"actionName": {"en": "Action Name", "fr": "Nom de l'action"},
|
||||
"roundNumber": {"en": "Round Number", "fr": "Numéro de tour"},
|
||||
"taskNumber": {"en": "Task Number", "fr": "Numéro de tâche"},
|
||||
"actionNumber": {"en": "Action Number", "fr": "Numéro d'action"},
|
||||
"taskProgress": {"en": "Task Progress", "fr": "Progression de la tâche"},
|
||||
"actionProgress": {"en": "Action Progress", "fr": "Progression de l'action"}
|
||||
}
|
||||
)
|
||||
|
||||
class ChatWorkflow(BaseModel, ModelMixin):
|
||||
"""Data model for a chat workflow"""
|
||||
id: str = Field(
|
||||
default_factory=lambda: str(uuid.uuid4()),
|
||||
description="Primary key",
|
||||
frontend_type="text",
|
||||
frontend_readonly=True,
|
||||
frontend_required=False
|
||||
)
|
||||
mandateId: str = Field(
|
||||
description="ID of the mandate this workflow belongs to",
|
||||
frontend_type="text",
|
||||
frontend_readonly=True,
|
||||
frontend_required=False
|
||||
)
|
||||
status: str = Field(
|
||||
description="Current status of the workflow",
|
||||
frontend_type="select",
|
||||
frontend_readonly=False,
|
||||
frontend_required=False,
|
||||
frontend_options=[
|
||||
{"value": "running", "label": {"en": "Running", "fr": "En cours"}},
|
||||
{"value": "completed", "label": {"en": "Completed", "fr": "Terminé"}},
|
||||
{"value": "stopped", "label": {"en": "Stopped", "fr": "Arrêté"}},
|
||||
{"value": "error", "label": {"en": "Error", "fr": "Erreur"}}
|
||||
]
|
||||
)
|
||||
name: Optional[str] = Field(
|
||||
None,
|
||||
description="Name of the workflow",
|
||||
frontend_type="text",
|
||||
frontend_readonly=False,
|
||||
frontend_required=True
|
||||
)
|
||||
currentRound: int = Field(
|
||||
description="Current round number",
|
||||
frontend_type="integer",
|
||||
frontend_readonly=True,
|
||||
frontend_required=False
|
||||
)
|
||||
currentTask: int = Field(
|
||||
default=0,
|
||||
description="Current task number",
|
||||
frontend_type="integer",
|
||||
frontend_readonly=True,
|
||||
frontend_required=False
|
||||
)
|
||||
currentAction: int = Field(
|
||||
default=0,
|
||||
description="Current action number",
|
||||
frontend_type="integer",
|
||||
frontend_readonly=True,
|
||||
frontend_required=False
|
||||
)
|
||||
totalTasks: int = Field(
|
||||
default=0,
|
||||
description="Total number of tasks in the workflow",
|
||||
frontend_type="integer",
|
||||
frontend_readonly=True,
|
||||
frontend_required=False
|
||||
)
|
||||
totalActions: int = Field(
|
||||
default=0,
|
||||
description="Total number of actions in the workflow",
|
||||
frontend_type="integer",
|
||||
frontend_readonly=True,
|
||||
frontend_required=False
|
||||
)
|
||||
lastActivity: float = Field(
|
||||
default_factory=get_utc_timestamp,
|
||||
description="Timestamp of last activity (UTC timestamp in seconds)",
|
||||
frontend_type="timestamp",
|
||||
frontend_readonly=True,
|
||||
frontend_required=False
|
||||
)
|
||||
startedAt: float = Field(
|
||||
default_factory=get_utc_timestamp,
|
||||
description="When the workflow started (UTC timestamp in seconds)",
|
||||
frontend_type="timestamp",
|
||||
frontend_readonly=True,
|
||||
frontend_required=False
|
||||
)
|
||||
logs: List[ChatLog] = Field(
|
||||
default_factory=list,
|
||||
description="Workflow logs",
|
||||
frontend_type="text",
|
||||
frontend_readonly=True,
|
||||
frontend_required=False
|
||||
)
|
||||
messages: List[ChatMessage] = Field(
|
||||
default_factory=list,
|
||||
description="Messages in the workflow",
|
||||
frontend_type="text",
|
||||
frontend_readonly=True,
|
||||
frontend_required=False
|
||||
)
|
||||
stats: Optional[ChatStat] = Field(
|
||||
None,
|
||||
description="Workflow statistics",
|
||||
frontend_type="text",
|
||||
frontend_readonly=True,
|
||||
frontend_required=False
|
||||
)
|
||||
tasks: List[TaskItem] = Field(
|
||||
default_factory=list,
|
||||
description="List of tasks in the workflow",
|
||||
frontend_type="text",
|
||||
frontend_readonly=True,
|
||||
frontend_required=False
|
||||
)
|
||||
# Workflow mode selection (e.g., Actionplan, React)
|
||||
workflowMode: str = Field(
|
||||
default="Actionplan",
|
||||
description="Workflow mode selector",
|
||||
frontend_type="select",
|
||||
frontend_readonly=False,
|
||||
frontend_required=False,
|
||||
frontend_options=[
|
||||
{"value": "Actionplan", "label": {"en": "Action Plan", "fr": "Plan d'actions"}},
|
||||
{"value": "React", "label": {"en": "React", "fr": "Réactif"}}
|
||||
]
|
||||
)
|
||||
maxSteps: int = Field(
|
||||
default=5,
|
||||
description="Maximum number of iterations in react mode",
|
||||
frontend_type="integer",
|
||||
frontend_readonly=False,
|
||||
frontend_required=False
|
||||
)
|
||||
|
||||
# Register labels for ChatWorkflow
|
||||
register_model_labels(
|
||||
"ChatWorkflow",
|
||||
{"en": "Chat Workflow", "fr": "Flux de travail de chat"},
|
||||
{
|
||||
"id": {"en": "ID", "fr": "ID"},
|
||||
"mandateId": {"en": "Mandate ID", "fr": "ID du mandat"},
|
||||
"status": {"en": "Status", "fr": "Statut"},
|
||||
"name": {"en": "Name", "fr": "Nom"},
|
||||
"currentRound": {"en": "Current Round", "fr": "Tour actuel"},
|
||||
"currentTask": {"en": "Current Task", "fr": "Tâche actuelle"},
|
||||
"currentAction": {"en": "Current Action", "fr": "Action actuelle"},
|
||||
"totalTasks": {"en": "Total Tasks", "fr": "Total des tâches"},
|
||||
"totalActions": {"en": "Total Actions", "fr": "Total des actions"},
|
||||
"lastActivity": {"en": "Last Activity", "fr": "Dernière activité"},
|
||||
"startedAt": {"en": "Started At", "fr": "Démarré le"},
|
||||
"logs": {"en": "Logs", "fr": "Journaux"},
|
||||
"messages": {"en": "Messages", "fr": "Messages"},
|
||||
"stats": {"en": "Statistics", "fr": "Statistiques"},
|
||||
"tasks": {"en": "Tasks", "fr": "Tâches"},
|
||||
"workflowMode": {"en": "Workflow Mode", "fr": "Mode de workflow"},
|
||||
"maxSteps": {"en": "Max Steps", "fr": "Étapes max"}
|
||||
}
|
||||
)
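
A minimal sketch of constructing a workflow in the iterative mode added above, assuming the import path shown further down in this diff; only the required fields plus the new mode controls are set:

from modules.datamodels.datamodelChat import ChatWorkflow

workflow = ChatWorkflow(
    mandateId="mandate-123",
    status="running",
    name="Quarterly report research",
    currentRound=1,
    workflowMode="React",  # iterative mode instead of the default "Actionplan"
    maxSteps=8,            # upper bound on react-mode iterations
)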
|
||||
|
||||
# ====== WORKFLOW SUPPORT MODELS ======
|
||||
|
||||
class TaskStep(BaseModel, ModelMixin):
    id: str
    objective: str
    dependencies: Optional[list[str]] = Field(default_factory=list)
    success_criteria: Optional[list[str]] = Field(default_factory=list)
    estimated_complexity: Optional[str] = None
    userMessage: Optional[str] = Field(None, description="User-friendly message in user's language")
|
||||
|
||||
# Register labels for TaskStep
|
||||
register_model_labels(
|
||||
"TaskStep",
|
||||
{"en": "Task Step", "fr": "Étape de tâche"},
|
||||
{
|
||||
"id": {"en": "ID", "fr": "ID"},
|
||||
"objective": {"en": "Objective", "fr": "Objectif"},
|
||||
"dependencies": {"en": "Dependencies", "fr": "Dépendances"},
|
||||
"success_criteria": {"en": "Success Criteria", "fr": "Critères de succès"},
|
||||
"estimated_complexity": {"en": "Estimated Complexity", "fr": "Complexité estimée"},
|
||||
"userMessage": {"en": "User Message", "fr": "Message utilisateur"}
|
||||
}
|
||||
)
|
||||
|
||||
class TaskHandover(BaseModel, ModelMixin):
|
||||
"""Structured handover between workflow phases and tasks"""
|
||||
taskId: str = Field(description="Target task ID")
|
||||
sourceTask: Optional[str] = Field(None, description="Source task ID")
|
||||
|
||||
# Document handovers
|
||||
inputDocuments: List[DocumentExchange] = Field(default_factory=list, description="Available input documents")
|
||||
outputDocuments: List[DocumentExchange] = Field(default_factory=list, description="Produced output documents")
|
||||
|
||||
# Context and state
|
||||
context: Dict[str, Any] = Field(default_factory=dict, description="Task context")
|
||||
previousResults: List[str] = Field(default_factory=list, description="Previous result summaries")
|
||||
improvements: List[str] = Field(default_factory=list, description="Improvement suggestions")
|
||||
|
||||
# Workflow context
|
||||
workflowSummary: Optional[str] = Field(None, description="Summarized workflow context")
|
||||
messageHistory: List[str] = Field(default_factory=list, description="Key message summaries")
|
||||
|
||||
# Metadata
|
||||
timestamp: float = Field(default_factory=get_utc_timestamp, description="When the handover was created (UTC timestamp in seconds)")
|
||||
handoverType: str = Field(default="task", description="Type of handover: task, phase, or workflow")
|
||||
|
||||
def addInputDocument(self, documentExchange: DocumentExchange) -> None:
|
||||
"""Add an input document exchange"""
|
||||
self.inputDocuments.append(documentExchange)
|
||||
|
||||
def addOutputDocument(self, documentExchange: DocumentExchange) -> None:
|
||||
"""Add an output document exchange"""
|
||||
self.outputDocuments.append(documentExchange)
|
||||
|
||||
def getDocumentsForAction(self, actionId: str) -> List[DocumentExchange]:
|
||||
"""Get all document exchanges relevant for a specific action"""
|
||||
relevant = []
|
||||
for doc_exchange in self.inputDocuments + self.outputDocuments:
|
||||
if doc_exchange.isForAction(actionId):
|
||||
relevant.append(doc_exchange)
|
||||
return relevant
|
||||
|
||||
# Register labels for TaskHandover
|
||||
register_model_labels(
|
||||
"TaskHandover",
|
||||
{"en": "Task Handover", "fr": "Transfert de tâche"},
|
||||
{
|
||||
"taskId": {"en": "Task ID", "fr": "ID de la tâche"},
|
||||
"sourceTask": {"en": "Source Task", "fr": "Tâche source"},
|
||||
"inputDocuments": {"en": "Input Documents", "fr": "Documents d'entrée"},
|
||||
"outputDocuments": {"en": "Output Documents", "fr": "Documents de sortie"},
|
||||
"context": {"en": "Context", "fr": "Contexte"},
|
||||
"previousResults": {"en": "Previous Results", "fr": "Résultats précédents"},
|
||||
"improvements": {"en": "Improvements", "fr": "Améliorations"},
|
||||
"workflowSummary": {"en": "Workflow Summary", "fr": "Résumé du workflow"},
|
||||
"messageHistory": {"en": "Message History", "fr": "Historique des messages"},
|
||||
"timestamp": {"en": "Timestamp", "fr": "Horodatage"},
|
||||
"handoverType": {"en": "Handover Type", "fr": "Type de transfert"}
|
||||
}
|
||||
)
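
A short sketch of a handover between two tasks; the DocumentExchange instance (defined elsewhere in the codebase) is assumed to be available as exchange:

handover = TaskHandover(taskId="task-2", sourceTask="task-1")
handover.addOutputDocument(exchange)  # exchange: DocumentExchange produced by the previous task

# The executor later pulls only the exchanges relevant to a given action
relevant = handover.getDocumentsForAction("action-7")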
|
||||
|
||||
class TaskContext(BaseModel, ModelMixin):
|
||||
task_step: TaskStep
|
||||
workflow: Optional['ChatWorkflow'] = None
|
||||
workflow_id: Optional[str] = None
|
||||
|
||||
# Available resources
|
||||
available_documents: Optional[str] = "No documents available"
|
||||
available_connections: Optional[list[str]] = Field(default_factory=list)
|
||||
|
||||
# Previous execution state
|
||||
previous_results: Optional[list[str]] = Field(default_factory=list)
|
||||
previous_handover: Optional[TaskHandover] = None
|
||||
|
||||
# Current execution state
|
||||
improvements: Optional[list[str]] = Field(default_factory=list)
|
||||
retry_count: Optional[int] = 0
|
||||
previous_action_results: Optional[list] = Field(default_factory=list)
|
||||
previous_review_result: Optional[dict] = None
|
||||
is_regeneration: Optional[bool] = False
|
||||
|
||||
# Failure analysis
|
||||
failure_patterns: Optional[list[str]] = Field(default_factory=list)
|
||||
failed_actions: Optional[list] = Field(default_factory=list)
|
||||
successful_actions: Optional[list] = Field(default_factory=list)
|
||||
|
||||
# Criteria progress tracking for retries
|
||||
criteria_progress: Optional[dict] = None
|
||||
|
||||
# Iterative loop controls (moved to ChatWorkflow.workflowMode and ChatWorkflow.maxSteps)
|
||||
# reactMode and maxSteps are now controlled at the workflow level
|
||||
|
||||
def getDocumentReferences(self) -> List[str]:
|
||||
"""Get all available document references from previous handover"""
|
||||
docs = []
|
||||
if self.previous_handover:
|
||||
for doc_exchange in self.previous_handover.inputDocuments:
|
||||
docs.extend(doc_exchange.documents)
|
||||
return list(set(docs)) # Remove duplicates
|
||||
|
||||
def addImprovement(self, improvement: str) -> None:
|
||||
"""Add an improvement suggestion"""
|
||||
if improvement not in (self.improvements or []):
|
||||
if self.improvements is None:
|
||||
self.improvements = []
|
||||
self.improvements.append(improvement)
|
||||
|
||||
class ReviewContext(BaseModel, ModelMixin):
    task_step: TaskStep
    task_actions: Optional[list] = Field(default_factory=list)
    action_results: Optional[list] = Field(default_factory=list)
    step_result: Optional[dict] = Field(default_factory=dict)
    workflow_id: Optional[str] = None
    previous_results: Optional[list[str]] = Field(default_factory=list)
|
||||
|
||||
class ReviewResult(BaseModel, ModelMixin):
|
||||
status: str
|
||||
reason: Optional[str] = None
|
||||
improvements: Optional[list[str]] = Field(default_factory=list)
|
||||
quality_score: Optional[int] = 5
|
||||
missing_outputs: Optional[list[str]] = Field(default_factory=list)
|
||||
met_criteria: Optional[list[str]] = Field(default_factory=list)
|
||||
unmet_criteria: Optional[list[str]] = Field(default_factory=list)
|
||||
confidence: Optional[float] = 0.5
|
||||
userMessage: Optional[str] = Field(None, description="User-friendly message in user's language")
|
||||
|
||||
# Register labels for ReviewResult
|
||||
register_model_labels(
|
||||
"ReviewResult",
|
||||
{"en": "Review Result", "fr": "Résultat de l'évaluation"},
|
||||
{
|
||||
"status": {"en": "Status", "fr": "Statut"},
|
||||
"reason": {"en": "Reason", "fr": "Raison"},
|
||||
"improvements": {"en": "Improvements", "fr": "Améliorations"},
|
||||
"quality_score": {"en": "Quality Score", "fr": "Score de qualité"},
|
||||
"missing_outputs": {"en": "Missing Outputs", "fr": "Sorties manquantes"},
|
||||
"met_criteria": {"en": "Met Criteria", "fr": "Critères respectés"},
|
||||
"unmet_criteria": {"en": "Unmet Criteria", "fr": "Critères non respectés"},
|
||||
"confidence": {"en": "Confidence", "fr": "Confiance"},
|
||||
"userMessage": {"en": "User Message", "fr": "Message utilisateur"}
|
||||
}
|
||||
)
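
A sketch of how a review result could feed back into a retry; a TaskContext instance is assumed to be available as task_context, and the "retry"/"success" status strings are assumptions since the allowed values are not enumerated here:

review = ReviewResult(
    status="retry",
    reason="Missing summary section",
    improvements=["Add an executive summary"],
    quality_score=4,
    unmet_criteria=["summary present"],
)

if review.status != "success":
    for hint in review.improvements or []:
        task_context.addImprovement(hint)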
|
||||
|
||||
class TaskPlan(BaseModel, ModelMixin):
|
||||
overview: str
|
||||
tasks: list[TaskStep]
|
||||
userMessage: Optional[str] = Field(None, description="Overall user-friendly message for the task plan")
|
||||
|
||||
# Register labels for TaskPlan
|
||||
register_model_labels(
|
||||
"TaskPlan",
|
||||
{"en": "Task Plan", "fr": "Plan de tâches"},
|
||||
{
|
||||
"overview": {"en": "Overview", "fr": "Aperçu"},
|
||||
"tasks": {"en": "Tasks", "fr": "Tâches"},
|
||||
"userMessage": {"en": "User Message", "fr": "Message utilisateur"}
|
||||
}
|
||||
)
|
||||
|
||||
class WorkflowResult(BaseModel, ModelMixin):
    status: str
    completed_tasks: int
    total_tasks: int
    execution_time: float
    final_results_count: int
    error: Optional[str] = None
    phase: Optional[str] = None
|
||||
|
||||
# Register labels for WorkflowResult
|
||||
register_model_labels(
|
||||
"WorkflowResult",
|
||||
{"en": "Workflow Result", "fr": "Résultat du workflow"},
|
||||
{
|
||||
"status": {"en": "Status", "fr": "Statut"},
|
||||
"completed_tasks": {"en": "Completed Tasks", "fr": "Tâches terminées"},
|
||||
"total_tasks": {"en": "Total Tasks", "fr": "Total des tâches"},
|
||||
"execution_time": {"en": "Execution Time", "fr": "Temps d'exécution"},
|
||||
"final_results_count": {"en": "Final Results Count", "fr": "Nombre de résultats finaux"},
|
||||
"error": {"en": "Error", "fr": "Erreur"},
|
||||
"phase": {"en": "Phase", "fr": "Phase"}
|
||||
}
|
||||
)
|
||||
|
||||
# ===== Centralized AI Call Response Models =====
|
||||
|
||||
class AiResult(BaseModel, ModelMixin):
    """Document result from centralized AI call"""
    filename: str = Field(description="Name of the result document")
    mimetype: str = Field(description="MIME type of the result document")
    content: str = Field(description="Content of the result document")
|
||||
|
||||
# Register labels for AiResult
|
||||
register_model_labels(
|
||||
"AiResult",
|
||||
{"en": "Result Document", "fr": "Document de résultat"},
|
||||
{
|
||||
"filename": {"en": "Filename", "fr": "Nom de fichier"},
|
||||
"mimetype": {"en": "MIME Type", "fr": "Type MIME"},
|
||||
"content": {"en": "Content", "fr": "Contenu"}
|
||||
}
|
||||
)
|
||||
|
||||
class CentralizedAiResponse(BaseModel, ModelMixin):
|
||||
"""Standardized response format from centralized AI calls"""
|
||||
aiResults: List[AiResult] = Field(default_factory=list, description="List of result documents")
|
||||
success: bool = Field(description="Whether the AI call was successful")
|
||||
error: Optional[str] = Field(None, description="Error message if the call failed")
|
||||
|
||||
# Register labels for CentralizedAiResponse
|
||||
register_model_labels(
|
||||
"CentralizedAiResponse",
|
||||
{"en": "Centralized AI Response", "fr": "Réponse IA centralisée"},
|
||||
{
|
||||
"aiResults": {"en": "Result Documents", "fr": "Documents de résultat"},
|
||||
"success": {"en": "Success", "fr": "Succès"},
|
||||
"error": {"en": "Error", "fr": "Erreur"}
|
||||
}
|
||||
)
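
A minimal sketch of unpacking a CentralizedAiResponse; the connector call that would normally produce it is omitted:

response = CentralizedAiResponse(
    success=True,
    aiResults=[AiResult(filename="summary.md", mimetype="text/markdown", content="# Summary")],
)

if response.success:
    for doc in response.aiResults:
        print(doc.filename, doc.mimetype, len(doc.content))
else:
    print(f"AI call failed: {response.error}")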
|
||||
|
||||
|
||||
|
|
@ -16,7 +16,7 @@ import re
|
|||
from modules.connectors.connectorDbPostgre import DatabaseConnector
|
||||
from modules.shared.configuration import APP_CONFIG
|
||||
from modules.shared.timezoneUtils import get_utc_now, get_utc_timestamp
|
||||
from modules.interfaces.interfaceAppAccess import AppAccess
|
||||
from modules.interfaces.interfaceDbAppAccess import AppAccess
|
||||
from modules.datamodels.datamodelUam import (
|
||||
User, Mandate, UserInDB, UserConnection,
|
||||
AuthAuthority, UserPrivilege, ConnectionStatus,
|
||||
|
|
@ -951,8 +951,6 @@ class AppObjects:
|
|||
def cleanupExpiredTokens(self) -> int:
|
||||
"""Clean up expired tokens for all connections, returns count of cleaned tokens"""
|
||||
try:
|
||||
from modules.shared.timezoneUtils import get_utc_timestamp
|
||||
|
||||
current_time = get_utc_timestamp()
|
||||
cleaned_count = 0
|
||||
|
||||
|
|
@ -11,10 +11,22 @@ from typing import Dict, Any, List, Optional, Union, get_origin, get_args
|
|||
|
||||
import asyncio
|
||||
|
||||
from modules.interfaces.interfaceChatAccess import ChatAccess
|
||||
from modules.datamodels.datamodelWorkflow import UserInputRequest, TaskAction, TaskResult
|
||||
from modules.datamodels.datamodelWorkflow import TaskItem, TaskStatus, ActionResult
|
||||
from modules.datamodels.datamodelChat import ChatDocument, ChatStat, ChatLog, ChatMessage, ChatWorkflow
|
||||
from modules.interfaces.interfaceDbChatAccess import ChatAccess
|
||||
from modules.datamodels.datamodelWorkflow import (
|
||||
TaskAction,
|
||||
TaskResult,
|
||||
TaskItem,
|
||||
TaskStatus,
|
||||
ActionResult
|
||||
)
|
||||
from modules.datamodels.datamodelChat import (
|
||||
UserInputRequest,
|
||||
ChatDocument,
|
||||
ChatStat,
|
||||
ChatLog,
|
||||
ChatMessage,
|
||||
ChatWorkflow
|
||||
)
|
||||
from modules.datamodels.datamodelUam import User
|
||||
|
||||
# DYNAMIC PART: Connectors to the Interface
|
||||
|
|
@ -5,23 +5,20 @@ Uses the JSON connector for data access with added language support.
|
|||
|
||||
import os
|
||||
import logging
|
||||
import base64
|
||||
import hashlib
|
||||
from datetime import datetime, UTC
|
||||
from typing import Dict, Any, List, Optional, Union
|
||||
|
||||
import hashlib
|
||||
|
||||
from modules.interfaces.interfaceComponentAccess import ComponentAccess
|
||||
from modules.connectors.connectorDbPostgre import DatabaseConnector
|
||||
from modules.interfaces.interfaceDbComponentAccess import ComponentAccess
|
||||
from modules.datamodels.datamodelFiles import FilePreview, FileItem, FileData
|
||||
from modules.datamodels.datamodelUtils import Prompt
|
||||
from modules.datamodels.datamodelVoice import VoiceSettings
|
||||
from modules.datamodels.datamodelUam import User, Mandate
|
||||
|
||||
# DYNAMIC PART: Connectors to the Interface
|
||||
from modules.connectors.connectorDbPostgre import DatabaseConnector
|
||||
|
||||
# Basic Configurations
|
||||
from modules.shared.configuration import APP_CONFIG
|
||||
from modules.shared.timezoneUtils import get_utc_timestamp
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Singleton factory for Management instances with AI service per context
|
||||
|
|
@ -150,7 +147,7 @@ class ComponentObjects:
|
|||
return
|
||||
|
||||
# Get the root interface to access the initial mandate ID
|
||||
from modules.interfaces.interfaceAppObjects import getRootInterface
|
||||
from modules.interfaces.interfaceDbAppObjects import getRootInterface
|
||||
rootInterface = getRootInterface()
|
||||
|
||||
# Get initial mandate ID through the root interface
|
||||
|
|
@ -388,7 +385,6 @@ class ComponentObjects:
|
|||
|
||||
def getMimeType(self, fileName: str) -> str:
|
||||
"""Determines the MIME type based on the file extension."""
|
||||
import os
|
||||
ext = os.path.splitext(fileName)[1].lower()[1:]
|
||||
extensionToMime = {
|
||||
"pdf": "application/pdf",
|
||||
|
|
@ -558,7 +554,6 @@ class ComponentObjects:
|
|||
|
||||
def createFile(self, name: str, mimeType: str, content: bytes) -> FileItem:
|
||||
"""Creates a new file entry if user has permission. Computes fileHash and fileSize from content."""
|
||||
import hashlib
|
||||
if not self._canModify(FileItem):
|
||||
raise PermissionError("No permission to create files")
|
||||
|
||||
|
|
@ -655,8 +650,6 @@ class ComponentObjects:
|
|||
def createFileData(self, fileId: str, data: bytes) -> bool:
|
||||
"""Stores the binary data of a file in the database."""
|
||||
try:
|
||||
import base64
|
||||
|
||||
# Check file access
|
||||
file = self.getFile(fileId)
|
||||
if not file:
|
||||
|
|
@ -715,8 +708,6 @@ class ComponentObjects:
|
|||
logger.warning(f"No access to file ID {fileId}")
|
||||
return None
|
||||
|
||||
import base64
|
||||
|
||||
fileDataEntries = self.db.getRecordset(FileData, recordFilter={"id": fileId})
|
||||
if not fileDataEntries:
|
||||
logger.warning(f"No data found for file ID {fileId}")
|
||||
|
|
@ -791,12 +782,10 @@ class ComponentObjects:
|
|||
encoding = 'latin-1'
|
||||
elif file.mimeType.startswith("image/"):
|
||||
# For images, return base64
|
||||
import base64
|
||||
content = base64.b64encode(fileContent).decode('utf-8')
|
||||
isText = False
|
||||
else:
|
||||
# For other files, return as base64
|
||||
import base64
|
||||
content = base64.b64encode(fileContent).decode('utf-8')
|
||||
isText = False
|
||||
|
||||
|
|
@ -827,7 +816,6 @@ class ComponentObjects:
|
|||
raise ValueError(f"fileContent must be bytes, got {type(fileContent)}")
|
||||
|
||||
# Compute file hash first to check for duplicates
|
||||
import hashlib
|
||||
fileHash = hashlib.sha256(fileContent).hexdigest()
|
||||
|
||||
# Check for exact name+hash match first (same name + same content)
|
||||
|
|
@ -894,10 +882,8 @@ class ComponentObjects:
|
|||
# Ensure timestamps are set for validation
|
||||
settings_data = filteredSettings[0]
|
||||
if not settings_data.get("creationDate"):
|
||||
from modules.shared.timezoneUtils import get_utc_timestamp
|
||||
settings_data["creationDate"] = get_utc_timestamp()
|
||||
if not settings_data.get("lastModified"):
|
||||
from modules.shared.timezoneUtils import get_utc_timestamp
|
||||
settings_data["lastModified"] = get_utc_timestamp()
|
||||
|
||||
return VoiceSettings.from_dict(settings_data)
|
||||
|
|
@ -946,7 +932,6 @@ class ComponentObjects:
|
|||
raise ValueError(f"Voice settings not found for user {userId}")
|
||||
|
||||
# Update lastModified timestamp
|
||||
from modules.shared.timezoneUtils import get_utc_timestamp
|
||||
updateData["lastModified"] = get_utc_timestamp()
|
||||
|
||||
# Update voice settings record
|
||||
|
|
@ -2,7 +2,7 @@ from typing import Any, Optional
|
|||
from datetime import datetime, timezone
|
||||
|
||||
# Module-level factory to create TicketInterface by connector type
|
||||
async def createTicketInterfaceByType(
|
||||
async def _createTicketInterfaceByType(
|
||||
*,
|
||||
taskSyncDefinition: dict,
|
||||
connectorType: str,
|
||||
|
|
498
modules/interfaces/interfaceVoiceObjects.py
Normal file
|
|
@ -0,0 +1,498 @@
|
|||
"""
|
||||
Interface for Voice Services
|
||||
Provides a generic interface layer between routes and voice connectors.
|
||||
Handles voice operations including speech-to-text, text-to-speech, and translation.
|
||||
"""
|
||||
|
||||
import logging
|
||||
from typing import Dict, Any, Optional, List
|
||||
from datetime import datetime, UTC
|
||||
|
||||
from modules.connectors.connectorVoiceGoogle import ConnectorGoogleSpeech
|
||||
from modules.datamodels.datamodelVoice import VoiceSettings
|
||||
from modules.datamodels.datamodelUam import User
|
||||
from modules.shared.timezoneUtils import get_utc_timestamp
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Singleton factory for Voice instances
|
||||
_instancesVoice = {}
|
||||
|
||||
class VoiceObjects:
|
||||
"""
|
||||
Interface for Voice Services.
|
||||
Provides a generic interface layer between routes and voice connectors.
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
"""Initialize the Voice Interface."""
|
||||
self.currentUser: Optional[User] = None
|
||||
self.userId: Optional[str] = None
|
||||
self._google_speech_connector: Optional[ConnectorGoogleSpeech] = None
|
||||
|
||||
def setUserContext(self, currentUser: User):
|
||||
"""Set the user context for the interface."""
|
||||
if not currentUser:
|
||||
logger.info("Initializing voice interface without user context")
|
||||
return
|
||||
|
||||
self.currentUser = currentUser
|
||||
self.userId = currentUser.id
|
||||
|
||||
if not self.userId:
|
||||
raise ValueError("Invalid user context: id is required")
|
||||
|
||||
logger.debug(f"Voice interface user context set: userId={self.userId}")
|
||||
|
||||
def _getGoogleSpeechConnector(self) -> ConnectorGoogleSpeech:
|
||||
"""Get or create Google Cloud Speech connector instance."""
|
||||
if self._google_speech_connector is None:
|
||||
try:
|
||||
self._google_speech_connector = ConnectorGoogleSpeech()
|
||||
logger.info("✅ Google Cloud Speech connector initialized")
|
||||
except Exception as e:
|
||||
logger.error(f"❌ Failed to initialize Google Cloud Speech connector: {e}")
|
||||
raise
|
||||
return self._google_speech_connector
|
||||
|
||||
# Speech-to-Text Operations
|
||||
|
||||
async def speechToText(self, audioContent: bytes, language: str = "de-DE",
|
||||
sampleRate: int = None, channels: int = None) -> Dict[str, Any]:
|
||||
"""
|
||||
Convert speech to text using Google Cloud Speech-to-Text API.
|
||||
|
||||
Args:
|
||||
audioContent: Raw audio data
|
||||
language: Language code (e.g., 'de-DE', 'en-US')
|
||||
sampleRate: Audio sample rate (auto-detected if None)
|
||||
channels: Number of audio channels (auto-detected if None)
|
||||
|
||||
Returns:
|
||||
Dict containing transcribed text, confidence, and metadata
|
||||
"""
|
||||
try:
|
||||
logger.info(f"🎤 Speech-to-text request: {len(audioContent)} bytes, language: {language}")
|
||||
|
||||
connector = self._getGoogleSpeechConnector()
|
||||
result = await connector.speech_to_text(
|
||||
audio_content=audioContent,
|
||||
language=language,
|
||||
sample_rate=sampleRate,
|
||||
channels=channels
|
||||
)
|
||||
|
||||
if result["success"]:
|
||||
logger.info(f"✅ Speech-to-text successful: '{result['text']}' (confidence: {result['confidence']:.2f})")
|
||||
else:
|
||||
logger.warning(f"⚠️ Speech-to-text failed: {result.get('error', 'Unknown error')}")
|
||||
|
||||
return result
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"❌ Speech-to-text error: {e}")
|
||||
return {
|
||||
"success": False,
|
||||
"text": "",
|
||||
"confidence": 0.0,
|
||||
"error": str(e)
|
||||
}
|
||||
|
||||
# Translation Operations
|
||||
|
||||
async def translateText(self, text: str, sourceLanguage: str = "de",
|
||||
targetLanguage: str = "en") -> Dict[str, Any]:
|
||||
"""
|
||||
Translate text using Google Cloud Translation API.
|
||||
|
||||
Args:
|
||||
text: Text to translate
|
||||
sourceLanguage: Source language code (e.g., 'de', 'en')
|
||||
targetLanguage: Target language code (e.g., 'en', 'de')
|
||||
|
||||
Returns:
|
||||
Dict containing translated text and metadata
|
||||
"""
|
||||
try:
|
||||
logger.info(f"🌐 Translation request: '{text}' ({sourceLanguage} -> {targetLanguage})")
|
||||
|
||||
if not text.strip():
|
||||
return {
|
||||
"success": False,
|
||||
"translated_text": "",
|
||||
"error": "Empty text provided"
|
||||
}
|
||||
|
||||
connector = self._getGoogleSpeechConnector()
|
||||
result = await connector.translate_text(
|
||||
text=text,
|
||||
source_language=sourceLanguage,
|
||||
target_language=targetLanguage
|
||||
)
|
||||
|
||||
if result["success"]:
|
||||
logger.info(f"✅ Translation successful: '{result['translated_text']}'")
|
||||
else:
|
||||
logger.warning(f"⚠️ Translation failed: {result.get('error', 'Unknown error')}")
|
||||
|
||||
return result
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"❌ Translation error: {e}")
|
||||
return {
|
||||
"success": False,
|
||||
"translated_text": "",
|
||||
"error": str(e)
|
||||
}
|
||||
|
||||
# Combined Operations
|
||||
|
||||
async def speechToTranslatedText(self, audioContent: bytes,
|
||||
fromLanguage: str = "de-DE",
|
||||
toLanguage: str = "en") -> Dict[str, Any]:
|
||||
"""
|
||||
Complete pipeline: Speech-to-Text + Translation.
|
||||
|
||||
Args:
|
||||
audioContent: Raw audio data
|
||||
fromLanguage: Source language for speech recognition
|
||||
toLanguage: Target language for translation
|
||||
|
||||
Returns:
|
||||
Dict containing original text, translated text, and metadata
|
||||
"""
|
||||
try:
|
||||
logger.info(f"🔄 Speech-to-translation pipeline: {fromLanguage} -> {toLanguage}")
|
||||
|
||||
connector = self._getGoogleSpeechConnector()
|
||||
result = await connector.speech_to_translated_text(
|
||||
audio_content=audioContent,
|
||||
from_language=fromLanguage,
|
||||
to_language=toLanguage
|
||||
)
|
||||
|
||||
if result["success"]:
|
||||
logger.info(f"✅ Complete pipeline successful:")
|
||||
logger.info(f" Original: '{result['original_text']}'")
|
||||
logger.info(f" Translated: '{result['translated_text']}'")
|
||||
else:
|
||||
logger.warning(f"⚠️ Speech-to-translation pipeline failed: {result.get('error', 'Unknown error')}")
|
||||
|
||||
return result
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"❌ Speech-to-translation pipeline error: {e}")
|
||||
return {
|
||||
"success": False,
|
||||
"original_text": "",
|
||||
"translated_text": "",
|
||||
"error": str(e)
|
||||
}
|
||||
|
||||
# Text-to-Speech Operations
|
||||
|
||||
async def textToSpeech(self, text: str, languageCode: str = "de-DE",
|
||||
voiceName: str = None) -> Dict[str, Any]:
|
||||
"""
|
||||
Convert text to speech using Google Cloud Text-to-Speech.
|
||||
|
||||
Args:
|
||||
text: Text to convert to speech
|
||||
languageCode: Language code (e.g., 'de-DE', 'en-US')
|
||||
voiceName: Specific voice name (optional)
|
||||
|
||||
Returns:
|
||||
Dict with success status and audio data
|
||||
"""
|
||||
try:
|
||||
logger.info(f"🔊 Text-to-Speech request: '{text[:50]}...' in {languageCode}")
|
||||
|
||||
if not text.strip():
|
||||
return {
|
||||
"success": False,
|
||||
"error": "Empty text provided for text-to-speech"
|
||||
}
|
||||
|
||||
connector = self._getGoogleSpeechConnector()
|
||||
result = await connector.text_to_speech(
|
||||
text=text,
|
||||
language_code=languageCode,
|
||||
voice_name=voiceName
|
||||
)
|
||||
|
||||
if result["success"]:
|
||||
logger.info(f"✅ Text-to-Speech successful: {len(result['audio_content'])} bytes")
|
||||
else:
|
||||
logger.warning(f"⚠️ Text-to-Speech failed: {result.get('error', 'Unknown error')}")
|
||||
|
||||
return result
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"❌ Text-to-Speech error: {e}")
|
||||
return {
|
||||
"success": False,
|
||||
"error": str(e)
|
||||
}
|
||||
|
||||
# Voice Settings Management
|
||||
|
||||
def getVoiceSettings(self, userId: str) -> Optional[VoiceSettings]:
|
||||
"""
|
||||
Get voice settings for a user.
|
||||
|
||||
Args:
|
||||
userId: User ID to get settings for
|
||||
|
||||
Returns:
|
||||
VoiceSettings object or None if not found
|
||||
"""
|
||||
try:
|
||||
# This would typically query the database
|
||||
# For now, return None as this is handled by the database interface
|
||||
logger.debug(f"Getting voice settings for user: {userId}")
|
||||
return None
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"❌ Error getting voice settings: {e}")
|
||||
return None
|
||||
|
||||
def createVoiceSettings(self, settingsData: Dict[str, Any]) -> Optional[VoiceSettings]:
|
||||
"""
|
||||
Create new voice settings.
|
||||
|
||||
Args:
|
||||
settingsData: Dictionary containing voice settings data
|
||||
|
||||
Returns:
|
||||
Created VoiceSettings object or None if failed
|
||||
"""
|
||||
try:
|
||||
logger.info(f"Creating voice settings: {settingsData}")
|
||||
|
||||
# Add timestamps
|
||||
currentTime = get_utc_timestamp()
|
||||
settingsData["creationDate"] = currentTime
|
||||
settingsData["lastModified"] = currentTime
|
||||
|
||||
# Create VoiceSettings object
|
||||
voiceSettings = VoiceSettings(**settingsData)
|
||||
|
||||
logger.info(f"✅ Voice settings created: {voiceSettings.id}")
|
||||
return voiceSettings
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"❌ Error creating voice settings: {e}")
|
||||
return None
|
||||
|
||||
def updateVoiceSettings(self, userId: str, settingsData: Dict[str, Any]) -> Optional[VoiceSettings]:
|
||||
"""
|
||||
Update existing voice settings.
|
||||
|
||||
Args:
|
||||
userId: User ID to update settings for
|
||||
settingsData: Dictionary containing updated voice settings data
|
||||
|
||||
Returns:
|
||||
Updated VoiceSettings object or None if failed
|
||||
"""
|
||||
try:
|
||||
logger.info(f"Updating voice settings for user {userId}: {settingsData}")
|
||||
|
||||
# Add last modified timestamp
|
||||
settingsData["lastModified"] = get_utc_timestamp()
|
||||
|
||||
# Create updated VoiceSettings object
|
||||
voiceSettings = VoiceSettings(**settingsData)
|
||||
|
||||
logger.info(f"✅ Voice settings updated: {voiceSettings.id}")
|
||||
return voiceSettings
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"❌ Error updating voice settings: {e}")
|
||||
return None
|
||||
|
||||
def getOrCreateVoiceSettings(self, userId: str) -> Optional[VoiceSettings]:
|
||||
"""
|
||||
Get existing voice settings or create default ones.
|
||||
|
||||
Args:
|
||||
userId: User ID to get/create settings for
|
||||
|
||||
Returns:
|
||||
VoiceSettings object
|
||||
"""
|
||||
try:
|
||||
# Try to get existing settings
|
||||
existingSettings = self.getVoiceSettings(userId)
|
||||
|
||||
if existingSettings:
|
||||
return existingSettings
|
||||
|
||||
# Create default settings if none exist
|
||||
defaultSettings = {
|
||||
"userId": userId,
|
||||
"sttLanguage": "de-DE",
|
||||
"ttsLanguage": "de-DE",
|
||||
"ttsVoice": "de-DE-Wavenet-A",
|
||||
"translationEnabled": True,
|
||||
"targetLanguage": "en-US"
|
||||
}
|
||||
|
||||
return self.createVoiceSettings(defaultSettings)
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"❌ Error getting or creating voice settings: {e}")
|
||||
return None
|
||||
|
||||
# Language and Voice Information
|
||||
|
||||
async def getAvailableLanguages(self) -> Dict[str, Any]:
|
||||
"""
|
||||
Get available languages from Google Cloud Text-to-Speech.
|
||||
|
||||
Returns:
|
||||
Dict containing success status and list of available languages
|
||||
"""
|
||||
try:
|
||||
logger.info("🌐 Getting available languages from Google Cloud TTS")
|
||||
|
||||
connector = self._getGoogleSpeechConnector()
|
||||
result = await connector.get_available_languages()
|
||||
|
||||
if result["success"]:
|
||||
logger.info(f"✅ Found {len(result['languages'])} available languages")
|
||||
else:
|
||||
logger.warning(f"⚠️ Failed to get languages: {result.get('error', 'Unknown error')}")
|
||||
|
||||
return result
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"❌ Error getting available languages: {e}")
|
||||
return {
|
||||
"success": False,
|
||||
"error": str(e),
|
||||
"languages": []
|
||||
}
|
||||
|
||||
async def getAvailableVoices(self, languageCode: Optional[str] = None) -> Dict[str, Any]:
|
||||
"""
|
||||
Get available voices from Google Cloud Text-to-Speech.
|
||||
|
||||
Args:
|
||||
languageCode: Optional language code to filter voices
|
||||
|
||||
Returns:
|
||||
Dict containing success status and list of available voices
|
||||
"""
|
||||
try:
|
||||
logger.info(f"🎤 Getting available voices, language filter: {languageCode}")
|
||||
|
||||
connector = self._getGoogleSpeechConnector()
|
||||
result = await connector.get_available_voices(language_code=languageCode)
|
||||
|
||||
if result["success"]:
|
||||
logger.info(f"✅ Found {len(result['voices'])} voices for language filter: {languageCode}")
|
||||
else:
|
||||
logger.warning(f"⚠️ Failed to get voices: {result.get('error', 'Unknown error')}")
|
||||
|
||||
return result
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"❌ Error getting available voices: {e}")
|
||||
return {
|
||||
"success": False,
|
||||
"error": str(e),
|
||||
"voices": []
|
||||
}
|
||||
|
||||
# Audio Validation
|
||||
|
||||
def validateAudioFormat(self, audioContent: bytes) -> Dict[str, Any]:
|
||||
"""
|
||||
Validate audio format for Google Cloud Speech-to-Text.
|
||||
|
||||
Args:
|
||||
audioContent: Raw audio data
|
||||
|
||||
Returns:
|
||||
Dict containing validation results
|
||||
"""
|
||||
try:
|
||||
logger.debug(f"Validating audio format: {len(audioContent)} bytes")
|
||||
|
||||
connector = self._getGoogleSpeechConnector()
|
||||
result = connector.validate_audio_format(audioContent)
|
||||
|
||||
if result["valid"]:
|
||||
logger.debug(f"✅ Audio validation successful: {result['format']}, {result['sample_rate']}Hz, {result['channels']}ch")
|
||||
else:
|
||||
logger.warning(f"⚠️ Audio validation failed: {result.get('error', 'Unknown error')}")
|
||||
|
||||
return result
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"❌ Audio validation error: {e}")
|
||||
return {
|
||||
"valid": False,
|
||||
"error": str(e)
|
||||
}
|
||||
|
||||
# Health Check
|
||||
|
||||
async def healthCheck(self) -> Dict[str, Any]:
|
||||
"""
|
||||
Perform health check for voice services.
|
||||
|
||||
Returns:
|
||||
Dict containing health status and test results
|
||||
"""
|
||||
try:
|
||||
logger.info("🏥 Performing voice services health check")
|
||||
|
||||
connector = self._getGoogleSpeechConnector()
|
||||
|
||||
# Test with a simple translation
|
||||
testResult = await connector.translate_text(
|
||||
text="Hello",
|
||||
source_language="en",
|
||||
target_language="de"
|
||||
)
|
||||
|
||||
if testResult["success"]:
|
||||
return {
|
||||
"status": "healthy",
|
||||
"service": "Google Cloud Speech-to-Text & Translation",
|
||||
"test_translation": testResult["translated_text"]
|
||||
}
|
||||
else:
|
||||
return {
|
||||
"status": "unhealthy",
|
||||
"error": testResult.get("error", "Unknown error")
|
||||
}
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"❌ Health check failed: {e}")
|
||||
return {
|
||||
"status": "unhealthy",
|
||||
"error": str(e)
|
||||
}
|
||||
|
||||
|
||||
def getVoiceInterface(currentUser: User = None) -> VoiceObjects:
|
||||
"""
|
||||
Factory function to get or create Voice interface instance.
|
||||
|
||||
Args:
|
||||
currentUser: User object for context (optional)
|
||||
|
||||
Returns:
|
||||
VoiceObjects instance
|
||||
"""
|
||||
# For now, create a new instance each time
|
||||
# In the future, this could be enhanced with singleton pattern per user
|
||||
voiceInterface = VoiceObjects()
|
||||
|
||||
if currentUser:
|
||||
voiceInterface.setUserContext(currentUser)
|
||||
|
||||
return voiceInterface
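
A minimal sketch of calling the new voice interface from an async context; currentUser is assumed to be an authenticated User and audio_bytes a raw recording:

voice = getVoiceInterface(currentUser)

# Speech-to-text followed by translation, mirroring speechToTranslatedText above
result = await voice.speechToTranslatedText(audio_bytes, fromLanguage="de-DE", toLanguage="en")
if result["success"]:
    print(result["original_text"], "->", result["translated_text"])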
|
||||
|
|
@ -1,51 +0,0 @@
|
|||
from dataclasses import dataclass
|
||||
|
||||
from modules.datamodels.datamodelWeb import (
|
||||
WebCrawlActionResult,
|
||||
WebCrawlActionDocument,
|
||||
WebCrawlDocumentData,
|
||||
WebCrawlRequest,
|
||||
WebCrawlResultItem,
|
||||
WebScrapeActionResult,
|
||||
WebScrapeActionDocument,
|
||||
WebSearchDocumentData as WebScrapeDocumentData,
|
||||
WebScrapeRequest,
|
||||
WebScrapeResultItem,
|
||||
WebSearchActionResult,
|
||||
WebSearchActionDocument,
|
||||
WebSearchDocumentData,
|
||||
WebSearchRequest,
|
||||
WebSearchResultItem,
|
||||
)
|
||||
from modules.connectors.connectorWebTavily import ConnectorWeb
|
||||
from modules.datamodels.datamodelWorkflow import ActionDocument
|
||||
|
||||
|
||||
@dataclass(slots=True)
|
||||
class WebInterface:
|
||||
connectorWebTavily: ConnectorWeb
|
||||
|
||||
def __post_init__(self) -> None:
|
||||
if self.connectorWebTavily is None:
|
||||
raise TypeError(
|
||||
"connectorWebTavily must be provided. "
|
||||
"Use `await WebInterface.create()` or pass a ConnectorWeb."
|
||||
)
|
||||
|
||||
@classmethod
|
||||
async def create(cls) -> "WebInterface":
|
||||
connectorWebTavily = await ConnectorWeb.create()
|
||||
return WebInterface(connectorWebTavily=connectorWebTavily)
|
||||
|
||||
# Methods
|
||||
|
||||
async def search(self, web_search_request: WebSearchRequest) -> WebSearchActionResult:
|
||||
return await self.connectorWebTavily.search(web_search_request)
|
||||
|
||||
async def crawl(self, web_crawl_request: WebCrawlRequest) -> WebCrawlActionResult:
|
||||
return await self.connectorWebTavily.crawl(web_crawl_request)
|
||||
|
||||
async def scrape(self, web_scrape_request: WebScrapeRequest) -> WebScrapeActionResult:
|
||||
return await self.connectorWebTavily.scrape(web_scrape_request)
|
||||
|
||||
# Helpers moved to MethodWeb
|
||||
|
|
@ -11,7 +11,7 @@ from datetime import datetime
|
|||
from modules.shared.configuration import APP_CONFIG
|
||||
from modules.security.auth import limiter, getCurrentUser
|
||||
from modules.datamodels.datamodelUam import User
|
||||
from modules.interfaces.interfaceAppObjects import getRootInterface
|
||||
from modules.interfaces.interfaceDbAppObjects import getRootInterface
|
||||
|
||||
# Static folder setup - using absolute path from app root
|
||||
baseDir = FilePath(__file__).parent.parent.parent # Go up to gateway root
|
||||
|
|
|
@ -12,12 +12,11 @@ from datetime import datetime
|
|||
from modules.security.auth import limiter, getCurrentUser
|
||||
|
||||
# Import interfaces
|
||||
import modules.interfaces.interfaceChatObjects as interfaceChatObjects
|
||||
from modules.interfaces.interfaceChatObjects import getInterface
|
||||
import modules.interfaces.interfaceDbChatObjects as interfaceDbChatObjects
|
||||
from modules.interfaces.interfaceDbChatObjects import getInterface
|
||||
|
||||
# Import models
|
||||
from modules.datamodels.datamodelChat import ChatWorkflow
|
||||
from modules.datamodels.datamodelWorkflow import UserInputRequest
|
||||
from modules.datamodels.datamodelChat import ChatWorkflow, UserInputRequest
|
||||
from modules.datamodels.datamodelUam import User
|
||||
|
||||
# Import workflow control functions
|
||||
|
|
@ -34,7 +33,7 @@ router = APIRouter(
|
|||
)
|
||||
|
||||
def getServiceChat(currentUser: User):
|
||||
return interfaceChatObjects.getInterface(currentUser)
|
||||
return interfaceDbChatObjects.getInterface(currentUser)
|
||||
|
||||
# Workflow start endpoint
|
||||
@router.post("/start", response_model=ChatWorkflow)
|
||||
|
|
@ -55,10 +54,10 @@ async def start_workflow(
|
|||
"""
|
||||
try:
|
||||
# Get service center
|
||||
interfaceChat = getServiceChat(currentUser)
|
||||
interfaceDbChat = getServiceChat(currentUser)
|
||||
|
||||
# Start or continue workflow using playground controller
|
||||
workflow = await chatStart(interfaceChat, currentUser, userInput, workflowId, workflowMode)
|
||||
workflow = await chatStart(interfaceDbChat, currentUser, userInput, workflowId, workflowMode)
|
||||
|
||||
return workflow
|
||||
|
||||
|
|
@ -80,10 +79,10 @@ async def stop_workflow(
|
|||
"""Stops a running workflow."""
|
||||
try:
|
||||
# Get service center
|
||||
interfaceChat = getServiceChat(currentUser)
|
||||
interfaceDbChat = getServiceChat(currentUser)
|
||||
|
||||
# Stop workflow using playground controller
|
||||
workflow = await chatStop(interfaceChat, currentUser, workflowId)
|
||||
workflow = await chatStop(interfaceDbChat, currentUser, workflowId)
|
||||
|
||||
return workflow
|
||||
|
||||
|
|
@ -109,10 +108,10 @@ async def get_workflow_chat_data(
|
|||
"""
|
||||
try:
|
||||
# Get service center
|
||||
interfaceChat = getServiceChat(currentUser)
|
||||
interfaceDbChat = getServiceChat(currentUser)
|
||||
|
||||
# Verify workflow exists
|
||||
workflow = interfaceChat.getWorkflow(workflowId)
|
||||
workflow = interfaceDbChat.getWorkflow(workflowId)
|
||||
if not workflow:
|
||||
raise HTTPException(
|
||||
status_code=404,
|
||||
|
|
@ -120,7 +119,7 @@ async def get_workflow_chat_data(
|
|||
)
|
||||
|
||||
# Get unified chat data using the new method
|
||||
chatData = interfaceChat.getUnifiedChatData(workflowId, afterTimestamp)
|
||||
chatData = interfaceDbChat.getUnifiedChatData(workflowId, afterTimestamp)
|
||||
|
||||
return chatData
|
||||
|
||||
|
|
|
@ -18,7 +18,7 @@ import json
|
|||
from modules.datamodels.datamodelUam import User, UserConnection, AuthAuthority, ConnectionStatus
|
||||
from modules.datamodels.datamodelSecurity import Token
|
||||
from modules.security.auth import getCurrentUser, limiter
|
||||
from modules.interfaces.interfaceAppObjects import getInterface, getRootInterface
|
||||
from modules.interfaces.interfaceDbAppObjects import getInterface, getRootInterface
|
||||
from modules.shared.timezoneUtils import get_utc_timestamp
|
||||
|
||||
# Configure logger
|
||||
|
|
|
@ -14,7 +14,7 @@ from pydantic import BaseModel
|
|||
from modules.security.auth import limiter, getCurrentUser
|
||||
|
||||
# Import interfaces
|
||||
import modules.interfaces.interfaceComponentObjects as interfaceComponentObjects
|
||||
import modules.interfaces.interfaceDbComponentObjects as interfaceDbComponentObjects
|
||||
from modules.datamodels.datamodelFiles import FileItem, FilePreview
|
||||
from modules.shared.attributeUtils import getModelAttributeDefinitions, AttributeResponse, AttributeDefinition
|
||||
from modules.datamodels.datamodelUam import User
|
||||
|
|
@ -46,7 +46,7 @@ async def get_files(
|
|||
) -> List[FileItem]:
|
||||
"""Get all files"""
|
||||
try:
|
||||
managementInterface = interfaceComponentObjects.getInterface(currentUser)
|
||||
managementInterface = interfaceDbComponentObjects.getInterface(currentUser)
|
||||
|
# Get all files generically - only metadata, no binary data
files = managementInterface.getAllFiles()

@@ -72,17 +72,17 @@ async def upload_file(
file.fileName = file.filename
"""Upload a file"""
try:
managementInterface = interfaceComponentObjects.getInterface(currentUser)
managementInterface = interfaceDbComponentObjects.getInterface(currentUser)
# Read file
fileContent = await file.read()
# Check size limits
maxSize = int(interfaceComponentObjects.APP_CONFIG.get("File_Management_MAX_UPLOAD_SIZE_MB")) * 1024 * 1024 # in bytes
maxSize = int(interfaceDbComponentObjects.APP_CONFIG.get("File_Management_MAX_UPLOAD_SIZE_MB")) * 1024 * 1024 # in bytes
if len(fileContent) > maxSize:
raise HTTPException(
status_code=status.HTTP_413_REQUEST_ENTITY_TOO_LARGE,
detail=f"File too large. Maximum size: {interfaceComponentObjects.APP_CONFIG.get('File_Management_MAX_UPLOAD_SIZE_MB')}MB"
detail=f"File too large. Maximum size: {interfaceDbComponentObjects.APP_CONFIG.get('File_Management_MAX_UPLOAD_SIZE_MB')}MB"
)
# Save file via LucyDOM interface in the database

@@ -115,7 +115,7 @@ async def upload_file(
"isDuplicate": duplicateType != "new_file"
})
except interfaceComponentObjects.FileStorageError as e:
except interfaceDbComponentObjects.FileStorageError as e:
logger.error(f"Error during file upload (storage): {str(e)}")
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,

@@ -137,7 +137,7 @@ async def get_file(
) -> FileItem:
"""Get a file"""
try:
managementInterface = interfaceComponentObjects.getInterface(currentUser)
managementInterface = interfaceDbComponentObjects.getInterface(currentUser)
# Get file via LucyDOM interface from the database
fileData = managementInterface.getFile(fileId)

@@ -149,19 +149,19 @@ async def get_file(
return fileData
except interfaceComponentObjects.FileNotFoundError as e:
except interfaceDbComponentObjects.FileNotFoundError as e:
logger.warning(f"File not found: {str(e)}")
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail=str(e)
)
except interfaceComponentObjects.FilePermissionError as e:
except interfaceDbComponentObjects.FilePermissionError as e:
logger.warning(f"No permission for file: {str(e)}")
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
detail=str(e)
)
except interfaceComponentObjects.FileError as e:
except interfaceDbComponentObjects.FileError as e:
logger.error(f"Error retrieving file: {str(e)}")
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,

@@ -184,7 +184,7 @@ async def update_file(
) -> FileItem:
"""Update file info"""
try:
managementInterface = interfaceComponentObjects.getInterface(currentUser)
managementInterface = interfaceDbComponentObjects.getInterface(currentUser)
# Get the file from the database
file = managementInterface.getFile(fileId)

@@ -230,7 +230,7 @@ async def delete_file(
currentUser: User = Depends(getCurrentUser)
) -> Dict[str, Any]:
"""Delete a file"""
managementInterface = interfaceComponentObjects.getInterface(currentUser)
managementInterface = interfaceDbComponentObjects.getInterface(currentUser)
# Check if the file exists
existingFile = managementInterface.getFile(fileId)

@@ -257,7 +257,7 @@ async def get_file_stats(
) -> Dict[str, Any]:
"""Returns statistics about the stored files"""
try:
managementInterface = interfaceComponentObjects.getInterface(currentUser)
managementInterface = interfaceDbComponentObjects.getInterface(currentUser)
# Get all files - metadata only
allFiles = managementInterface.getAllFiles()

@@ -296,7 +296,7 @@ async def download_file(
) -> Response:
"""Download a file"""
try:
managementInterface = interfaceComponentObjects.getInterface(currentUser)
managementInterface = interfaceDbComponentObjects.getInterface(currentUser)
# Get file data
fileData = managementInterface.getFile(fileId)

@@ -344,7 +344,7 @@ async def preview_file(
) -> FilePreview:
"""Preview a file's content"""
try:
managementInterface = interfaceComponentObjects.getInterface(currentUser)
managementInterface = interfaceDbComponentObjects.getInterface(currentUser)
# Get file preview using the correct method
preview = managementInterface.getFileContent(fileId)

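For reference, a minimal sketch of how a caller consumes the renamed interface module after this refactor; it only reuses names visible in the diff above, and the route wiring around it is assumed:

import modules.interfaces.interfaceDbComponentObjects as interfaceDbComponentObjects

async def get_file_example(fileId, currentUser):
    # Same pattern as the file routes above: resolve a per-user interface, then query it
    managementInterface = interfaceDbComponentObjects.getInterface(currentUser)
    return managementInterface.getFile(fileId)
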
@@ -17,7 +17,7 @@ from pydantic import BaseModel
from modules.security.auth import limiter, getCurrentUser
# Import interfaces
import modules.interfaces.interfaceAppObjects as interfaceAppObjects
import modules.interfaces.interfaceDbAppObjects as interfaceDbAppObjects
from modules.shared.attributeUtils import getModelAttributeDefinitions, AttributeResponse, AttributeDefinition
# Import the model classes

@@ -44,7 +44,7 @@ async def get_mandates(
) -> List[Mandate]:
"""Get all mandates"""
try:
appInterface = interfaceAppObjects.getInterface(currentUser)
appInterface = interfaceDbAppObjects.getInterface(currentUser)
mandates = appInterface.getAllMandates()
return mandates
except Exception as e:

@@ -63,7 +63,7 @@ async def get_mandate(
) -> Mandate:
"""Get a specific mandate by ID"""
try:
appInterface = interfaceAppObjects.getInterface(currentUser)
appInterface = interfaceDbAppObjects.getInterface(currentUser)
mandate = appInterface.getMandate(mandateId)
if not mandate:

@@ -91,7 +91,7 @@ async def create_mandate(
) -> Mandate:
"""Create a new mandate"""
try:
appInterface = interfaceAppObjects.getInterface(currentUser)
appInterface = interfaceDbAppObjects.getInterface(currentUser)
# Create mandate
newMandate = appInterface.createMandate(

@@ -125,7 +125,7 @@ async def update_mandate(
) -> Mandate:
"""Update an existing mandate"""
try:
appInterface = interfaceAppObjects.getInterface(currentUser)
appInterface = interfaceDbAppObjects.getInterface(currentUser)
# Check if mandate exists
existingMandate = appInterface.getMandate(mandateId)

@@ -163,7 +163,7 @@ async def delete_mandate(
) -> Dict[str, Any]:
"""Delete a mandate"""
try:
appInterface = interfaceAppObjects.getInterface(currentUser)
appInterface = interfaceDbAppObjects.getInterface(currentUser)
# Check if mandate exists
existingMandate = appInterface.getMandate(mandateId)

@@ -12,7 +12,7 @@ from pydantic import BaseModel
from modules.security.auth import limiter, getCurrentUser
# Import interfaces
import modules.interfaces.interfaceComponentObjects as interfaceComponentObjects
import modules.interfaces.interfaceDbComponentObjects as interfaceDbComponentObjects
from modules.datamodels.datamodelUtils import Prompt
from modules.shared.attributeUtils import getModelAttributeDefinitions, AttributeResponse, AttributeDefinition
from modules.datamodels.datamodelUam import User

@@ -34,7 +34,7 @@ async def get_prompts(
currentUser: User = Depends(getCurrentUser)
) -> List[Prompt]:
"""Get all prompts"""
managementInterface = interfaceComponentObjects.getInterface(currentUser)
managementInterface = interfaceDbComponentObjects.getInterface(currentUser)
prompts = managementInterface.getAllPrompts()
return prompts

@@ -46,7 +46,7 @@ async def create_prompt(
currentUser: User = Depends(getCurrentUser)
) -> Prompt:
"""Create a new prompt"""
managementInterface = interfaceComponentObjects.getInterface(currentUser)
managementInterface = interfaceDbComponentObjects.getInterface(currentUser)
# Convert Prompt to dict for interface
prompt_data = prompt.dict()

@@ -64,7 +64,7 @@ async def get_prompt(
currentUser: User = Depends(getCurrentUser)
) -> Prompt:
"""Get a specific prompt"""
managementInterface = interfaceComponentObjects.getInterface(currentUser)
managementInterface = interfaceDbComponentObjects.getInterface(currentUser)
# Get prompt
prompt = managementInterface.getPrompt(promptId)

@@ -85,7 +85,7 @@ async def update_prompt(
currentUser: User = Depends(getCurrentUser)
) -> Prompt:
"""Update an existing prompt"""
managementInterface = interfaceComponentObjects.getInterface(currentUser)
managementInterface = interfaceDbComponentObjects.getInterface(currentUser)
# Check if the prompt exists
existingPrompt = managementInterface.getPrompt(promptId)

@@ -117,7 +117,7 @@ async def delete_prompt(
currentUser: User = Depends(getCurrentUser)
) -> Dict[str, Any]:
"""Delete a prompt"""
managementInterface = interfaceComponentObjects.getInterface(currentUser)
managementInterface = interfaceDbComponentObjects.getInterface(currentUser)
# Check if the prompt exists
existingPrompt = managementInterface.getPrompt(promptId)

@@ -14,7 +14,7 @@ import os
from pydantic import BaseModel
# Import interfaces and models
import modules.interfaces.interfaceAppObjects as interfaceAppObjects
import modules.interfaces.interfaceDbAppObjects as interfaceDbAppObjects
from modules.security.auth import getCurrentUser, limiter, getCurrentUser
# Import the attribute definition and helper functions

@@ -40,7 +40,7 @@ async def get_users(
) -> List[User]:
"""Get all users in the current mandate"""
try:
appInterface = interfaceAppObjects.getInterface(currentUser)
appInterface = interfaceDbAppObjects.getInterface(currentUser)
# If mandateId is provided, use it, otherwise use the current user's mandate
targetMandateId = mandateId or currentUser.mandateId
# Get all users without filtering by enabled status

@@ -62,7 +62,7 @@ async def get_user(
) -> User:
"""Get a specific user by ID"""
try:
appInterface = interfaceAppObjects.getInterface(currentUser)
appInterface = interfaceDbAppObjects.getInterface(currentUser)
# Get user without filtering by enabled status
user = appInterface.getUser(userId)

@@ -90,7 +90,7 @@ async def create_user(
currentUser: User = Depends(getCurrentUser)
) -> User:
"""Create a new user"""
appInterface = interfaceAppObjects.getInterface(currentUser)
appInterface = interfaceDbAppObjects.getInterface(currentUser)
# Convert User to dict for interface
user_dict = user_data.dict()

@@ -109,7 +109,7 @@ async def update_user(
currentUser: User = Depends(getCurrentUser)
) -> User:
"""Update an existing user"""
appInterface = interfaceAppObjects.getInterface(currentUser)
appInterface = interfaceDbAppObjects.getInterface(currentUser)
# Check if the user exists
existingUser = appInterface.getUser(userId)

@@ -300,7 +300,7 @@ async def delete_user(
currentUser: User = Depends(getCurrentUser)
) -> Dict[str, Any]:
"""Delete a user"""
appInterface = interfaceAppObjects.getInterface(currentUser)
appInterface = interfaceDbAppObjects.getInterface(currentUser)
# Check if the user exists
existingUser = appInterface.getUser(userId)

@@ -5,7 +5,7 @@ import os
import logging
from modules.security.auth import getCurrentUser, limiter
from modules.interfaces.interfaceAppObjects import getInterface, getRootInterface
from modules.interfaces.interfaceDbAppObjects import getInterface, getRootInterface
from modules.datamodels.datamodelUam import User, UserInDB, AuthAuthority
from modules.datamodels.datamodelSecurity import Token
from modules.shared.configuration import APP_CONFIG

@@ -253,17 +253,17 @@ async def list_databases(
# Get database names from configuration for each interface
databases = []
# App database (interfaceAppObjects.py)
# App database (interfaceDbAppObjects.py)
app_db = APP_CONFIG.get("DB_APP_DATABASE")
if app_db:
databases.append(app_db)
# Chat database (interfaceChatObjects.py)
# Chat database (interfaceDbChatObjects.py)
chat_db = APP_CONFIG.get("DB_CHAT_DATABASE")
if chat_db:
databases.append(chat_db)
# Management database (interfaceComponentObjects.py)
# Management database (interfaceDbComponentObjects.py)
management_db = APP_CONFIG.get("DB_MANAGEMENT_DATABASE")
if management_db:
databases.append(management_db)

@@ -308,11 +308,11 @@ async def get_database_tables(
appInterface = getRootInterface()
tables = appInterface.db.getTables()
elif database_name == chat_db:
from modules.interfaces.interfaceChatObjects import getInterface as getChatInterface
from modules.interfaces.interfaceDbChatObjects import getInterface as getChatInterface
chatInterface = getChatInterface(currentUser)
tables = chatInterface.db.getTables()
elif database_name == management_db:
from modules.interfaces.interfaceComponentObjects import getInterface as getComponentInterface
from modules.interfaces.interfaceDbComponentObjects import getInterface as getComponentInterface
componentInterface = getComponentInterface(currentUser)
tables = componentInterface.db.getTables()
else:

@@ -358,10 +358,10 @@ async def drop_table(
if database_name == app_db:
interface = getRootInterface()
elif database_name == chat_db:
from modules.interfaces.interfaceChatObjects import getInterface as getChatInterface
from modules.interfaces.interfaceDbChatObjects import getInterface as getChatInterface
interface = getChatInterface(currentUser)
elif database_name == management_db:
from modules.interfaces.interfaceComponentObjects import getInterface as getComponentInterface
from modules.interfaces.interfaceDbComponentObjects import getInterface as getComponentInterface
interface = getComponentInterface(currentUser)
else:
raise HTTPException(status_code=400, detail="Database not found")

@@ -423,10 +423,10 @@ async def drop_database(
if db_name == app_db:
interface = getRootInterface()
elif db_name == chat_db:
from modules.interfaces.interfaceChatObjects import getInterface as getChatInterface
from modules.interfaces.interfaceDbChatObjects import import getInterface as getChatInterface
interface = getChatInterface(currentUser)
elif db_name == management_db:
from modules.interfaces.interfaceComponentObjects import getInterface as getComponentInterface
from modules.interfaces.interfaceDbComponentObjects import getInterface as getComponentInterface
interface = getComponentInterface(currentUser)
else:
raise HTTPException(status_code=400, detail="Database not found")

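A condensed sketch of the database-to-interface dispatch the admin endpoints above now use; it assumes the same APP_CONFIG keys and getInterface/getRootInterface helpers shown in the diff, wrapped in a hypothetical helper name:

def resolve_interface(database_name, currentUser):
    # Map a configured database name to the matching Db interface, as in drop_table/drop_database
    if database_name == APP_CONFIG.get("DB_APP_DATABASE"):
        return getRootInterface()
    elif database_name == APP_CONFIG.get("DB_CHAT_DATABASE"):
        from modules.interfaces.interfaceDbChatObjects import getInterface as getChatInterface
        return getChatInterface(currentUser)
    elif database_name == APP_CONFIG.get("DB_MANAGEMENT_DATABASE"):
        from modules.interfaces.interfaceDbComponentObjects import getInterface as getComponentInterface
        return getComponentInterface(currentUser)
    raise HTTPException(status_code=400, detail="Database not found")
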
@@ -12,7 +12,7 @@ from requests_oauthlib import OAuth2Session
import httpx
from modules.shared.configuration import APP_CONFIG
from modules.interfaces.interfaceAppObjects import getInterface, getRootInterface
from modules.interfaces.interfaceDbAppObjects import getInterface, getRootInterface
from modules.datamodels.datamodelUam import AuthAuthority, User, ConnectionStatus, UserConnection
from modules.datamodels.datamodelSecurity import Token
from modules.security.auth import getCurrentUser, limiter

@@ -640,7 +640,7 @@ async def verify_token(
# Get a fresh token via TokenManager convenience method
from modules.security.tokenManager import TokenManager
current_token = TokenManager().getFreshToken(appInterface, google_connection.id)
current_token = TokenManager().getFreshToken(google_connection.id)
if not current_token:
raise HTTPException(

@@ -714,7 +714,7 @@ async def refresh_token(
# Get the token for this specific connection (fresh if expiring soon)
from modules.security.tokenManager import TokenManager
current_token = TokenManager().getFreshToken(appInterface, google_connection.id)
current_token = TokenManager().getFreshToken(google_connection.id)
if not current_token:
raise HTTPException(

@@ -15,7 +15,7 @@ from pydantic import BaseModel
# Import auth modules
from modules.security.auth import getCurrentUser, limiter, SECRET_KEY, ALGORITHM
from modules.security.jwtService import createAccessToken, createRefreshToken, setAccessTokenCookie, setRefreshTokenCookie
from modules.interfaces.interfaceAppObjects import getInterface, getRootInterface
from modules.interfaces.interfaceDbAppObjects import getInterface, getRootInterface
from modules.datamodels.datamodelUam import User, UserInDB, AuthAuthority, UserPrivilege
from modules.datamodels.datamodelSecurity import Token
from modules.shared.attributeUtils import ModelMixin

@@ -12,7 +12,7 @@ import msal
import httpx
from modules.shared.configuration import APP_CONFIG
from modules.interfaces.interfaceAppObjects import getInterface, getRootInterface
from modules.interfaces.interfaceDbAppObjects import getInterface, getRootInterface
from modules.datamodels.datamodelUam import AuthAuthority, User, ConnectionStatus, UserConnection
from modules.datamodels.datamodelSecurity import Token
from modules.security.auth import getCurrentUser, limiter

@@ -563,7 +563,7 @@ async def refresh_token(
# Get a fresh token via TokenManager convenience method
from modules.security.tokenManager import TokenManager
current_token = TokenManager().getFreshToken(appInterface, msft_connection.id)
current_token = TokenManager().getFreshToken(msft_connection.id)
if not current_token:
raise HTTPException(

@@ -1,41 +1,63 @@
"""
Google Cloud Voice Services Routes
Replaces Azure voice services with Google Cloud Speech-to-Text and Translation
Includes WebSocket support for real-time voice streaming
"""
import os
import logging
from fastapi import APIRouter, File, Form, UploadFile, Depends, HTTPException, Body
import json
import base64
import asyncio
from fastapi import APIRouter, File, Form, UploadFile, Depends, HTTPException, Body, WebSocket, WebSocketDisconnect
from fastapi.responses import Response
from typing import Optional, Dict, Any
from modules.connectors.connectorGoogleSpeech import ConnectorGoogleSpeech
from typing import Optional, Dict, Any, List
from modules.security.auth import getCurrentUser
from modules.datamodels.datamodelUam import User
from modules.interfaces.interfaceComponentObjects import getInterface
from modules.interfaces.interfaceDbComponentObjects import getInterface
from modules.interfaces.interfaceVoiceObjects import getVoiceInterface, VoiceObjects
logger = logging.getLogger(__name__)
router = APIRouter(prefix="/voice-google", tags=["Voice Google"])
# Global connector instance
_google_speech_connector = None
# Store active WebSocket connections
active_connections: Dict[str, WebSocket] = {}
def get_google_speech_connector() -> ConnectorGoogleSpeech:
"""Get or create Google Cloud Speech connector instance."""
global _google_speech_connector
if _google_speech_connector is None:
class ConnectionManager:
def __init__(self):
self.active_connections: List[WebSocket] = []
async def connect(self, websocket: WebSocket, connection_id: str):
await websocket.accept()
self.active_connections.append(websocket)
active_connections[connection_id] = websocket
logger.info(f"WebSocket connected: {connection_id}")
def disconnect(self, websocket: WebSocket, connection_id: str):
if websocket in self.active_connections:
self.active_connections.remove(websocket)
if connection_id in active_connections:
del active_connections[connection_id]
logger.info(f"WebSocket disconnected: {connection_id}")
async def send_personal_message(self, message: dict, websocket: WebSocket):
try:
_google_speech_connector = ConnectorGoogleSpeech()
logger.info("✅ Google Cloud Speech connector initialized")
await websocket.send_text(json.dumps(message))
except Exception as e:
logger.error(f"❌ Failed to initialize Google Cloud Speech connector: {e}")
raise HTTPException(
status_code=500,
detail=f"Failed to initialize Google Cloud Speech connector: {str(e)}"
)
return _google_speech_connector
logger.error(f"Error sending message: {e}")
manager = ConnectionManager()
def get_voice_interface(current_user: User) -> VoiceObjects:
"""Get voice interface instance with user context."""
try:
return getVoiceInterface(current_user)
except Exception as e:
logger.error(f"Failed to initialize voice interface: {e}")
raise HTTPException(
status_code=500,
detail=f"Failed to initialize voice interface: {str(e)}"
)
@router.post("/speech-to-text")
async def speech_to_text(

@@ -51,9 +73,11 @@ async def speech_to_text(
audio_content = await audio_file.read()
logger.info(f"📊 Audio file size: {len(audio_content)} bytes")
# Get voice interface
voice_interface = get_voice_interface(current_user)
# Validate audio format
connector = get_google_speech_connector()
validation = connector.validate_audio_format(audio_content)
validation = voice_interface.validateAudioFormat(audio_content)
if not validation["valid"]:
raise HTTPException(

@@ -62,8 +86,8 @@ async def speech_to_text(
)
# Perform speech recognition
result = await connector.speech_to_text(
audio_content=audio_content,
result = await voice_interface.speechToText(
audioContent=audio_content,
language=language
)

@@ -111,12 +135,14 @@ async def translate_text(
detail="Empty text provided for translation"
)
# Get voice interface
voice_interface = get_voice_interface(current_user)
# Perform translation
connector = get_google_speech_connector()
result = await connector.translate_text(
result = await voice_interface.translateText(
text=text,
source_language=source_language,
target_language=target_language
sourceLanguage=source_language,
targetLanguage=target_language
)
if result["success"]:

@@ -168,9 +194,11 @@ async def realtime_interpreter(
# f.write(audio_content)
# logger.info(f"💾 Saved audio file for debugging: {debug_filename}")
# Get voice interface
voice_interface = get_voice_interface(current_user)
# Validate audio format
connector = get_google_speech_connector()
validation = connector.validate_audio_format(audio_content)
validation = voice_interface.validateAudioFormat(audio_content)
if not validation["valid"]:
raise HTTPException(

@@ -179,10 +207,10 @@ async def realtime_interpreter(
)
# Perform complete pipeline: Speech-to-Text + Translation
result = await connector.speech_to_translated_text(
audio_content=audio_content,
from_language=from_language,
to_language=to_language
result = await voice_interface.speechToTranslatedText(
audioContent=audio_content,
fromLanguage=from_language,
toLanguage=to_language
)
if result["success"]:

@@ -236,11 +264,11 @@ async def text_to_speech(
detail="Empty text provided for text-to-speech"
)
connector = get_google_speech_connector()
result = await connector.text_to_speech(
voice_interface = get_voice_interface(current_user)
result = await voice_interface.textToSpeech(
text=text,
language_code=language,
voice_name=voice
languageCode=language,
voiceName=voice
)
if result["success"]:

@@ -268,30 +296,76 @@ async def text_to_speech(
detail=f"Text-to-Speech processing failed: {str(e)}"
)
@router.get("/languages")
async def get_available_languages(current_user: User = Depends(getCurrentUser)):
"""Get available languages from Google Cloud Text-to-Speech."""
try:
logger.info("🌐 Getting available languages from Google Cloud TTS")
voice_interface = get_voice_interface(current_user)
result = await voice_interface.getAvailableLanguages()
if result["success"]:
return {
"success": True,
"languages": result["languages"]
}
else:
raise HTTPException(
status_code=400,
detail=f"Failed to get languages: {result.get('error', 'Unknown error')}"
)
except HTTPException:
raise
except Exception as e:
logger.error(f"❌ Get languages error: {e}")
raise HTTPException(
status_code=500,
detail=f"Failed to get available languages: {str(e)}"
)
@router.get("/voices")
async def get_available_voices(
language_code: Optional[str] = None,
current_user: User = Depends(getCurrentUser)
):
"""Get available voices from Google Cloud Text-to-Speech."""
try:
logger.info(f"🎤 Getting available voices, language filter: {language_code}")
voice_interface = get_voice_interface(current_user)
result = await voice_interface.getAvailableVoices(languageCode=language_code)
if result["success"]:
return {
"success": True,
"voices": result["voices"],
"language_filter": language_code
}
else:
raise HTTPException(
status_code=400,
detail=f"Failed to get voices: {result.get('error', 'Unknown error')}"
)
except HTTPException:
raise
except Exception as e:
logger.error(f"❌ Get voices error: {e}")
raise HTTPException(
status_code=500,
detail=f"Failed to get available voices: {str(e)}"
)
@router.get("/health")
async def health_check(current_user: User = Depends(getCurrentUser)):
"""Health check for Google Cloud voice services."""
try:
connector = get_google_speech_connector()
voice_interface = get_voice_interface(current_user)
test_result = await voice_interface.healthCheck()
# Test with a simple translation
test_result = await connector.translate_text(
text="Hello",
source_language="en",
target_language="de"
)
if test_result["success"]:
return {
"status": "healthy",
"service": "Google Cloud Speech-to-Text & Translation",
"test_translation": test_result["translated_text"]
}
else:
return {
"status": "unhealthy",
"error": test_result.get("error", "Unknown error")
}
return test_result
except Exception as e:
logger.error(f"❌ Health check failed: {e}")

@@ -306,11 +380,11 @@ async def get_voice_settings(current_user: User = Depends(getCurrentUser)):
try:
logger.info(f"Getting voice settings for user: {current_user.id}")
# Get database interface with user context
interface = getInterface(current_user)
# Get voice interface
voice_interface = get_voice_interface(current_user)
# Get or create voice settings for the user
voice_settings = interface.getOrCreateVoiceSettings(current_user.id)
voice_settings = voice_interface.getOrCreateVoiceSettings(current_user.id)
if voice_settings:
# Return user settings

@@ -376,23 +450,23 @@ async def save_voice_settings(
if "targetLanguage" not in settings:
settings["targetLanguage"] = "en-US"
# Get database interface with user context
interface = getInterface(current_user)
# Get voice interface
voice_interface = get_voice_interface(current_user)
# Check if settings already exist for this user
existing_settings = interface.getVoiceSettings(current_user.id)
existing_settings = voice_interface.getVoiceSettings(current_user.id)
if existing_settings:
# Update existing settings
logger.info(f"Updating existing voice settings for user {current_user.id}")
updated_settings = interface.updateVoiceSettings(current_user.id, settings)
updated_settings = voice_interface.updateVoiceSettings(current_user.id, settings)
logger.info(f"Voice settings updated for user {current_user.id}: {updated_settings}")
else:
# Create new settings
logger.info(f"Creating new voice settings for user {current_user.id}")
# Add userId to settings
settings["userId"] = current_user.id
created_settings = interface.createVoiceSettings(settings)
created_settings = voice_interface.createVoiceSettings(settings)
logger.info(f"Voice settings created for user {current_user.id}: {created_settings}")
return {

@@ -409,3 +483,190 @@ async def save_voice_settings(
status_code=500,
detail=f"Failed to save voice settings: {str(e)}"
)
# WebSocket endpoints for real-time voice streaming
@router.websocket("/ws/realtime-interpreter")
async def websocket_realtime_interpreter(
websocket: WebSocket,
user_id: str = "default",
from_language: str = "de-DE",
to_language: str = "en-US"
):
"""WebSocket endpoint for real-time voice interpretation"""
connection_id = f"realtime_{user_id}_{from_language}_{to_language}"
try:
await manager.connect(websocket, connection_id)
# Send connection confirmation
await manager.send_personal_message({
"type": "connected",
"connection_id": connection_id,
"message": "Connected to real-time interpreter"
}, websocket)
# Initialize voice interface
voice_interface = get_voice_interface(User(id=user_id))
while True:
# Receive message from client
data = await websocket.receive_text()
message = json.loads(data)
if message["type"] == "audio_chunk":
# Process audio chunk
try:
# Decode base64 audio data
audio_data = base64.b64decode(message["data"])
# For now, just acknowledge receipt
# In a full implementation, this would:
# 1. Buffer audio chunks
# 2. Process with Google Cloud Speech-to-Text streaming
# 3. Send partial results back
# 4. Handle translation
await manager.send_personal_message({
"type": "audio_received",
"chunk_size": len(audio_data),
"timestamp": message.get("timestamp")
}, websocket)
except Exception as e:
logger.error(f"Error processing audio chunk: {e}")
await manager.send_personal_message({
"type": "error",
"error": f"Failed to process audio: {str(e)}"
}, websocket)
elif message["type"] == "ping":
# Respond to ping
await manager.send_personal_message({
"type": "pong",
"timestamp": message.get("timestamp")
}, websocket)
else:
logger.warning(f"Unknown message type: {message['type']}")
except WebSocketDisconnect:
manager.disconnect(websocket, connection_id)
logger.info(f"Client disconnected: {connection_id}")
except Exception as e:
logger.error(f"WebSocket error: {e}")
manager.disconnect(websocket, connection_id)
@router.websocket("/ws/speech-to-text")
async def websocket_speech_to_text(
websocket: WebSocket,
user_id: str = "default",
language: str = "de-DE"
):
"""WebSocket endpoint for real-time speech-to-text"""
connection_id = f"stt_{user_id}_{language}"
try:
await manager.connect(websocket, connection_id)
await manager.send_personal_message({
"type": "connected",
"connection_id": connection_id,
"message": "Connected to speech-to-text"
}, websocket)
# Initialize voice interface
voice_interface = get_voice_interface(User(id=user_id))
while True:
data = await websocket.receive_text()
message = json.loads(data)
if message["type"] == "audio_chunk":
try:
audio_data = base64.b64decode(message["data"])
# Process audio chunk
# This would integrate with Google Cloud Speech-to-Text streaming API
await manager.send_personal_message({
"type": "transcription_result",
"text": "Audio chunk received", # Placeholder
"confidence": 0.95,
"is_final": False
}, websocket)
except Exception as e:
logger.error(f"Error processing audio: {e}")
await manager.send_personal_message({
"type": "error",
"error": f"Failed to process audio: {str(e)}"
}, websocket)
elif message["type"] == "ping":
await manager.send_personal_message({
"type": "pong",
"timestamp": message.get("timestamp")
}, websocket)
except WebSocketDisconnect:
manager.disconnect(websocket, connection_id)
except Exception as e:
logger.error(f"WebSocket error: {e}")
manager.disconnect(websocket, connection_id)
@router.websocket("/ws/text-to-speech")
async def websocket_text_to_speech(
websocket: WebSocket,
user_id: str = "default",
language: str = "de-DE",
voice: str = "de-DE-Wavenet-A"
):
"""WebSocket endpoint for real-time text-to-speech"""
connection_id = f"tts_{user_id}_{language}_{voice}"
try:
await manager.connect(websocket, connection_id)
await manager.send_personal_message({
"type": "connected",
"connection_id": connection_id,
"message": "Connected to text-to-speech"
}, websocket)
while True:
data = await websocket.receive_text()
message = json.loads(data)
if message["type"] == "text_to_speak":
try:
text = message["text"]
# Process text-to-speech
# This would integrate with Google Cloud Text-to-Speech API
# For now, send a placeholder response
await manager.send_personal_message({
"type": "audio_data",
"audio": "base64_encoded_audio_here", # Placeholder
"format": "mp3"
}, websocket)
except Exception as e:
logger.error(f"Error processing text-to-speech: {e}")
await manager.send_personal_message({
"type": "error",
"error": f"Failed to process text: {str(e)}"
}, websocket)
elif message["type"] == "ping":
await manager.send_personal_message({
"type": "pong",
"timestamp": message.get("timestamp")
}, websocket)
except WebSocketDisconnect:
manager.disconnect(websocket, connection_id)
except Exception as e:
logger.error(f"WebSocket error: {e}")
manager.disconnect(websocket, connection_id)

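A minimal client sketch for the new streaming endpoints, assuming the message shapes shown above (base64 "audio_chunk" messages and ping/pong), the standalone websockets client library, and a locally reachable server URL:

import asyncio, base64, json
import websockets  # assumed client library, not part of this commit

async def send_chunk(url="ws://localhost:8000/voice-google/ws/speech-to-text?user_id=demo&language=de-DE"):
    async with websockets.connect(url) as ws:
        print(json.loads(await ws.recv()))  # "connected" confirmation from ConnectionManager
        await ws.send(json.dumps({
            "type": "audio_chunk",
            "data": base64.b64encode(b"...pcm bytes...").decode(),
            "timestamp": 0,
        }))
        print(json.loads(await ws.recv()))  # placeholder "transcription_result" reply

asyncio.run(send_chunk())
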
@@ -1,231 +0,0 @@
"""
Voice Streaming WebSocket Routes
Provides real-time audio streaming for voice services
"""
from fastapi import APIRouter, WebSocket, WebSocketDisconnect, Depends
from fastapi.responses import JSONResponse
import logging
import json
import base64
import asyncio
from typing import Dict, List
from modules.shared.configuration import APP_CONFIG
from modules.connectors.connectorGoogleSpeech import ConnectorGoogleSpeech
logger = logging.getLogger(__name__)
router = APIRouter(prefix="/api/voice/ws", tags=["Voice Streaming"])
# Store active connections
active_connections: Dict[str, WebSocket] = {}
[rest of the deleted standalone voice-streaming module omitted here: its ConnectionManager class and the /realtime-interpreter, /speech-to-text and /text-to-speech WebSocket endpoints are duplicated near-verbatim in the voice-google routes above, using google_speech = ConnectorGoogleSpeech() in place of the voice interface]

@@ -15,8 +15,8 @@ from datetime import datetime, timedelta
from modules.security.auth import limiter, getCurrentUser
# Import interfaces
import modules.interfaces.interfaceChatObjects as interfaceChatObjects
from modules.interfaces.interfaceChatObjects import getInterface
import modules.interfaces.interfaceDbChatObjects as interfaceDbChatObjects
from modules.interfaces.interfaceDbChatObjects import getInterface
# Import models
from modules.datamodels.datamodelChat import (

@@ -24,7 +24,7 @@ from modules.datamodels.datamodelChat import (
ChatMessage,
ChatLog,
ChatStat,
ChatDocument,
ChatDocument
)
from modules.shared.attributeUtils import getModelAttributeDefinitions, AttributeResponse
from modules.datamodels.datamodelUam import User

@@ -45,7 +45,7 @@ router = APIRouter(
)
def getServiceChat(currentUser: User):
return interfaceChatObjects.getInterface(currentUser)
return interfaceDbChatObjects.getInterface(currentUser)
# Consolidated endpoint for getting all workflows
@router.get("/", response_model=List[ChatWorkflow])

@@ -169,10 +169,10 @@ async def get_workflow_status(
"""Get the current status of a workflow."""
try:
# Get service center
interfaceChat = getServiceChat(currentUser)
interfaceDbChat = getServiceChat(currentUser)
# Retrieve workflow
workflow = interfaceChat.getWorkflow(workflowId)
workflow = interfaceDbChat.getWorkflow(workflowId)
if not workflow:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,

@@ -201,10 +201,10 @@ async def get_workflow_logs(
"""Get logs for a workflow with support for selective data transfer."""
try:
# Get service center
interfaceChat = getServiceChat(currentUser)
interfaceDbChat = getServiceChat(currentUser)
# Verify workflow exists
workflow = interfaceChat.getWorkflow(workflowId)
workflow = interfaceDbChat.getWorkflow(workflowId)
if not workflow:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,

@@ -212,7 +212,7 @@ async def get_workflow_logs(
)
# Get all logs
allLogs = interfaceChat.getLogs(workflowId)
allLogs = interfaceDbChat.getLogs(workflowId)
# Apply selective data transfer if logId is provided
if logId:

@@ -244,10 +244,10 @@ async def get_workflow_messages(
"""Get messages for a workflow with support for selective data transfer."""
try:
# Get service center
interfaceChat = getServiceChat(currentUser)
interfaceDbChat = getServiceChat(currentUser)
# Verify workflow exists
workflow = interfaceChat.getWorkflow(workflowId)
workflow = interfaceDbChat.getWorkflow(workflowId)
if not workflow:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,

@@ -255,7 +255,7 @@ async def get_workflow_messages(
)
# Get all messages
allMessages = interfaceChat.getMessages(workflowId)
allMessages = interfaceDbChat.getMessages(workflowId)
# Apply selective data transfer if messageId is provided
if messageId:

@@ -288,10 +288,10 @@ async def delete_workflow(
"""Deletes a workflow and its associated data."""
try:
# Get service center
interfaceChat = getServiceChat(currentUser)
interfaceDbChat = getServiceChat(currentUser)
# Get raw workflow data from database to check permissions
workflows = interfaceChat.db.getRecordset(ChatWorkflow, recordFilter={"id": workflowId})
workflows = interfaceDbChat.db.getRecordset(ChatWorkflow, recordFilter={"id": workflowId})
if not workflows:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,

@@ -301,14 +301,14 @@ async def delete_workflow(
workflow_data = workflows[0]
# Check if user has permission to delete using the interface's permission system
if not interfaceChat._canModify("workflows", workflowId):
if not interfaceDbChat._canModify("workflows", workflowId):
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
detail="You don't have permission to delete this workflow"
)
# Delete workflow
success = interfaceChat.deleteWorkflow(workflowId)
success = interfaceDbChat.deleteWorkflow(workflowId)
if not success:
raise HTTPException(

@@ -343,10 +343,10 @@ async def delete_workflow_message(
"""Delete a message from a workflow."""
try:
# Get service center
interfaceChat = getServiceChat(currentUser)
interfaceDbChat = getServiceChat(currentUser)
# Verify workflow exists
workflow = interfaceChat.getWorkflow(workflowId)
workflow = interfaceDbChat.getWorkflow(workflowId)
if not workflow:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,

@@ -354,7 +354,7 @@ async def delete_workflow_message(
)
# Delete the message
success = interfaceChat.deleteMessage(workflowId, messageId)
success = interfaceDbChat.deleteMessage(workflowId, messageId)
if not success:
raise HTTPException(

@@ -366,7 +366,7 @@ async def delete_workflow_message(
messageIds = workflow.get("messageIds", [])
if messageId in messageIds:
messageIds.remove(messageId)
interfaceChat.updateWorkflow(workflowId, {"messageIds": messageIds})
interfaceDbChat.updateWorkflow(workflowId, {"messageIds": messageIds})
return {
"workflowId": workflowId,

@@ -394,10 +394,10 @@ async def delete_file_from_message(
"""Delete a file reference from a message in a workflow."""
try:
# Get service center
interfaceChat = getServiceChat(currentUser)
interfaceDbChat = getServiceChat(currentUser)
# Verify workflow exists
workflow = interfaceChat.getWorkflow(workflowId)
workflow = interfaceDbChat.getWorkflow(workflowId)
if not workflow:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,

@@ -405,7 +405,7 @@ async def delete_file_from_message(
)
# Delete file reference from message
success = interfaceChat.deleteFileFromMessage(workflowId, messageId, fileId)
success = interfaceDbChat.deleteFileFromMessage(workflowId, messageId, fileId)
if not success:
raise HTTPException(

@@ -15,7 +15,7 @@ from slowapi.util import get_remote_address
from modules.shared.configuration import APP_CONFIG
from modules.shared.timezoneUtils import get_utc_now, get_utc_timestamp
from modules.interfaces.interfaceAppObjects import getRootInterface
from modules.interfaces.interfaceDbAppObjects import getRootInterface
from modules.datamodels.datamodelUam import User, AuthAuthority
from modules.datamodels.datamodelSecurity import Token

@@ -243,21 +243,24 @@ class TokenManager:
return None
# Convenience wrapper to fetch and ensure fresh token for a connection via interface layer
def getFreshToken(self, interfaceApp, connectionId: str, secondsBeforeExpiry: int = 30 * 60) -> Optional[Token]:
def getFreshToken(self, connectionId: str, secondsBeforeExpiry: int = 30 * 60) -> Optional[Token]:
"""Return a fresh token for a connection, refreshing when expiring soon.
Reads the latest stored token via interfaceApp.getConnectionToken, then
Reads the latest stored token via interface layer, then
uses ensure_fresh_token to refresh if needed and persists the refreshed
token via interfaceApp.saveConnectionToken.
token via interface layer.
"""
try:
token = interfaceApp.getConnectionToken(connectionId)
from modules.interfaces.interfaceDbAppObjects import getRootInterface
interfaceDbApp = getRootInterface()
token = interfaceDbApp.getConnectionToken(connectionId)
if not token:
return None
return self.ensure_fresh_token(
token,
seconds_before_expiry=secondsBeforeExpiry,
save_callback=lambda t: interfaceApp.saveConnectionToken(t)
save_callback=lambda t: interfaceDbApp.saveConnectionToken(t)
)
except Exception as e:
logger.error(f"getFreshToken: Error fetching or refreshing token for connection {connectionId}: {e}")

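Call sites now pass only the connection id, since the wrapper resolves the root Db interface itself; a short usage sketch mirroring the Google and MSFT routes above (the connection id value is illustrative):

from modules.security.tokenManager import TokenManager

# previously: TokenManager().getFreshToken(appInterface, connection.id)
current_token = TokenManager().getFreshToken("example-connection-id")
if current_token is None:
    # no stored token, or refresh failed; routes above raise an HTTPException here
    pass
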
@@ -12,6 +12,7 @@ from starlette.responses import Response as StarletteResponse
from typing import Callable
import asyncio
from modules.security.tokenRefreshService import token_refresh_service
from modules.shared.timezoneUtils import get_utc_timestamp
logger = logging.getLogger(__name__)

@@ -160,7 +161,6 @@ class ProactiveTokenRefreshMiddleware(BaseHTTPMiddleware):
Check if we should perform proactive refresh for this user
"""
try:
from modules.shared.timezoneUtils import get_utc_timestamp
current_time = get_utc_timestamp()
last_check = self.last_check.get(user_id, 0)

@@ -184,8 +184,3 @@ class ProactiveTokenRefreshMiddleware(BaseHTTPMiddleware):
except Exception as e:
logger.error(f"Error in proactive token refresh for user {user_id}: {str(e)}")
def get_utc_timestamp():
"""Get current UTC timestamp"""
from modules.shared.timezoneUtils import get_utc_timestamp as _get_utc_timestamp
return _get_utc_timestamp()

@@ -9,7 +9,7 @@ to ensure users don't experience token expiration issues.
import logging
from typing import Optional, Dict, Any, List
from datetime import datetime, timedelta
from modules.interfaces.interfaceAppObjects import getInterface
from modules.interfaces.interfaceDbAppObjects import getInterface
from modules.datamodels.datamodelUam import User, UserConnection, AuthAuthority
from modules.datamodels.datamodelSecurity import Token
from modules.shared.timezoneUtils import get_utc_timestamp

@@ -159,7 +159,7 @@ class TokenRefreshService:
logger.debug(f"Starting silent token refresh for user {user_id}")
# Get user interface
from modules.interfaces.interfaceAppObjects import getRootInterface
from modules.interfaces.interfaceDbAppObjects import getRootInterface
root_interface = getRootInterface()
# Get user connections

@@ -226,7 +226,7 @@ class TokenRefreshService:
logger.debug(f"Starting proactive token refresh for user {user_id}")
# Get user interface
from modules.interfaces.interfaceAppObjects import getRootInterface
from modules.interfaces.interfaceDbAppObjects import getRootInterface
root_interface = getRootInterface()
# Get user connections

@@ -44,14 +44,14 @@ class Services:
# Initialize interfaces
from modules.interfaces.interfaceChatObjects import getInterface as getChatInterface
self.interfaceChat = getChatInterface(user)
from modules.interfaces.interfaceDbChatObjects import getInterface as getChatInterface
self.interfaceDbChat = getChatInterface(user)
from modules.interfaces.interfaceAppObjects import getInterface as getAppInterface
self.interfaceApp = getAppInterface(user)
from modules.interfaces.interfaceDbAppObjects import getInterface as getAppInterface
self.interfaceDbApp = getAppInterface(user)
from modules.interfaces.interfaceComponentObjects import getInterface as getComponentInterface
self.interfaceComponent = getComponentInterface(user)
from modules.interfaces.interfaceDbComponentObjects import getInterface as getComponentInterface
self.interfaceDbComponent = getComponentInterface(user)
# Initialize service packages

@@ -78,7 +78,12 @@ class Services:
from .serviceWeb.mainServiceWeb import WebService
self.web = PublicService(WebService(self))
from .serviceUtils.mainServiceUtils import UtilsService
self.utils = PublicService(UtilsService(self))
async def extractContentFromDocument(self, prompt, document):
return await self.services.documentExtraction.extractContentFromDocument(prompt, document)
def getInterface(user: User, workflow: ChatWorkflow) -> Services:
return Services(user, workflow)

@@ -1,9 +1,17 @@
import logging
from typing import Dict, Any, List, Optional, Tuple
from typing import Dict, Any, List, Optional, Tuple, Union
from modules.datamodels.datamodelChat import ChatDocument
from modules.services.serviceDocument.mainServiceDocumentExtraction import DocumentExtractionService
from modules.datamodels.datamodelAi import AiCallRequest, AiCallOptions
from modules.datamodels.datamodelWeb import (
WebSearchRequest,
WebCrawlRequest,
WebScrapeRequest,
WebSearchActionResult,
WebCrawlActionResult,
WebScrapeActionResult,
)
from modules.interfaces.interfaceAiObjects import AiObjects

@@ -14,7 +22,7 @@ logger = logging.getLogger(__name__)
class AiService:
"""Centralized AI service orchestrating documents, model selection and failover.
"""Centralized AI service orchestrating documents, model selection, failover, and web operations.
"""
def __init__(self, serviceCenter=None) -> None:

@@ -25,16 +33,25 @@ class AiService:
"""
self.serviceCenter = serviceCenter
# Only depend on interfaces
self.aiObjects = AiObjects()
self.aiObjects = None # Will be initialized in create()
self.documentExtractor = DocumentExtractionService()
async def callAi(
@classmethod
async def create(cls, serviceCenter=None) -> "AiService":
"""Create AiService instance with all connectors initialized."""
instance = cls(serviceCenter)
instance.aiObjects = await AiObjects.create()
return instance
# AI Text Generation
async def callAiText(
self,
prompt: str,
documents: Optional[List[ChatDocument]] = None,
processDocumentsIndividually: bool = False,
options: Optional[AiCallOptions] = None,
) -> str:
"""Call AI for text generation using interface.call()."""
try:
documentContent = ""
if documents:

@@ -55,9 +72,91 @@ class AiService:
response = await self.aiObjects.call(request)
return response.content
except Exception as e:
logger.error(f"Error in centralized AI call: {str(e)}")
logger.error(f"Error in AI text generation: {str(e)}")
return f"Error: {str(e)}"
# AI Image Analysis
async def callAiImage(
self,
prompt: str,
imageData: Union[str, bytes],
mimeType: str = None,
options: Optional[AiCallOptions] = None,
) -> str:
"""Call AI for image analysis using interface.callImage()."""
try:
return await self.aiObjects.callImage(prompt, imageData, mimeType, options)
except Exception as e:
logger.error(f"Error in AI image analysis: {str(e)}")
return f"Error: {str(e)}"
# AI Image Generation
async def generateImage(
self,
prompt: str,
size: str = "1024x1024",
quality: str = "standard",
style: str = "vivid",
options: Optional[AiCallOptions] = None,
) -> Dict[str, Any]:
"""Generate an image using AI using interface.generateImage()."""
try:
return await self.aiObjects.generateImage(prompt, size, quality, style, options)
except Exception as e:
logger.error(f"Error in AI image generation: {str(e)}")
return {"success": False, "error": str(e)}
# Web Research (using LangDoc AI)
async def webResearch(
self,
query: str,
context: str = "",
options: Optional[AiCallOptions] = None,
) -> str:
"""Perform web research using LangDoc AI via interface.webQuery()."""
try:
return await self.aiObjects.webQuery(query, context, options)
except Exception as e:
logger.error(f"Error in web research: {str(e)}")
return f"Error: {str(e)}"
# Web Search (using Tavily)
async def webSearch(
self,
request: WebSearchRequest,
) -> WebSearchActionResult:
"""Perform web search using Tavily via interface.webSearch()."""
try:
return await self.aiObjects.webSearch(request)
except Exception as e:
logger.error(f"Error in web search: {str(e)}")
return WebSearchActionResult(success=False, error=str(e))
# Web Crawl (using Tavily)
async def webCrawl(
self,
request: WebCrawlRequest,
) -> WebCrawlActionResult:
"""Crawl web pages using Tavily via interface.webCrawl()."""
try:
return await self.aiObjects.webCrawl(request)
except Exception as e:
logger.error(f"Error in web crawl: {str(e)}")
return WebCrawlActionResult(success=False, error=str(e))
# Web Scrape (using Tavily)
async def webScrape(
self,
request: WebScrapeRequest,
) -> WebScrapeActionResult:
"""Scrape web content using Tavily via interface.webScrape()."""
try:
return await self.aiObjects.webScrape(request)
except Exception as e:
logger.error(f"Error in web scrape: {str(e)}")
return WebScrapeActionResult(success=False, error=str(e))
async def _processDocumentsForAi(
self,
documents: List[ChatDocument],

|
|||
|
|
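A hedged usage sketch for the web wrappers added above. The `success` and `error` fields are taken from the result construction in the diff; the `query` field on `WebSearchRequest` and the combining function are assumptions.

```python
# Illustrative only: combine Tavily-backed search with LangDoc-backed research.
# The 'query' request field is assumed, not confirmed by this diff.
async def research_topic(aiService: "AiService", topic: str) -> str:
    search = await aiService.webSearch(WebSearchRequest(query=topic))
    if not search.success:
        return f"search failed: {search.error}"
    return await aiService.webResearch(query=topic, context="")  # LangDoc web research
```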
@@ -9,7 +9,7 @@ from pathlib import Path
import xml.etree.ElementTree as ET
from bs4 import BeautifulSoup
import uuid
from modules.services.serviceDocument.documentUtility import (
from modules.services.serviceDocument.subDocumentUtility import (
    getFileExtension,
    getMimeTypeFromExtension,
    detectMimeTypeFromContent,

@@ -18,9 +18,11 @@ from modules.services.serviceDocument.documentUtility import (
)

from modules.datamodels.datamodelWorkflow import ExtractedContent
from modules.datamodels.datamodelChat import ContentItem, ContentMetadata
from modules.datamodels.datamodelChat import ContentItem, ContentMetadata, ChatDocument
from modules.services.serviceNeutralization.mainServiceNeutralization import NeutralizationService
from modules.shared.configuration import APP_CONFIG
from modules.services.serviceAi.mainServiceAi import AiService
from modules.interfaces.interfaceAiObjects import AiObjects

logger = logging.getLogger(__name__)
@@ -280,7 +282,7 @@ class DocumentExtractionService:
        # Decode base64 if needed
        if base64Encoded:
            fileData = base64.b64decode(fileData)
        # Use documentUtility for mime type detection
        # Use subDocumentUtility for mime type detection
        if mimeType == "application/octet-stream":
            mimeType = detectMimeTypeFromData(fileData, fileName, self._serviceCenter)
        # Process document based on type

@@ -335,7 +337,7 @@ class DocumentExtractionService:
        content_size = len(content.encode('utf-8'))

        # Use documentUtility for mime type
        # Use subDocumentUtility for mime type
        mime_type = getMimeTypeFromExtension(getFileExtension(fileName))
        return [ContentItem(
            label="main",
@@ -1421,11 +1423,8 @@ class DocumentExtractionService:

        Original prompt: {prompt}
        """
        from modules.datamodels.datamodelChat import ChatDocument
        image_doc = ChatDocument(fileData=chunk, fileName="image", mimeType=mimeType)
        # Use direct import to avoid circular dependency
        from modules.services.serviceAi.mainServiceAi import AiService
        from modules.interfaces.interfaceAiObjects import AiObjects
        aiService = AiService(AiObjects())
        processedContent = await aiService.callAi(
            prompt=imagePrompt,

@@ -1463,8 +1462,6 @@ class DocumentExtractionService:
            processedContent = contentToProcess
        else:
            # Use direct import to avoid circular dependency
            from modules.services.serviceAi.mainServiceAi import AiService
            from modules.interfaces.interfaceAiObjects import AiObjects
            aiService = AiService(AiObjects())
            processedContent = await aiService.callAi(
                prompt=aiPrompt,
@@ -3,7 +3,7 @@ from typing import Any, Dict, List, Optional
from datetime import datetime, UTC
import re
from modules.shared.timezoneUtils import get_utc_timestamp
from modules.services.serviceDocument.documentUtility import (
from modules.services.serviceDocument.subDocumentUtility import (
    getFileExtension,
    getMimeTypeFromExtension,
    detectMimeTypeFromContent,

@@ -33,7 +33,7 @@ class NeutralizationService:
            NamesToParse: List of names to parse and replace (case-insensitive)
        """
        self.serviceCenter = serviceCenter
        self.interfaceApp = serviceCenter.interfaceApp
        self.interfaceDbApp = serviceCenter.interfaceDbApp

        # Initialize anonymization processors
        self.NamesToParse = NamesToParse or []
@@ -44,15 +44,15 @@ class NeutralizationService:

    def getConfig(self) -> Optional[DataNeutraliserConfig]:
        """Get the neutralization configuration for the current user's mandate"""
        if not self.interfaceApp:
        if not self.interfaceDbApp:
            return None
        return self.interfaceApp.getNeutralizationConfig()
        return self.interfaceDbApp.getNeutralizationConfig()

    def saveConfig(self, config_data: Dict[str, Any]) -> DataNeutraliserConfig:
        """Save or update the neutralization configuration"""
        if not self.interfaceApp:
        if not self.interfaceDbApp:
            raise ValueError("User context required for saving configuration")
        return self.interfaceApp.createOrUpdateNeutralizationConfig(config_data)
        return self.interfaceDbApp.createOrUpdateNeutralizationConfig(config_data)

    # Public API: process text or file
@@ -62,18 +62,18 @@ class NeutralizationService:

    def processFile(self, fileId: str) -> Dict[str, Any]:
        """Neutralize a file referenced by its fileId using app interface."""
        if not self.interfaceApp:
        if not self.interfaceDbApp:
            raise ValueError("User context is required to process a file by fileId")
        # Fetch file data and metadata
        fileInfo = None
        try:
            # getFile returns an object; fallback to dict-like
            fileInfo = self.interfaceApp.getFile(fileId)
            fileInfo = self.interfaceDbApp.getFile(fileId)
        except Exception:
            fileInfo = None
        fileName = getattr(fileInfo, 'fileName', None) if fileInfo else None
        mimeType = getattr(fileInfo, 'mimeType', None) if fileInfo else None
        fileData = self.interfaceApp.getFileData(fileId)
        fileData = self.interfaceDbApp.getFileData(fileId)
        if not fileData:
            raise ValueError(f"No file data found for fileId: {fileId}")
@@ -103,17 +103,17 @@ class NeutralizationService:
        return result

    def resolveText(self, text: str) -> str:
        if not self.interfaceApp:
        if not self.interfaceDbApp:
            return text
        try:
            placeholder_pattern = r'\[([a-z]+)\.([a-f0-9-]{36})\]'
            matches = re.findall(placeholder_pattern, text)
            resolved_text = text
            for placeholder_type, uid in matches:
                attributes = self.interfaceApp.db.getRecordset(
                attributes = self.interfaceDbApp.db.getRecordset(
                    DataNeutralizerAttributes,
                    recordFilter={
                        "mandateId": self.interfaceApp.mandateId,
                        "mandateId": self.interfaceDbApp.mandateId,
                        "id": uid
                    }
                )
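To make the placeholder convention in `resolveText` concrete, here is a small, self-contained illustration of the pattern it matches. Only the regex comes from the diff; the sample text and UUID are invented.

```python
import re

# Placeholders look like "[type.<36-character uuid>]" in neutralized text.
placeholder_pattern = r'\[([a-z]+)\.([a-f0-9-]{36})\]'
sample = "Contact [person.123e4567-e89b-12d3-a456-426614174000] about the contract."
for placeholder_type, uid in re.findall(placeholder_pattern, sample):
    print(placeholder_type, uid)  # -> person 123e4567-e89b-12d3-a456-426614174000
```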
@@ -124,7 +124,27 @@ class NeutralizationService:
            return resolved_text
        except Exception:
            return text

    def getAttributes(self) -> List[DataNeutralizerAttributes]:
        """Get all neutralization attributes for the current user's mandate"""
        if not self.interfaceDbApp:
            return []
        try:
            return self.interfaceDbApp.db.getRecordset(
                DataNeutralizerAttributes,
                recordFilter={"mandateId": self.interfaceDbApp.mandateId}
            )
        except Exception as e:
            logger.error(f"Error getting neutralization attributes: {str(e)}")
            return []

    def deleteNeutralizationAttributes(self, fileId: str) -> bool:
        """Delete neutralization attributes for a specific file"""
        if not self.interfaceDbApp:
            return False
        return self.interfaceDbApp.deleteNeutralizationAttributes(fileId)

    # Helper functions

    def _neutralizeText(self, text: str, textType: str = None) -> Dict[str, Any]:
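A brief, illustrative caller for the two helpers promoted to the public surface above; the function and its purpose are invented, the method names are from the diff.

```python
# Illustrative only: list a mandate's neutralization attributes and drop
# the ones tied to a file that is being deleted.
def purge_file_neutralization(neutralizationService, fileId: str) -> int:
    attributes = neutralizationService.getAttributes()  # all attributes for the current mandate
    neutralizationService.deleteNeutralizationAttributes(fileId)  # file-scoped cleanup
    return len(attributes)
```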
@@ -186,19 +206,6 @@ class NeutralizationService:
                processed_info={'type': 'error', 'error': str(e)}
            ).model_dump()

    def _getAttributes(self) -> List[DataNeutralizerAttributes]:
        """Get all neutralization attributes for the current user's mandate"""
        if not self.interfaceApp:
            return []
        try:
            return self.interfaceApp.db.getRecordset(
                DataNeutralizerAttributes,
                recordFilter={"mandateId": self.interfaceApp.mandateId}
            )
        except Exception as e:
            logger.error(f"Error getting neutralization attributes: {str(e)}")
            return []

    def _getContentTypeFromMime(self, mime_type: str) -> str:
        """Determine content type from MIME type for neutralization processing"""
        if mime_type.startswith('text/'):
@@ -19,18 +19,17 @@ class SharepointService:
        Args:
            serviceCenter: Service center instance for accessing other services

        Use setAccessToken() method to configure the access token before making API calls.
        Use setAccessTokenFromConnection() method to configure the access token before making API calls.
        """
        self.serviceCenter = serviceCenter
        self.access_token = None
        self.base_url = "https://graph.microsoft.com/v1.0"

    def setAccessToken(self, userConnection, interfaceApp) -> bool:
    def setAccessTokenFromConnection(self, userConnection) -> bool:
        """Set access token from UserConnection.

        Args:
            userConnection: UserConnection object containing token information
            interfaceApp: InterfaceApp instance used by TokenManager to resolve the token

        Returns:
            bool: True if token was set successfully, False otherwise

@@ -42,7 +41,7 @@ class SharepointService:

        # Get a fresh token for this specific connection
        from modules.security.tokenManager import TokenManager
        token = TokenManager().getFreshToken(interfaceApp, userConnection.id)
        token = TokenManager().getFreshToken(userConnection.id)
        if not token:
            logger.error(f"No token found for connection {userConnection.id}")
            return False
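A minimal sketch of the new SharePoint configuration flow, assuming a resolved `UserConnection`; the helper function is illustrative, the service method and the connection-id-only token lookup are from the diff.

```python
import logging

logger = logging.getLogger(__name__)

# Illustrative only: the token is now resolved from the connection id alone,
# so callers no longer pass an interfaceApp instance.
def configure_sharepoint(sharepointService, userConnection) -> bool:
    ok = sharepointService.setAccessTokenFromConnection(userConnection)
    if not ok:
        logger.warning(f"Could not set access token for connection {userConnection.id}")
    return ok
```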
|
@ -59,8 +58,8 @@ class SharepointService:
|
|||
"""Make a Microsoft Graph API call with proper error handling."""
|
||||
try:
|
||||
if self.access_token is None:
|
||||
logger.error("Access token is not set. Please call setAccessToken() before using the SharePoint service.")
|
||||
return {"error": "Access token is not set. Please call setAccessToken() before using the SharePoint service."}
|
||||
logger.error("Access token is not set. Please call setAccessTokenFromConnection() before using the SharePoint service.")
|
||||
return {"error": "Access token is not set. Please call setAccessTokenFromConnection() before using the SharePoint service."}
|
||||
|
||||
headers = {
|
||||
"Authorization": f"Bearer {self.access_token}",
|
||||
|
|
@ -318,7 +317,7 @@ class SharepointService:
|
|||
"""Download a file from SharePoint."""
|
||||
try:
|
||||
if self.access_token is None:
|
||||
logger.error("Access token is not set. Please call setAccessToken() before using the SharePoint service.")
|
||||
logger.error("Access token is not set. Please call setAccessTokenFromConnection() before using the SharePoint service.")
|
||||
return None
|
||||
|
||||
endpoint = f"sites/{site_id}/drive/items/{file_id}/content"
|
||||
|
|
@ -458,7 +457,7 @@ class SharepointService:
|
|||
"""Download a file by its path within a site."""
|
||||
try:
|
||||
if self.access_token is None:
|
||||
logger.error("Access token is not set. Please call setAccessToken() before using the SharePoint service.")
|
||||
logger.error("Access token is not set. Please call setAccessTokenFromConnection() before using the SharePoint service.")
|
||||
return None
|
||||
|
||||
# Clean the path
|
||||
|
|
|
|||
|
|
@ -2,7 +2,7 @@
|
|||
|
||||
import logging
|
||||
from typing import Dict, Any, Optional
|
||||
from modules.interfaces.interfaceTicketObjects import createTicketInterfaceByType
|
||||
from modules.interfaces.interfaceTicketObjects import _createTicketInterfaceByType
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
|
@ -18,7 +18,7 @@ class TicketService:
|
|||
"""
|
||||
self.serviceCenter = serviceCenter
|
||||
|
||||
async def createTicketInterfaceByType(
|
||||
async def _createTicketInterfaceByType(
|
||||
self,
|
||||
taskSyncDefinition: Dict[str, Any],
|
||||
connectorType: str,
|
||||
|
|
@ -34,7 +34,7 @@ class TicketService:
|
|||
Returns:
|
||||
Ticket interface instance
|
||||
"""
|
||||
return await createTicketInterfaceByType(
|
||||
return await _createTicketInterfaceByType(
|
||||
taskSyncDefinition=taskSyncDefinition,
|
||||
connectorType=connectorType,
|
||||
connectorParams=connectorParams
|
||||
|
|
|
|||
142
modules/services/serviceUtils/mainServiceUtils.py
Normal file
142
modules/services/serviceUtils/mainServiceUtils.py
Normal file
|
|
@ -0,0 +1,142 @@
|
|||
"""
|
||||
Utility service for common operations across the gateway.
|
||||
Provides centralized access to configuration, events, and other utilities.
|
||||
"""
|
||||
|
||||
import logging
|
||||
from typing import Any, Optional, Dict, Callable
|
||||
from modules.shared.configuration import APP_CONFIG
|
||||
from modules.shared.eventManagement import eventManager
|
||||
from modules.shared.timezoneUtils import get_utc_timestamp
|
||||
from modules.security.tokenManager import TokenManager
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
class UtilsService:
|
||||
"""Utility service providing common operations."""
|
||||
|
||||
def __init__(self, services):
|
||||
self.services = services
|
||||
|
||||
def eventRegisterCron(self, job_id: str, func: Callable, cron_kwargs: Dict[str, Any],
|
||||
replace_existing: bool = True, coalesce: bool = True,
|
||||
max_instances: int = 1, misfire_grace_time: int = 1800):
|
||||
"""
|
||||
Register a cron job with the event manager.
|
||||
|
||||
Args:
|
||||
job_id: Unique identifier for the job
|
||||
func: Function to execute
|
||||
cron_kwargs: Cron schedule parameters
|
||||
replace_existing: Whether to replace existing job with same ID
|
||||
coalesce: Whether to coalesce multiple pending executions
|
||||
max_instances: Maximum number of concurrent instances
|
||||
misfire_grace_time: Grace time for misfired jobs in seconds
|
||||
"""
|
||||
try:
|
||||
eventManager.register_cron(
|
||||
job_id=job_id,
|
||||
func=func,
|
||||
cron_kwargs=cron_kwargs,
|
||||
replace_existing=replace_existing,
|
||||
coalesce=coalesce,
|
||||
max_instances=max_instances,
|
||||
misfire_grace_time=misfire_grace_time
|
||||
)
|
||||
logger.info(f"Registered cron job '{job_id}' with schedule: {cron_kwargs}")
|
||||
except Exception as e:
|
||||
logger.error(f"Error registering cron job '{job_id}': {str(e)}")
|
||||
|
||||
def eventRegisterInterval(self, job_id: str, func: Callable, seconds: Optional[int] = None,
|
||||
minutes: Optional[int] = None, hours: Optional[int] = None,
|
||||
replace_existing: bool = True, coalesce: bool = True,
|
||||
max_instances: int = 1, misfire_grace_time: int = 1800):
|
||||
"""
|
||||
Register an interval job with the event manager.
|
||||
|
||||
Args:
|
||||
job_id: Unique identifier for the job
|
||||
func: Function to execute
|
||||
seconds: Interval in seconds
|
||||
minutes: Interval in minutes
|
||||
hours: Interval in hours
|
||||
replace_existing: Whether to replace existing job with same ID
|
||||
coalesce: Whether to coalesce multiple pending executions
|
||||
max_instances: Maximum number of concurrent instances
|
||||
misfire_grace_time: Grace time for misfired jobs in seconds
|
||||
"""
|
||||
try:
|
||||
eventManager.register_interval(
|
||||
job_id=job_id,
|
||||
func=func,
|
||||
seconds=seconds,
|
||||
minutes=minutes,
|
||||
hours=hours,
|
||||
replace_existing=replace_existing,
|
||||
coalesce=coalesce,
|
||||
max_instances=max_instances,
|
||||
misfire_grace_time=misfire_grace_time
|
||||
)
|
||||
logger.info(f"Registered interval job '{job_id}' (h={hours}, m={minutes}, s={seconds})")
|
||||
except Exception as e:
|
||||
logger.error(f"Error registering interval job '{job_id}': {str(e)}")
|
||||
|
||||
def eventRemove(self, job_id: str):
|
||||
"""
|
||||
Remove a scheduled job from the event manager.
|
||||
|
||||
Args:
|
||||
job_id: ID of the job to remove
|
||||
"""
|
||||
try:
|
||||
eventManager.remove(job_id)
|
||||
logger.info(f"Removed job '{job_id}'")
|
||||
except Exception as e:
|
||||
logger.error(f"Error removing job '{job_id}': {str(e)}")
|
||||
|
||||
def configGet(self, key: str, default: Any = None, user_id: str = "system") -> Any:
|
||||
"""
|
||||
Get a configuration value with optional default.
|
||||
|
||||
Args:
|
||||
key: Configuration key to retrieve
|
||||
default: Default value if key not found
|
||||
user_id: User ID for audit logging (default: "system")
|
||||
|
||||
Returns:
|
||||
Configuration value or default
|
||||
"""
|
||||
try:
|
||||
return APP_CONFIG.get(key, default, user_id)
|
||||
except Exception as e:
|
||||
logger.error(f"Error getting config '{key}': {str(e)}")
|
||||
return default
|
||||
|
||||
def getUtcTimestamp(self) -> float:
|
||||
"""
|
||||
Get current UTC timestamp.
|
||||
|
||||
Returns:
|
||||
float: Current UTC timestamp in seconds
|
||||
"""
|
||||
try:
|
||||
return get_utc_timestamp()
|
||||
except Exception as e:
|
||||
logger.error(f"Error getting UTC timestamp: {str(e)}")
|
||||
return 0.0
|
||||
|
||||
def getFreshConnectionToken(self, connectionId: str):
|
||||
"""
|
||||
Get a fresh token for a specific connection.
|
||||
|
||||
Args:
|
||||
connectionId: ID of the connection to get token for
|
||||
|
||||
Returns:
|
||||
Token object or None if not found/expired
|
||||
"""
|
||||
try:
|
||||
return TokenManager().getFreshToken(connectionId)
|
||||
except Exception as e:
|
||||
logger.error(f"Error getting fresh token for connection {connectionId}: {str(e)}")
|
||||
return None
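Since `mainServiceUtils.py` is a new file, a short usage sketch may help. It assumes the service is reached as `services.utils` via the facade wiring shown earlier in this commit; the job function and config key below are invented for the example.

```python
# Illustrative only: schedule a periodic job and read a config value
# through the utils service exposed on the Services facade.
async def heartbeat():  # invented example job
    print("still alive")

def setup_housekeeping(services):
    services.utils.eventRegisterInterval(
        job_id="heartbeat",  # unique job id
        func=heartbeat,
        minutes=15,
    )
    # "Service_Log_RETENTION_DAYS" is a made-up key; configGet falls back to the default
    return services.utils.configGet("Service_Log_RETENTION_DAYS", default=30)
```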
@ -1,49 +0,0 @@
|
|||
import logging
|
||||
from typing import Optional, List
|
||||
|
||||
from modules.datamodels.datamodelWeb import (
|
||||
WebSearchRequest,
|
||||
WebCrawlRequest,
|
||||
WebScrapeRequest,
|
||||
WebSearchActionResult,
|
||||
WebCrawlActionResult,
|
||||
WebScrapeActionResult,
|
||||
)
|
||||
from modules.interfaces.interfaceWebObjects import WebInterface
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class WebService:
|
||||
"""Centralized Web service providing wrappers around web interface actions.
|
||||
"""
|
||||
|
||||
def __init__(self, serviceCenter=None) -> None:
|
||||
self.serviceCenter = serviceCenter
|
||||
|
||||
async def webSearch(self, request: WebSearchRequest) -> WebSearchActionResult:
|
||||
try:
|
||||
web_interface = await WebInterface.create()
|
||||
return await web_interface.search(request)
|
||||
except Exception as e:
|
||||
logger.error(f"Error in webSearch: {str(e)}")
|
||||
raise
|
||||
|
||||
async def webCrawl(self, request: WebCrawlRequest) -> WebCrawlActionResult:
|
||||
try:
|
||||
web_interface = await WebInterface.create()
|
||||
return await web_interface.crawl(request)
|
||||
except Exception as e:
|
||||
logger.error(f"Error in webCrawl: {str(e)}")
|
||||
raise
|
||||
|
||||
async def webScrape(self, request: WebScrapeRequest) -> WebScrapeActionResult:
|
||||
try:
|
||||
web_interface = await WebInterface.create()
|
||||
return await web_interface.scrape(request)
|
||||
except Exception as e:
|
||||
logger.error(f"Error in webScrape: {str(e)}")
|
||||
raise
|
||||
|
||||
|
||||
|
|
@ -5,8 +5,10 @@ from modules.datamodels.datamodelUam import User, UserConnection
|
|||
from modules.datamodels.datamodelChat import ChatDocument, ChatMessage
|
||||
from modules.datamodels.datamodelChat import ExtractedContent
|
||||
from modules.services.serviceDocument.mainServiceDocumentExtraction import DocumentExtractionService
|
||||
from modules.services.serviceDocument.documentUtility import getFileExtension, getMimeTypeFromExtension, detectContentTypeFromData
|
||||
from modules.services.serviceDocument.subDocumentUtility import getFileExtension, getMimeTypeFromExtension, detectContentTypeFromData
|
||||
from modules.shared.timezoneUtils import get_utc_timestamp
|
||||
from modules.services.serviceAi.mainServiceAi import AiService
|
||||
from modules.security.tokenManager import TokenManager
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
|
@ -17,9 +19,9 @@ class WorkflowService:
|
|||
self.serviceCenter = serviceCenter
|
||||
self.user = serviceCenter.user
|
||||
self.workflow = serviceCenter.workflow
|
||||
self.interfaceChat = serviceCenter.interfaceChat
|
||||
self.interfaceComponent = serviceCenter.interfaceComponent
|
||||
self.interfaceApp = serviceCenter.interfaceApp
|
||||
self.interfaceDbChat = serviceCenter.interfaceDbChat
|
||||
self.interfaceDbComponent = serviceCenter.interfaceDbComponent
|
||||
self.interfaceDbApp = serviceCenter.interfaceDbApp
|
||||
|
||||
async def summarizeChat(self, messages: List[ChatMessage]) -> str:
|
||||
"""
|
||||
|
|
@ -55,7 +57,6 @@ class WorkflowService:
|
|||
Please provide a comprehensive summary of this conversation."""
|
||||
|
||||
# Get summary using AI service directly (avoiding circular dependency)
|
||||
from modules.services.serviceAi.mainServiceAi import AiService
|
||||
ai_service = AiService(self)
|
||||
return await ai_service.callAi(
|
||||
prompt=prompt,
|
||||
|
|
@ -221,8 +222,7 @@ class WorkflowService:
|
|||
try:
|
||||
# Get a fresh token via TokenManager convenience method
|
||||
logger.debug(f"Getting fresh token for connection {connection.id}")
|
||||
from modules.security.tokenManager import TokenManager
|
||||
token = TokenManager().getFreshToken(self.interfaceApp, connection.id)
|
||||
token = TokenManager().getFreshToken(connection.id)
|
||||
if token:
|
||||
if hasattr(token, 'expiresAt') and token.expiresAt:
|
||||
current_time = get_utc_timestamp()
|
||||
|
|
@ -256,7 +256,7 @@ class WorkflowService:
|
|||
try:
|
||||
if not authority or not externalUsername:
|
||||
return None
|
||||
user_connections = self.interfaceApp.getUserConnections(self.user.id)
|
||||
user_connections = self.interfaceDbApp.getUserConnections(self.user.id)
|
||||
for connection in user_connections:
|
||||
# Normalize authority for comparison (enum vs string)
|
||||
connection_authority = connection.authority.value if hasattr(connection.authority, 'value') else str(connection.authority)
|
||||
|
|
@ -283,7 +283,7 @@ class WorkflowService:
|
|||
conn_id = parts[3]
|
||||
|
||||
# Get user connections through AppObjects interface
|
||||
user_connections = self.interfaceApp.getUserConnections(self.user.id)
|
||||
user_connections = self.interfaceDbApp.getUserConnections(self.user.id)
|
||||
|
||||
# Find matching connection
|
||||
for conn in user_connections:
|
||||
|
|
@ -297,7 +297,7 @@ class WorkflowService:
|
|||
|
||||
def getFileInfo(self, fileId: str) -> Dict[str, Any]:
|
||||
"""Get file information"""
|
||||
file_item = self.interfaceComponent.getFile(fileId)
|
||||
file_item = self.interfaceDbComponent.getFile(fileId)
|
||||
if file_item:
|
||||
return {
|
||||
"id": file_item.id,
|
||||
|
|
@ -311,7 +311,7 @@ class WorkflowService:
|
|||
|
||||
def getFileData(self, fileId: str) -> bytes:
|
||||
"""Get file data by ID"""
|
||||
return self.interfaceComponent.getFileData(fileId)
|
||||
return self.interfaceDbComponent.getFileData(fileId)
|
||||
|
||||
async def extractContentFromDocument(self, prompt: str, document: ChatDocument) -> ExtractedContent:
|
||||
"""Extract content from ChatDocument using prompt"""
|
||||
|
|
@ -350,7 +350,6 @@ class WorkflowService:
|
|||
raise RuntimeError(f"Document {document.id} properties are inaccessible and recovery failed. Diagnosis: {diagnosis}")
|
||||
|
||||
# Process with DocumentExtractionService directly (no circular dependency)
|
||||
from modules.services.serviceDocument.mainServiceDocumentExtraction import DocumentExtractionService
|
||||
docService = DocumentExtractionService(None) # Pass None to avoid circular dependency
|
||||
content_items = await docService.processFileData(
|
||||
fileData=fileData,
|
||||
|
|
@ -411,7 +410,7 @@ class WorkflowService:
|
|||
|
||||
# Try to access the file directly
|
||||
try:
|
||||
file_info = self.interfaceComponent.getFile(document.fileId)
|
||||
file_info = self.interfaceDbComponent.getFile(document.fileId)
|
||||
if file_info:
|
||||
diagnosis['file_exists'] = True
|
||||
diagnosis['file_info'] = {
|
||||
|
|
@ -442,7 +441,7 @@ class WorkflowService:
|
|||
logger.info(f"Attempting to recover document access for document {document.id}")
|
||||
|
||||
# Re-set the component interface
|
||||
document.setComponentInterface(self.interfaceComponent)
|
||||
document.setComponentInterface(self.interfaceDbComponent)
|
||||
|
||||
# Test if we can now access the fileName
|
||||
try:
|
||||
|
|
@ -466,15 +465,15 @@ class WorkflowService:
|
|||
else:
|
||||
content_bytes = content.encode('utf-8')
|
||||
|
||||
# Create the file (hash and size are computed inside interfaceComponent)
|
||||
file_item = self.interfaceComponent.createFile(
|
||||
# Create the file (hash and size are computed inside interfaceDbComponent)
|
||||
file_item = self.interfaceDbComponent.createFile(
|
||||
name=fileName,
|
||||
mimeType=mimeType,
|
||||
content=content_bytes
|
||||
)
|
||||
|
||||
# Then store the file data
|
||||
self.interfaceComponent.createFileData(file_item.id, content_bytes)
|
||||
self.interfaceDbComponent.createFileData(file_item.id, content_bytes)
|
||||
|
||||
# Get file info to copy attributes
|
||||
file_info = self.getFileInfo(file_item.id)
|
||||
|
|
@ -549,7 +548,7 @@ class WorkflowService:
|
|||
|
||||
# Persist changes to database if any updates were made
|
||||
if update_data:
|
||||
self.interfaceChat.updateWorkflow(self.workflow.id, update_data)
|
||||
self.interfaceDbChat.updateWorkflow(self.workflow.id, update_data)
|
||||
|
||||
logger.debug(f"Updated workflow context: Round {self.workflow.currentRound if hasattr(self.workflow, 'currentRound') else 'N/A'}, Task {self.workflow.currentTask if hasattr(self.workflow, 'currentTask') else 'N/A'}, Action {self.workflow.currentAction if hasattr(self.workflow, 'currentAction') else 'N/A'}")
|
||||
except Exception as e:
|
||||
|
|
|
|||
|
|
@ -15,6 +15,7 @@ from pathlib import Path
|
|||
from cryptography.fernet import Fernet
|
||||
from cryptography.hazmat.primitives import hashes
|
||||
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
|
||||
# audit_logger imported lazily to avoid circular import
|
||||
|
||||
# Set up basic logging for configuration loading
|
||||
logging.basicConfig(
|
||||
|
|
|
|||
|
|
@ -9,20 +9,16 @@ from datetime import datetime, UTC
|
|||
|
||||
from modules.workflows.methods.methodBase import MethodBase, action
|
||||
from modules.datamodels.datamodelWorkflow import ActionResult
|
||||
from modules.shared.timezoneUtils import get_utc_timestamp
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
class MethodAi(MethodBase):
|
||||
"""AI processing methods."""
|
||||
|
||||
def __init__(self, service):
|
||||
super().__init__(service)
|
||||
def __init__(self, services):
|
||||
super().__init__(services)
|
||||
self.name = "ai"
|
||||
self.description = "AI processing methods"
|
||||
# Centralized services interface (for AI)
|
||||
from modules.services import getInterface as getServices
|
||||
self.services = getServices(self.service.user, self.service.workflow)
|
||||
|
||||
def _format_timestamp_for_filename(self) -> str:
|
||||
"""Format current timestamp as YYYYMMDD-hhmmss for filenames."""
|
||||
|
|
@ -37,9 +33,13 @@ class MethodAi(MethodBase):
|
|||
aiPrompt (str): The AI prompt for processing
|
||||
documentList (list, optional): List of document references to include in context
|
||||
expectedDocumentFormats (list, optional): Expected output formats with extension, mimeType, description
|
||||
processingMode (str, optional): Processing mode ('basic', 'advanced', 'detailed') - defaults to 'basic'
|
||||
processingMode (str, optional): Processing mode - use 'basic', 'advanced', or 'detailed' (defaults to 'basic')
|
||||
includeMetadata (bool, optional): Whether to include metadata (default: True)
|
||||
customInstructions (str, optional): Additional custom instructions for the AI
|
||||
operationType (str, optional): Operation type - use 'general', 'generate_plan', 'analyse_content', 'generate_content', 'web_research', 'image_analysis', or 'image_generation'
|
||||
priority (str, optional): Priority level - use 'speed', 'quality', 'cost', or 'balanced'
|
||||
maxCost (float, optional): Maximum cost budget for the AI call
|
||||
maxProcessingTime (int, optional): Maximum processing time in seconds
|
||||
requiredTags (list, optional): Required model tags - use 'text', 'chat', 'reasoning', 'analysis', 'image', 'vision', 'web', 'search', etc.
|
||||
"""
|
||||
try:
|
||||
aiPrompt = parameters.get("aiPrompt")
|
||||
|
|
@ -49,7 +49,11 @@ class MethodAi(MethodBase):
|
|||
expectedDocumentFormats = parameters.get("expectedDocumentFormats", [])
|
||||
processingMode = parameters.get("processingMode", "basic")
|
||||
includeMetadata = parameters.get("includeMetadata", True)
|
||||
customInstructions = parameters.get("customInstructions", "")
|
||||
operationType = parameters.get("operationType", "general")
|
||||
priority = parameters.get("priority", "balanced")
|
||||
maxCost = parameters.get("maxCost")
|
||||
maxProcessingTime = parameters.get("maxProcessingTime")
|
||||
requiredTags = parameters.get("requiredTags")
|
||||
|
||||
if not aiPrompt:
|
||||
return ActionResult.isFailure(
|
||||
|
|
@ -69,11 +73,11 @@ class MethodAi(MethodBase):
|
|||
# Build context from documents if provided
|
||||
context = ""
|
||||
if documentList:
|
||||
chatDocuments = self.service.getChatDocumentsFromDocumentList(documentList)
|
||||
chatDocuments = self.services.workflow.getChatDocumentsFromDocumentList(documentList)
|
||||
if chatDocuments:
|
||||
context_parts = []
|
||||
for doc in chatDocuments:
|
||||
file_info = self.service.getFileInfo(doc.fileId)
|
||||
file_info = self.services.workflow.getFileInfo(doc.fileId)
|
||||
|
||||
try:
|
||||
# Use the document content extraction service with the specific AI prompt context
|
||||
|
|
@ -96,7 +100,7 @@ class MethodAi(MethodBase):
|
|||
|
||||
logger.debug(f"Extracting content from {doc.fileName} with task-specific prompt: {extraction_prompt[:100]}...")
|
||||
|
||||
extracted_content = await self.service.extractContentFromDocument(
|
||||
extracted_content = await self.services.documentExtraction.extractContentFromDocument(
|
||||
prompt=extraction_prompt.strip(),
|
||||
document=doc
|
||||
)
|
||||
|
|
@ -186,7 +190,7 @@ class MethodAi(MethodBase):
|
|||
if documentList:
|
||||
for d in (chatDocuments or []):
|
||||
try:
|
||||
file_data = self.service.getFileData(d.fileId)
|
||||
file_data = self.services.workflow.getFileData(d.fileId)
|
||||
documents.append(
|
||||
ChatDocument(
|
||||
fileData=file_data,
|
||||
|
|
@ -200,22 +204,32 @@ class MethodAi(MethodBase):
|
|||
documents = None
|
||||
|
||||
output_format = output_extension.replace('.', '') or 'txt'
|
||||
|
||||
# Build options from parameters
|
||||
options = {
|
||||
"process_type": "text",
|
||||
"operation_type": operationType,
|
||||
"priority": priority,
|
||||
"compress_prompt": processingMode != "detailed",
|
||||
"compress_documents": True,
|
||||
"process_documents_individually": True,
|
||||
"processing_mode": processingMode,
|
||||
"result_format_requested": output_format,
|
||||
"include_metadata": includeMetadata
|
||||
}
|
||||
|
||||
# Add optional parameters if provided
|
||||
if maxCost is not None:
|
||||
options["max_cost"] = maxCost
|
||||
if maxProcessingTime is not None:
|
||||
options["max_processing_time"] = maxProcessingTime
|
||||
if requiredTags is not None:
|
||||
options["required_tags"] = requiredTags
|
||||
|
||||
result = await self.services.ai.callAi(
|
||||
prompt=call_prompt,
|
||||
documents=documents or None,
|
||||
options={
|
||||
"process_type": "text",
|
||||
"operation_type": "generate_content",
|
||||
"priority": "quality" if processingMode in ["advanced", "detailed"] else "speed",
|
||||
"compress_prompt": processingMode != "detailed",
|
||||
"compress_documents": True,
|
||||
"process_documents_individually": True,
|
||||
"processing_mode": processingMode,
|
||||
"result_format_requested": output_format,
|
||||
"include_metadata": includeMetadata,
|
||||
"max_cost": 0.05 if processingMode in ["advanced", "detailed"] else 0.02,
|
||||
"max_processing_time": 45 if processingMode in ["advanced", "detailed"] else 20
|
||||
}
|
||||
options=options
|
||||
)
|
||||
|
||||
# If expected JSON and too short/not JSON, retry with stricter JSON guardrails
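The hunk above replaces the hard-coded `options` dict with one built from the action's parameters. A hedged example of a parameters payload an action invocation might now carry; the keys mirror the `parameters.get(...)` calls in this hunk, the values are invented.

```python
# Illustrative parameters payload for the AI action; values are made up.
parameters = {
    "aiPrompt": "Compare the attached offers and list the key differences.",
    "documentList": ["label.offers"],  # resolved via the workflow service
    "processingMode": "advanced",
    "operationType": "analyse_content",
    "priority": "quality",
    "maxCost": 0.05,
    "maxProcessingTime": 45,
    "requiredTags": ["text", "analysis"],
}
```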
@@ -30,9 +30,9 @@ def action(func):
class MethodBase:
    """Base class for all methods"""

    def __init__(self, serviceCenter: Any):
        """Initialize method with service center"""
        self.service = serviceCenter
    def __init__(self, services: Any):
        """Initialize method with services object"""
        self.services = services
        self.name: str
        self.description: str
        self.logger = logging.getLogger(f"{__name__}.{self.__class__.__name__}")
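A minimal sketch of a method subclass after this change, to show the `services` handoff. The example method is invented; the `action` decorator, `ActionResult.isFailure`, and the facade attributes appear elsewhere in this commit, while the success-path return is left loose because its constructor is not shown here.

```python
# Illustrative subclass: methods now receive the Services facade directly,
# so helpers are reached via self.services.* instead of a serviceCenter.
from modules.workflows.methods.methodBase import MethodBase, action
from modules.datamodels.datamodelWorkflow import ActionResult

class MethodExample(MethodBase):
    def __init__(self, services):
        super().__init__(services)
        self.name = "example"
        self.description = "Invented example method"

    @action
    async def summarizeFile(self, parameters):
        fileId = parameters.get("fileId")
        if not fileId:
            return ActionResult.isFailure(error="fileId is required")
        data = self.services.workflow.getFileData(fileId)  # facade call, as in the hunks above
        text = data.decode("utf-8", errors="ignore")[:2000] if isinstance(data, bytes) else str(data)
        return await self.services.ai.callAiText(prompt=f"Summarize:\n{text}")
```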
|
|
|||
|
|
@ -10,21 +10,17 @@ from datetime import datetime, UTC
|
|||
|
||||
from modules.workflows.methods.methodBase import MethodBase, action
|
||||
from modules.datamodels.datamodelWorkflow import ActionResult, ChatDocument
|
||||
from modules.shared.timezoneUtils import get_utc_timestamp
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
class MethodDocument(MethodBase):
|
||||
"""Document method implementation for document operations"""
|
||||
|
||||
def __init__(self, serviceCenter: Any):
|
||||
def __init__(self, services):
|
||||
"""Initialize the document method"""
|
||||
super().__init__(serviceCenter)
|
||||
super().__init__(services)
|
||||
self.name = "document"
|
||||
self.description = "Handle document operations like extraction and analysis"
|
||||
# Centralized services interface (for AI)
|
||||
from modules.services import getInterface as getServices
|
||||
self.services = getServices(self.service.user, self.service.workflow)
|
||||
|
||||
def _format_timestamp_for_filename(self) -> str:
|
||||
"""Format current timestamp as YYYYMMDD-hhmmss for filenames."""
|
||||
|
|
@ -59,7 +55,7 @@ class MethodDocument(MethodBase):
|
|||
error="AI prompt is required"
|
||||
)
|
||||
|
||||
chatDocuments = self.service.getChatDocumentsFromDocumentList(documentList)
|
||||
chatDocuments = self.services.workflow.getChatDocumentsFromDocumentList(documentList)
|
||||
if not chatDocuments:
|
||||
return ActionResult.isFailure(
|
||||
error="No documents found for the provided reference"
|
||||
|
|
@ -70,12 +66,12 @@ class MethodDocument(MethodBase):
|
|||
file_infos = []
|
||||
|
||||
for chatDocument in chatDocuments:
|
||||
file_info = self.service.getFileInfo(chatDocument.fileId)
|
||||
file_info = self.services.workflow.getFileInfo(chatDocument.fileId)
|
||||
|
||||
try:
|
||||
# Use the document content extraction service with the specific AI prompt
|
||||
# This handles all document types (text, binary, image, etc.) intelligently
|
||||
extracted_content = await self.service.extractContentFromDocument(
|
||||
extracted_content = await self.services.documentExtraction.extractContentFromDocument(
|
||||
prompt=aiPrompt,
|
||||
document=chatDocument
|
||||
)
|
||||
|
|
@ -158,7 +154,7 @@ class MethodDocument(MethodBase):
|
|||
"content": final_content,
|
||||
"originalfileName": original_fileName,
|
||||
"fileInfos": [file_infos[i]] if includeMetadata and i < len(file_infos) else None,
|
||||
"timestamp": get_utc_timestamp()
|
||||
"timestamp": self.services.utils.getUtcTimestamp()
|
||||
}
|
||||
|
||||
logger.info(f"Created output document: {output_fileName} with {len(final_content)} characters")
|
||||
|
|
@ -210,7 +206,7 @@ class MethodDocument(MethodBase):
|
|||
)
|
||||
|
||||
# Get chat documents for original documents list
|
||||
chat_documents = self.service.getChatDocumentsFromDocumentList(document_list)
|
||||
chat_documents = self.services.workflow.getChatDocumentsFromDocumentList(document_list)
|
||||
logger.info(f"Found {len(chat_documents)} chat documents")
|
||||
|
||||
if not chat_documents:
|
||||
|
|
@ -231,7 +227,7 @@ class MethodDocument(MethodBase):
|
|||
if hasattr(chat_document, 'fileId') and chat_document.fileId:
|
||||
try:
|
||||
# Get file data directly without AI processing
|
||||
file_data = self.service.getFileData(chat_document.fileId)
|
||||
file_data = self.services.workflow.getFileData(chat_document.fileId)
|
||||
if file_data:
|
||||
# Check if it's text data and convert to string
|
||||
if isinstance(file_data, bytes):
|
||||
|
|
@ -321,7 +317,7 @@ class MethodDocument(MethodBase):
|
|||
"content": formatted_content,
|
||||
"outputFormat": target_format,
|
||||
"originalDocument": original_name,
|
||||
"timestamp": get_utc_timestamp()
|
||||
"timestamp": self.services.utils.getUtcTimestamp()
|
||||
}
|
||||
|
||||
logger.info(f"Generated document: {output_fileName} with {len(formatted_content)} characters")
|
||||
|
|
@ -403,7 +399,7 @@ class MethodDocument(MethodBase):
|
|||
"content": formatted_content,
|
||||
"outputFormat": target_format,
|
||||
"originalDocuments": original_file_names,
|
||||
"timestamp": get_utc_timestamp(),
|
||||
"timestamp": self.services.utils.getUtcTimestamp(),
|
||||
"merged": True
|
||||
}
|
||||
|
||||
|
|
@ -591,22 +587,22 @@ class MethodDocument(MethodBase):
|
|||
elif extension == ".json":
|
||||
# Simple JSON fallback
|
||||
content_escaped = content.replace('"', '\\"')
|
||||
timestamp = get_utc_timestamp()
|
||||
timestamp = self.services.utils.getUtcTimestamp()
|
||||
return f'{{"content": "{content_escaped}", "format": "json", "timestamp": {timestamp}}}'
|
||||
|
||||
elif extension == ".xml":
|
||||
# Simple XML fallback
|
||||
timestamp = get_utc_timestamp()
|
||||
timestamp = self.services.utils.getUtcTimestamp()
|
||||
return f'<?xml version="1.0" encoding="UTF-8"?>\n<document>\n<content>{content}</content>\n<format>xml</format>\n<timestamp>{timestamp}</timestamp>\n</document>'
|
||||
|
||||
elif extension == ".html":
|
||||
# Simple HTML fallback
|
||||
timestamp = int(get_utc_timestamp())
|
||||
timestamp = int(self.services.utils.getUtcTimestamp())
|
||||
return f'<!DOCTYPE html>\n<html>\n<head><meta charset="UTF-8"><title>Generated Document</title></head>\n<body>\n<pre>{content}</pre>\n<p><em>Generated on {timestamp}</em></p>\n</body>\n</html>'
|
||||
|
||||
elif extension == ".md":
|
||||
# Simple Markdown fallback
|
||||
timestamp = int(get_utc_timestamp())
|
||||
timestamp = int(self.services.utils.getUtcTimestamp())
|
||||
return f"# Generated Document\n\n{content}\n\n---\n*Generated on {timestamp}*"
|
||||
|
||||
else:
|
||||
|
|
@ -646,7 +642,7 @@ class MethodDocument(MethodBase):
|
|||
error="Prompt is required to specify what kind of report to generate"
|
||||
)
|
||||
|
||||
chatDocuments = self.service.getChatDocumentsFromDocumentList(documentList)
|
||||
chatDocuments = self.services.workflow.getChatDocumentsFromDocumentList(documentList)
|
||||
logger.info(f"Retrieved {len(chatDocuments)} chat documents for report generation")
|
||||
|
||||
if not chatDocuments:
|
||||
|
|
@ -658,14 +654,14 @@ class MethodDocument(MethodBase):
|
|||
html_content = await self._generateHtmlReport(chatDocuments, title, includeMetadata, prompt)
|
||||
|
||||
# Create output fileName
|
||||
timestamp = int(get_utc_timestamp())
|
||||
timestamp = int(self.services.utils.getUtcTimestamp())
|
||||
output_fileName = f"report_{self._format_timestamp_for_filename()}.html"
|
||||
|
||||
result_data = {
|
||||
"documentCount": len(chatDocuments),
|
||||
"content": html_content,
|
||||
"title": title,
|
||||
"timestamp": get_utc_timestamp()
|
||||
"timestamp": self.services.utils.getUtcTimestamp()
|
||||
}
|
||||
|
||||
logger.info(f"Generated HTML report: {output_fileName} with {len(html_content)} characters")
|
||||
|
|
@ -698,7 +694,7 @@ class MethodDocument(MethodBase):
|
|||
|
||||
# Get actual file content using the document content extraction service
|
||||
try:
|
||||
extracted_content = await self.service.extractContentFromDocument(
|
||||
extracted_content = await self.services.documentExtraction.extractContentFromDocument(
|
||||
prompt="Extract readable text content for HTML report generation",
|
||||
document=doc
|
||||
)
|
||||
|
|
@ -728,7 +724,7 @@ class MethodDocument(MethodBase):
|
|||
|
||||
if not validDocuments:
|
||||
# No readable content; return a minimal valid HTML document
|
||||
timestamp = int(get_utc_timestamp())
|
||||
timestamp = int(self.services.utils.getUtcTimestamp())
|
||||
return f"<!DOCTYPE html><html><head><meta charset=\"UTF-8\"><title>{title}</title></head><body><h1>{title}</h1><p>Keine auswertbaren Inhalte gefunden.</p><p>Generated: {timestamp}</p></body></html>"
|
||||
|
||||
# Create AI prompt for comprehensive report generation using user's prompt
|
||||
|
|
@ -770,7 +766,7 @@ SOURCE DOCUMENT CONTENT:
|
|||
try:
|
||||
for d in validDocuments:
|
||||
try:
|
||||
data = self.service.getFileData(d.fileId) if hasattr(d, 'fileId') else None
|
||||
data = self.services.workflow.getFileData(d.fileId) if hasattr(d, 'fileId') else None
|
||||
if data:
|
||||
documents.append(ChatDocument(fileData=data, fileName=d.fileName, mimeType=d.mimeType))
|
||||
except Exception:
|
||||
|
|
|
|||
|
|
@ -14,22 +14,17 @@ import requests
|
|||
from modules.workflows.methods.methodBase import MethodBase, action
|
||||
from modules.datamodels.datamodelWorkflow import ActionResult, ChatDocument
|
||||
from modules.datamodels.datamodelUam import ConnectionStatus
|
||||
from modules.shared.timezoneUtils import get_utc_timestamp
|
||||
from modules.services import getInterface as getServices
|
||||
from modules.security.tokenManager import TokenManager
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
class MethodOutlook(MethodBase):
|
||||
"""Outlook method implementation for email operations"""
|
||||
|
||||
def __init__(self, serviceCenter: Any):
|
||||
def __init__(self, services):
|
||||
"""Initialize the Outlook method"""
|
||||
super().__init__(serviceCenter)
|
||||
super().__init__(services)
|
||||
self.name = "outlook"
|
||||
self.description = "Handle Microsoft Outlook email operations"
|
||||
# Centralized services interface (for AI)
|
||||
self.services = getServices(self.service.user, self.service.workflow)
|
||||
|
||||
def _format_timestamp_for_filename(self) -> str:
|
||||
"""Format current timestamp as YYYYMMDD-hhmmss for filenames."""
|
||||
|
|
@ -43,28 +38,21 @@ class MethodOutlook(MethodBase):
|
|||
logger.debug(f"Getting Microsoft connection for reference: {connectionReference}")
|
||||
|
||||
# Get the connection from the service
|
||||
userConnection = self.service.getUserConnectionFromConnectionReference(connectionReference)
|
||||
userConnection = self.services.workflow.getUserConnectionFromConnectionReference(connectionReference)
|
||||
if not userConnection:
|
||||
logger.error(f"Connection not found: {connectionReference}")
|
||||
return None
|
||||
|
||||
logger.debug(f"Found connection: {userConnection.id}, status: {userConnection.status.value}, authority: {userConnection.authority.value}")
|
||||
|
||||
# Get a fresh token for this specific connection
|
||||
token = TokenManager().getFreshToken(self.service.interfaceApp, userConnection.id)
|
||||
# Get a fresh token for this connection
|
||||
token = self.services.utils.getFreshConnectionToken(userConnection.id)
|
||||
if not token:
|
||||
logger.error(f"Token not found for connection: {userConnection.id}")
|
||||
logger.error(f"Fresh token not found for connection: {userConnection.id}")
|
||||
logger.debug(f"Connection details: {userConnection}")
|
||||
return None
|
||||
|
||||
logger.debug(f"Token retrieved for connection {userConnection.id}")
|
||||
|
||||
# Check if token is expired
|
||||
if hasattr(token, 'expiresAt') and token.expiresAt:
|
||||
current_time = get_utc_timestamp()
|
||||
if current_time > token.expiresAt:
|
||||
logger.error(f"Token for connection {userConnection.id} is expired (expiresAt: {token.expiresAt}, current: {current_time})")
|
||||
return None
|
||||
logger.debug(f"Fresh token retrieved for connection {userConnection.id}")
|
||||
|
||||
# Check if connection is active
|
||||
if userConnection.status.value != "active":
|
||||
|
|
@ -435,7 +423,7 @@ class MethodOutlook(MethodBase):
|
|||
"authority": "microsoft",
|
||||
"reference": connectionReference
|
||||
},
|
||||
"timestamp": get_utc_timestamp()
|
||||
"timestamp": self.services.utils.getUtcTimestamp()
|
||||
},
|
||||
"mimeType": "application/json"
|
||||
}]
|
||||
|
|
@ -492,7 +480,7 @@ class MethodOutlook(MethodBase):
|
|||
|
||||
# Get the composed email document
|
||||
|
||||
composed_email_docs = self.service.getChatDocumentsFromDocumentList([composed_email_ref])
|
||||
composed_email_docs = self.services.workflow.getChatDocumentsFromDocumentList([composed_email_ref])
|
||||
if not composed_email_docs or len(composed_email_docs) == 0:
|
||||
logger.error(f"Could not find composed email document: {composed_email_ref}")
|
||||
return ActionResult.isFailure(error=f"Could not find composed email document: {composed_email_ref}")
|
||||
|
|
@ -517,7 +505,7 @@ class MethodOutlook(MethodBase):
|
|||
# Read the actual file content from the database
|
||||
try:
|
||||
# Use the correct service interface to read file data
|
||||
file_content = self.service.getFileData(file_id)
|
||||
file_content = self.services.workflow.getFileData(file_id)
|
||||
if not file_content:
|
||||
logger.error(f"Failed to read file content for fileId: {file_id}")
|
||||
return ActionResult.isFailure(error="Failed to read composed email file content")
|
||||
|
|
@ -645,7 +633,7 @@ class MethodOutlook(MethodBase):
|
|||
|
||||
|
||||
# Get attachment document from service center
|
||||
attachment_docs = self.service.getChatDocumentsFromDocumentList([attachment_ref])
|
||||
attachment_docs = self.services.workflow.getChatDocumentsFromDocumentList([attachment_ref])
|
||||
if attachment_docs:
|
||||
for doc in attachment_docs:
|
||||
# Get the actual file content using fileId
|
||||
|
|
@ -653,7 +641,7 @@ class MethodOutlook(MethodBase):
|
|||
if file_id:
|
||||
try:
|
||||
# Read the actual file content
|
||||
file_content = self.service.getFileData(file_id)
|
||||
file_content = self.services.workflow.getFileData(file_id)
|
||||
if file_content:
|
||||
# Convert to base64 for Graph API
|
||||
if isinstance(file_content, bytes):
|
||||
|
|
@ -721,7 +709,7 @@ class MethodOutlook(MethodBase):
|
|||
"mailbox": connection.get('userEmail', 'Unknown'),
|
||||
"subject": subject,
|
||||
"recipients": to,
|
||||
"timestamp": get_utc_timestamp()
|
||||
"timestamp": self.services.utils.getUtcTimestamp()
|
||||
},
|
||||
"mimeType": "application/json"
|
||||
}]
|
||||
|
|
@ -758,7 +746,7 @@ class MethodOutlook(MethodBase):
|
|||
"authority": "microsoft",
|
||||
"reference": connectionReference
|
||||
},
|
||||
"timestamp": get_utc_timestamp()
|
||||
"timestamp": self.services.utils.getUtcTimestamp()
|
||||
},
|
||||
"mimeType": "application/json"
|
||||
}]
|
||||
|
|
@ -964,7 +952,7 @@ class MethodOutlook(MethodBase):
|
|||
"authority": "microsoft",
|
||||
"reference": connectionReference
|
||||
},
|
||||
"timestamp": get_utc_timestamp()
|
||||
"timestamp": self.services.utils.getUtcTimestamp()
|
||||
},
|
||||
"mimeType": "application/json"
|
||||
}]
|
||||
|
|
@ -1085,7 +1073,7 @@ class MethodOutlook(MethodBase):
|
|||
"authority": "microsoft",
|
||||
"reference": connectionReference
|
||||
},
|
||||
"timestamp": get_utc_timestamp()
|
||||
"timestamp": self.services.utils.getUtcTimestamp()
|
||||
},
|
||||
"mimeType": "application/json"
|
||||
}]
|
||||
|
|
@ -1193,7 +1181,7 @@ class MethodOutlook(MethodBase):
|
|||
"authority": "microsoft",
|
||||
"reference": connectionReference
|
||||
},
|
||||
"timestamp": get_utc_timestamp()
|
||||
"timestamp": self.services.utils.getUtcTimestamp()
|
||||
},
|
||||
"mimeType": "application/json"
|
||||
}]
|
||||
|
|
@ -1332,7 +1320,7 @@ class MethodOutlook(MethodBase):
|
|||
"authority": "microsoft",
|
||||
"reference": connectionReference
|
||||
},
|
||||
"timestamp": get_utc_timestamp()
|
||||
"timestamp": self.services.utils.getUtcTimestamp()
|
||||
},
|
||||
"mimeType": "application/json"
|
||||
}]
|
||||
|
|
@ -1405,7 +1393,7 @@ class MethodOutlook(MethodBase):
|
|||
|
||||
try:
|
||||
# Get document content from service center
|
||||
docs = self.service.getChatDocumentsFromDocumentList(documentList)
|
||||
docs = self.services.workflow.getChatDocumentsFromDocumentList(documentList)
|
||||
if docs:
|
||||
for doc in docs:
|
||||
composition_documents.append(doc)
|
||||
|
|
@ -1415,7 +1403,7 @@ class MethodOutlook(MethodBase):
|
|||
if hasattr(doc, 'fileId') and doc.fileId:
|
||||
# Use the document content extraction service instead of raw file reading
|
||||
try:
|
||||
extracted_content = await self.service.extractContentFromDocument(
|
||||
extracted_content = await self.services.documentExtraction.extractContentFromDocument(
|
||||
prompt="Extract readable text content for email composition",
|
||||
document=doc
|
||||
)
|
||||
|
|
@ -1453,7 +1441,7 @@ class MethodOutlook(MethodBase):
|
|||
|
||||
try:
|
||||
# Get attachment documents from service center
|
||||
attachment_docs = self.service.getChatDocumentsFromDocumentList(attachmentDocumentList)
|
||||
attachment_docs = self.services.workflow.getChatDocumentsFromDocumentList(attachmentDocumentList)
|
||||
if attachment_docs:
|
||||
for doc in attachment_docs:
|
||||
# Add to attachments list
|
||||
|
|
@ -1520,7 +1508,7 @@ class MethodOutlook(MethodBase):
|
|||
if composition_documents:
|
||||
for d in composition_documents:
|
||||
try:
|
||||
data = self.service.getFileData(d.fileId) if hasattr(d, 'fileId') else None
|
||||
data = self.services.workflow.getFileData(d.fileId) if hasattr(d, 'fileId') else None
|
||||
if data:
|
||||
documents.append(ChatDocument(fileData=data, fileName=d.fileName, mimeType=d.mimeType))
|
||||
except Exception:
|
||||
|
|
@ -1577,7 +1565,7 @@ class MethodOutlook(MethodBase):
|
|||
"context": context,
|
||||
"recipient": recipient,
|
||||
"tone": tone,
|
||||
"timestamp": get_utc_timestamp(),
|
||||
"timestamp": self.services.utils.getUtcTimestamp(),
|
||||
"usage": "This document contains a composed email that can be used with the sendEmail action",
|
||||
"compositionDocuments": len(composition_documents),
|
||||
"attachmentDocuments": len(unique_attachments),
|
||||
|
|
|
|||
|
|
@ -15,15 +15,14 @@ import asyncio
|
|||
|
||||
from modules.workflows.methods.methodBase import MethodBase, action
|
||||
from modules.datamodels.datamodelWorkflow import ActionResult
|
||||
from modules.shared.timezoneUtils import get_utc_timestamp
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
class MethodSharepoint(MethodBase):
|
||||
"""SharePoint operations methods."""
|
||||
|
||||
def __init__(self, service):
|
||||
super().__init__(service)
|
||||
def __init__(self, services):
|
||||
super().__init__(services)
|
||||
self.name = "sharepoint"
|
||||
self.description = "SharePoint operations methods"
|
||||
|
||||
|
|
@ -49,7 +48,7 @@ class MethodSharepoint(MethodBase):
|
|||
return None
|
||||
|
||||
# Configure SharePoint service with the UserConnection
|
||||
if not self.service.sharepoint.setAccessToken(userConnection, self.service.interfaceApp):
|
||||
if not self.service.sharepoint.setAccessTokenFromConnection(userConnection):
|
||||
logger.warning(f"Failed to configure SharePoint service with connection {userConnection.id}")
|
||||
return None
|
||||
|
||||
|
|
@ -785,7 +784,7 @@ class MethodSharepoint(MethodBase):
|
|||
"totalResults": len(found_documents),
|
||||
"maxResults": maxResults,
|
||||
"foundDocuments": found_documents,
|
||||
"timestamp": get_utc_timestamp()
|
||||
"timestamp": self.services.utils.getUtcTimestamp()
|
||||
}
|
||||
|
||||
except Exception as e:
|
||||
|
|
@ -844,13 +843,13 @@ class MethodSharepoint(MethodBase):
|
|||
try:
|
||||
import json
|
||||
# Resolve the reference label to get the actual document list
|
||||
document_list = self.service.getChatDocumentsFromDocumentList([pathObject])
|
||||
document_list = self.services.workflow.getChatDocumentsFromDocumentList([pathObject])
|
||||
if not document_list or len(document_list) == 0:
|
||||
return ActionResult.isFailure(error=f"No document list found for reference: {pathObject}")
|
||||
|
||||
# Get the first document's content (which should be the JSON)
|
||||
first_document = document_list[0]
|
||||
file_data = self.service.getFileData(first_document.fileId)
|
||||
file_data = self.services.workflow.getFileData(first_document.fileId)
|
||||
if not file_data:
|
||||
return ActionResult.isFailure(error=f"No file data found for document: {pathObject}")
|
||||
|
||||
|
|
@ -878,7 +877,7 @@ class MethodSharepoint(MethodBase):
|
|||
|
||||
# Get documents from reference - ensure documentList is a list, not a string
|
||||
# documentList is already normalized above
|
||||
chatDocuments = self.service.getChatDocumentsFromDocumentList(documentList)
|
||||
chatDocuments = self.services.workflow.getChatDocumentsFromDocumentList(documentList)
|
||||
|
||||
if not chatDocuments:
|
||||
return ActionResult.isFailure(error="No documents found for the provided reference")
|
||||
|
|
@@ -1063,7 +1062,7 @@ class MethodSharepoint(MethodBase):
"authority": "microsoft",
"reference": connectionReference
},
"timestamp": get_utc_timestamp()
"timestamp": self.services.utils.getUtcTimestamp()
}
# Use default JSON format for output
@@ -1120,13 +1119,13 @@ class MethodSharepoint(MethodBase):
try:
import json
# Resolve the reference label to get the actual document list
document_list = self.service.getChatDocumentsFromDocumentList([pathObject])
document_list = self.services.workflow.getChatDocumentsFromDocumentList([pathObject])
if not document_list or len(document_list) == 0:
return ActionResult.isFailure(error=f"No document list found for reference: {pathObject}")
# Get the first document's content (which should be the JSON)
first_document = document_list[0]
file_data = self.service.getFileData(first_document.fileId)
file_data = self.services.workflow.getFileData(first_document.fileId)
if not file_data:
return ActionResult.isFailure(error=f"No file data found for document: {pathObject}")
@@ -1221,7 +1220,7 @@ class MethodSharepoint(MethodBase):
# Get documents from reference - ensure documentList is a list, not a string
if isinstance(documentList, str):
documentList = [documentList] # Convert string to list
chatDocuments = self.service.getChatDocumentsFromDocumentList(documentList)
chatDocuments = self.services.workflow.getChatDocumentsFromDocumentList(documentList)
if not chatDocuments:
return ActionResult.isFailure(error="No documents found for the provided reference")
@@ -1311,7 +1310,7 @@ class MethodSharepoint(MethodBase):
for i, (chatDocument, fileName) in enumerate(zip(chatDocuments, fileNames)):
try:
fileId = chatDocument.fileId
file_data = self.service.getFileData(fileId)
file_data = self.services.workflow.getFileData(fileId)
if not file_data:
logger.warning(f"File data not found for fileId: {fileId}")
@@ -1414,7 +1413,7 @@ class MethodSharepoint(MethodBase):
"authority": "microsoft",
"reference": connectionReference
},
"timestamp": get_utc_timestamp()
"timestamp": self.services.utils.getUtcTimestamp()
}
# Use default JSON format for output
@@ -1471,14 +1470,14 @@ class MethodSharepoint(MethodBase):
try:
import json
# Resolve the reference label to get the actual document list
document_list = self.service.getChatDocumentsFromDocumentList([pathObject])
document_list = self.services.workflow.getChatDocumentsFromDocumentList([pathObject])
if not document_list or len(document_list) == 0:
return ActionResult.isFailure(error=f"No document list found for reference: {pathObject}")
# Get the first document's content (which should be the JSON)
first_document = document_list[0]
logger.info(f"Document fileId: {first_document.fileId}, fileName: {first_document.fileName}")
file_data = self.service.getFileData(first_document.fileId)
file_data = self.services.workflow.getFileData(first_document.fileId)
if not file_data:
return ActionResult.isFailure(error=f"No file data found for document: {pathObject} (fileId: {first_document.fileId})")
logger.info(f"File data length: {len(file_data) if file_data else 0}")
@@ -1807,7 +1806,7 @@ class MethodSharepoint(MethodBase):
"includeSubfolders": includeSubfolders,
"sitesSearched": len(sites),
"listResults": list_results,
"timestamp": get_utc_timestamp()
"timestamp": self.services.utils.getUtcTimestamp()
}
# Use default JSON format for output
@@ -18,13 +18,10 @@ logger = logging.getLogger(__name__)
class MethodWeb(MethodBase):
"""Web method implementation for web operations."""
def __init__(self, serviceCenter: Any):
super().__init__(serviceCenter)
def __init__(self, services):
super().__init__(services)
self.name = "web"
self.description = "Web search, crawling, and scraping operations using Tavily"
# Centralized services interface (for AI)
from modules.services import getInterface as getServices
self.services = getServices(self.service.user, self.service.workflow)
@action
async def search(self, parameters: Dict[str, Any]) -> ActionResult:
@@ -143,7 +140,7 @@ class MethodWeb(MethodBase):
)
# Resolve document list reference to ChatDocument objects
chat_documents = self.service.getChatDocumentsFromDocumentList(document_list)
chat_documents = self.services.workflow.getChatDocumentsFromDocumentList(document_list)
if not chat_documents:
return ActionResult(
@@ -160,7 +157,7 @@ class MethodWeb(MethodBase):
logger.info(f"Processing document {i+1}/{len(chat_documents)}: {doc.fileName}")
# Get file data using the service center
file_data = self.service.getFileData(doc.fileId)
file_data = self.services.workflow.getFileData(doc.fileId)
if not file_data:
logger.warning(f"Could not retrieve file data for document: {doc.fileName}")
continue
@@ -1,17 +1,29 @@
# handlingTasks.py
# Refactored for clarity and consolidation
import uuid
import asyncio
import logging
import json
import time
from typing import Dict, Any, Optional, List, Union
from datetime import datetime, UTC
from modules.datamodels.datamodelWorkflow import (TaskStep, TaskContext, ReviewResult, TaskPlan, WorkflowResult, TaskResult, ReviewContext)
from modules.datamodels.datamodelWorkflow import TaskStatus, ActionResult
from modules.datamodels.datamodelChat import ChatWorkflow, ChatMessage, ChatDocument
from modules.interfaces.interfaceAppObjects import getInterface as getAppObjects
from modules.shared.timezoneUtils import get_utc_timestamp
from modules.datamodels.datamodelWorkflow import (
TaskStep,
TaskContext,
ReviewResult,
TaskPlan,
TaskResult,
ReviewContext,
TaskStatus,
ActionResult
)
from modules.datamodels.datamodelChat import (
WorkflowResult,
ChatWorkflow,
ChatMessage,
ChatDocument
)
from modules.workflows.processing.executionState import TaskExecutionState
from modules.workflows.processing.promptFactory import (
createTaskPlanningPrompt,
@@ -23,7 +35,8 @@ from modules.workflows.processing.promptFactory import (
)
from modules.services.serviceDocument.mainServiceDocumentGeneration import DocumentGenerationService
from modules.workflows.processing.promptFactory import methods
import uuid
from modules.workflows.processing.executionState import should_continue
from modules.datamodels.datamodelAi import AiCallOptions, OperationType, ProcessingMode, Priority
logger = logging.getLogger(__name__)
@@ -73,7 +86,6 @@ class HandlingTasks:
# Create proper context object for task planning
# For task planning, we need to create a minimal TaskStep since TaskContext requires it
from modules.datamodels.datamodelWorkflow import TaskStep
planning_task_step = TaskStep(
id="planning",
objective=userInput,
@@ -114,19 +126,21 @@ class HandlingTasks:
self.writeTraceLog("Task Plan Prompt", task_planning_prompt)
# Centralized AI call: Task planning (quality, detailed)
prompt = await self.services.ai.callAi(
options = AiCallOptions(
operationType=OperationType.GENERATE_PLAN,
priority=Priority.QUALITY,
compressPrompt=False,
compressContext=False,
processingMode=ProcessingMode.DETAILED,
maxCost=0.10,
maxProcessingTime=30
)
prompt = await self.services.ai.callAiText(
prompt=task_planning_prompt,
documents=None,
options={
"process_type": "text",
"operation_type": "generate_plan",
"priority": "quality",
"compress_prompt": False,
"compress_documents": False,
"processing_mode": "detailed",
"max_cost": 0.10,
"max_processing_time": 30
}
options=options
)
# Check if AI response is valid
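This hunk shows the recurring AI-call migration in this commit: the loose options dict passed to callAi(...) becomes a typed AiCallOptions object passed to callAiText(...). A minimal sketch of the new call shape, using only names visible in the diff; the helper name and the plain-text return value are assumptions:

from modules.datamodels.datamodelAi import AiCallOptions, OperationType, ProcessingMode, Priority

async def planWithQuality(self, task_planning_prompt: str) -> str:
    # Typed options replace the old "operation_type"/"priority" dict keys.
    options = AiCallOptions(
        operationType=OperationType.GENERATE_PLAN,
        priority=Priority.QUALITY,
        compressPrompt=False,
        compressContext=False,
        processingMode=ProcessingMode.DETAILED,
        maxCost=0.10,            # budget guard; units assumed
        maxProcessingTime=30     # seconds; assumed
    )
    # callAiText is awaited and its text response parsed by the callers above.
    return await self.services.ai.callAiText(
        prompt=task_planning_prompt,
        documents=None,
        options=options
    )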
@@ -252,7 +266,7 @@ class HandlingTasks:
"message": task_summary,
"status": "step",
"sequenceNr": len(workflow.messages) + 1,
"publishedAt": get_utc_timestamp(),
"publishedAt": self.services.utils.getUtcTimestamp(),
"documentsLabel": "task_plan",
"documents": [],
# Add workflow context fields - use current workflow round instead of hardcoded 1
@@ -374,19 +388,20 @@ class HandlingTasks:
self.writeTraceLog("Action Plan Prompt", action_prompt)
# Centralized AI call: Action planning (quality, detailed)
prompt = await self.services.ai.callAi(
options = AiCallOptions(
operationType=OperationType.GENERATE_PLAN,
priority=Priority.QUALITY,
compressPrompt=False,
compressContext=False,
processingMode=ProcessingMode.DETAILED,
maxCost=0.10,
maxProcessingTime=30
)
prompt = await self.services.ai.callAiText(
prompt=action_prompt,
documents=None,
options={
"process_type": "text",
"operation_type": "generate_plan",
"priority": "quality",
"compress_prompt": False,
"compress_documents": False,
"processing_mode": "detailed",
"max_cost": 0.10,
"max_processing_time": 30
}
options=options
)
# Check if AI response is valid
@@ -466,19 +481,20 @@ class HandlingTasks:
prompt = createActionSelectionPrompt(context, self.service)
self.writeTraceLog("React Plan Selection Prompt", prompt)
# Centralized AI call for plan selection (use plan generation quality)
response = await self.services.ai.callAi(
options = AiCallOptions(
operationType=OperationType.GENERATE_PLAN,
priority=Priority.QUALITY,
compressPrompt=False,
compressContext=False,
processingMode=ProcessingMode.DETAILED,
maxCost=0.10,
maxProcessingTime=30
)
response = await self.services.ai.callAiText(
prompt=prompt,
documents=None,
options={
"process_type": "text",
"operation_type": "generate_plan",
"priority": "quality",
"compress_prompt": False,
"compress_documents": False,
"processing_mode": "detailed",
"max_cost": 0.10,
"max_processing_time": 30
}
options=options
)
self.writeTraceLog("React Plan Selection Response", response)
json_start = response.find('{') if response else -1
@@ -496,19 +512,20 @@ class HandlingTasks:
params_prompt = createActionParameterPrompt(context, action, self.service)
self.writeTraceLog("React Parameters Prompt", params_prompt)
# Centralized AI call for parameter suggestion (balanced analysis)
params_resp = await self.services.ai.callAi(
options = AiCallOptions(
operationType=OperationType.ANALYSE_CONTENT,
priority=Priority.BALANCED,
compressPrompt=True,
compressContext=False,
processingMode=ProcessingMode.ADVANCED,
maxCost=0.05,
maxProcessingTime=30
)
params_resp = await self.services.ai.callAiText(
prompt=params_prompt,
documents=None,
options={
"process_type": "text",
"operation_type": "analyse_content",
"priority": "balanced",
"compress_prompt": True,
"compress_documents": False,
"processing_mode": "advanced",
"max_cost": 0.05,
"max_processing_time": 30
}
options=options
)
self.writeTraceLog("React Parameters Response", params_resp)
js = params_resp[params_resp.find('{'):params_resp.rfind('}')+1] if params_resp else '{}'
@@ -564,19 +581,20 @@ class HandlingTasks:
prompt = createRefinementPrompt(context, observation)
self.writeTraceLog("React Refinement Prompt", prompt)
# Centralized AI call for refinement decision (balanced analysis)
resp = await self.services.ai.callAi(
options = AiCallOptions(
operationType=OperationType.ANALYSE_CONTENT,
priority=Priority.BALANCED,
compressPrompt=True,
compressContext=False,
processingMode=ProcessingMode.ADVANCED,
maxCost=0.05,
maxProcessingTime=30
)
resp = await self.services.ai.callAiText(
prompt=prompt,
documents=None,
options={
"process_type": "text",
"operation_type": "analyse_content",
"priority": "balanced",
"compress_prompt": True,
"compress_documents": False,
"processing_mode": "advanced",
"max_cost": 0.05,
"max_processing_time": 30
}
options=options
)
self.writeTraceLog("React Refinement Response", resp)
js = resp[resp.find('{'):resp.rfind('}')+1] if resp else '{}'
@@ -613,7 +631,7 @@ class HandlingTasks:
"message": f"🚀 **Task {task_progress}**",
"status": "step",
"sequenceNr": len(workflow.messages) + 1,
"publishedAt": get_utc_timestamp(),
"publishedAt": self.services.utils.getUtcTimestamp(),
"documentsLabel": f"task_{task_index}_start",
"documents": [],
# Add workflow context fields
@@ -671,7 +689,7 @@ class HandlingTasks:
"message": f"🔁 Step {step}/{state.max_steps}: {selection.get('action',{}).get('method','')}.{selection.get('action',{}).get('name','')} → {'✅' if result.success else '❌'}",
"status": "step",
"sequenceNr": len(workflow.messages) + 1,
"publishedAt": get_utc_timestamp(),
"publishedAt": self.services.utils.getUtcTimestamp(),
"documentsLabel": observation.get('resultLabel'),
"documents": [],
"roundNumber": workflow.currentRound,
@@ -684,7 +702,6 @@ class HandlingTasks:
logger.error(f"React step {step} error: {e}")
break
from modules.workflows.processing.executionState import should_continue
if not should_continue(observation, last_review_dict, step, state.max_steps):
break
step += 1
@@ -759,7 +776,7 @@ class HandlingTasks:
"message": f"⚡ **Action {action_number}/{total_actions}** (Method {action.execMethod}.{action.execAction})",
"status": "step",
"sequenceNr": len(workflow.messages) + 1,
"publishedAt": get_utc_timestamp(),
"publishedAt": self.services.utils.getUtcTimestamp(),
"documentsLabel": f"action_{action_number}_start",
"documents": [],
# Add action progress status
@@ -820,7 +837,7 @@ class HandlingTasks:
"message": completion_message,
"status": "step",
"sequenceNr": len(workflow.messages) + 1,
"publishedAt": get_utc_timestamp(),
"publishedAt": self.services.utils.getUtcTimestamp(),
"documentsLabel": f"task_{task_index}_completion",
"documents": [],
# Add workflow context fields
@@ -914,7 +931,7 @@ class HandlingTasks:
"message": f"🔄 **Task {task_index}** needs retry: {review_result.improvements}",
"status": "step",
"sequenceNr": len(workflow.messages) + 1,
"publishedAt": get_utc_timestamp(),
"publishedAt": self.services.utils.getUtcTimestamp(),
"documentsLabel": f"task_{task_index}_retry",
"documents": [],
"roundNumber": workflow.currentRound,
@@ -961,7 +978,7 @@ class HandlingTasks:
"message": error_message,
"status": "step",
"sequenceNr": len(workflow.messages) + 1,
"publishedAt": get_utc_timestamp(),
"publishedAt": self.services.utils.getUtcTimestamp(),
"actionId": None,
"actionMethod": "task",
"actionName": "task_retry",
@@ -1015,7 +1032,7 @@ class HandlingTasks:
"message": error_message,
"status": "step",
"sequenceNr": len(workflow.messages) + 1,
"publishedAt": get_utc_timestamp(),
"publishedAt": self.services.utils.getUtcTimestamp(),
"actionId": None,
"actionMethod": "task",
"actionName": "task_failure",
@@ -1095,19 +1112,20 @@ class HandlingTasks:
self.writeTraceLog("Result Review Prompt", prompt)
# Centralized AI call: Result validation (balanced analysis)
response = await self.services.ai.callAi(
options = AiCallOptions(
operationType=OperationType.ANALYSE_CONTENT,
priority=Priority.BALANCED,
compressPrompt=True,
compressContext=False,
processingMode=ProcessingMode.ADVANCED,
maxCost=0.05,
maxProcessingTime=30
)
response = await self.services.ai.callAiText(
prompt=prompt,
documents=None,
options={
"process_type": "text",
"operation_type": "analyse_content",
"priority": "balanced",
"compress_prompt": True,
"compress_documents": False,
"processing_mode": "advanced",
"max_cost": 0.05,
"max_processing_time": 30
}
options=options
)
# Log result review response received
@@ -1220,7 +1238,7 @@ class HandlingTasks:
'actions': [action.to_dict() for action in task_actions],
'review_result': review_result,
'workflow_id': workflow.id,
'handover_time': get_utc_timestamp()
'handover_time': self.services.utils.getUtcTimestamp()
}
logger.info(f"Prepared handover for task {task_step.id} in workflow {workflow.id}")
return handover_data
@@ -1269,7 +1287,7 @@ class HandlingTasks:
retryCount=createdAction.get("retryCount", 0),
retryMax=createdAction.get("retryMax", 3),
processingTime=createdAction.get("processingTime"),
timestamp=float(createdAction.get("timestamp", get_utc_timestamp())),
timestamp=float(createdAction.get("timestamp", self.services.utils.getUtcTimestamp())),
result=createdAction.get("result"),
resultDocuments=createdAction.get("resultDocuments", []),
userMessage=createdAction.get("userMessage")
@@ -1462,7 +1480,7 @@ class HandlingTasks:
"message": message_text,
"status": "step",
"sequenceNr": len(workflow.messages) + 1,
"publishedAt": get_utc_timestamp(),
"publishedAt": self.services.utils.getUtcTimestamp(),
"actionId": action.id,
"actionMethod": action.execMethod,
"actionName": action.execAction,
@@ -1784,14 +1802,13 @@ class HandlingTasks:
import logging
import os
from datetime import datetime, UTC
from modules.shared.configuration import APP_CONFIG
# Only write if logger is in debug mode
if logger.level > logging.DEBUG:
return
# Get log directory from configuration
logDir = APP_CONFIG.get("APP_LOGGING_LOG_DIR", "./")
logDir = self.services.utils.configGet("APP_LOGGING_LOG_DIR", "./")
if not os.path.isabs(logDir):
# If relative path, make it relative to the gateway directory
gatewayDir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
@@ -1804,7 +1821,7 @@ class HandlingTasks:
trace_file = os.path.join(logDir, "log_trace.log")
# Format the trace entry
timestamp = datetime.now(UTC).strftime("%Y-%m-%d %H:%M:%S.%f")[:-3]
timestamp = datetime.fromtimestamp(self.services.utils.getUtcTimestamp(), UTC).strftime("%Y-%m-%d %H:%M:%S.%f")[:-3]
trace_entry = f"[{timestamp}] {contextText}\n"
# Add data if provided
@@ -1830,10 +1847,9 @@ class HandlingTasks:
try:
import logging
import os
from modules.shared.configuration import APP_CONFIG
# Get log directory from configuration
logDir = APP_CONFIG.get("APP_LOGGING_LOG_DIR", "./")
logDir = self.services.utils.configGet("APP_LOGGING_LOG_DIR", "./")
if not os.path.isabs(logDir):
# If relative path, make it relative to the gateway directory
gatewayDir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
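Both trace-log hunks above resolve the log directory through the services container instead of reading APP_CONFIG directly. A minimal sketch of that lookup as a standalone helper; the helper name and the final join are assumptions, since the hunks cut off after computing gatewayDir:

import os

def resolveLogDir(services) -> str:
    # Read the configured directory, falling back to the working directory.
    logDir = services.utils.configGet("APP_LOGGING_LOG_DIR", "./")
    if not os.path.isabs(logDir):
        # Anchor relative paths at the gateway root, three levels above this file (assumed).
        gatewayDir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
        logDir = os.path.join(gatewayDir, logDir)
    return logDir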
@@ -9,7 +9,7 @@ import inspect
from typing import Any, Dict, List
from modules.datamodels.datamodelWorkflow import TaskContext, ReviewContext, DocumentExchange
from modules.datamodels.datamodelChat import ChatDocument
from modules.services.serviceDocument.documentUtility import getFileExtension
from modules.services.serviceDocument.subDocumentUtility import getFileExtension
from modules.workflows.methods.methodBase import MethodBase
# Set up logger
@@ -212,8 +212,8 @@ def _getAvailableDocuments(workflow) -> str:
def _getConnectionReferenceList(service) -> List[str]:
"""Get list of all UserConnection objects as references with enhanced state information"""
connections = []
# Get user connections through AppObjects interface
user_connections = service.interfaceApp.getUserConnections(service.user.id)
# Get user connections
user_connections = service.interfaceDbApp.getUserConnections(service.user.id)
refreshed_count = 0
for conn in user_connections:
@@ -4,16 +4,16 @@ from datetime import datetime, UTC
import uuid
import asyncio
from modules.interfaces.interfaceAppObjects import User
from modules.datamodels.datamodelWorkflow import UserInputRequest
from modules.datamodels.datamodelChat import ChatMessage, ChatWorkflow, ChatDocument
from modules.datamodels.datamodelWorkflow import TaskItem, TaskStatus
from modules.interfaces.interfaceChatObjects import ChatObjects
from modules.datamodels.datamodelChat import (
UserInputRequest,
ChatMessage,
ChatWorkflow,
ChatDocument,
WorkflowResult
)
from modules.datamodels.datamodelWorkflow import TaskItem, TaskStatus, TaskContext
from modules.workflows.processing.handlingTasks import HandlingTasks, WorkflowStoppedException
from modules.datamodels.datamodelWorkflow import WorkflowResult
from modules.shared.timezoneUtils import get_utc_timestamp
import uuid
logger = logging.getLogger(__name__)
@@ -31,7 +31,7 @@ class WorkflowManager:
try:
# Debug log to check workflowMode parameter
logger.info(f"WorkflowManager received workflowMode: {workflowMode}")
currentTime = get_utc_timestamp()
currentTime = self.services.utils.getUtcTimestamp()
if workflowId:
workflow = self.services.workflow.getWorkflow(workflowId)
@@ -127,7 +127,7 @@ class WorkflowManager:
raise ValueError(f"Workflow {workflowId} not found")
workflow.status = "stopped"
workflow.lastActivity = get_utc_timestamp()
workflow.lastActivity = self.services.utils.getUtcTimestamp()
self.services.workflow.updateWorkflow(workflowId, {
"status": "stopped",
"lastActivity": workflow.lastActivity
@@ -181,7 +181,7 @@ class WorkflowManager:
"message": userInput.prompt,
"status": "first",
"sequenceNr": 1,
"publishedAt": get_utc_timestamp(),
"publishedAt": self.services.utils.getUtcTimestamp(),
"documentsLabel": context_label,
"documents": [],
# Add workflow context fields
@@ -241,7 +241,6 @@ class WorkflowManager:
logger.info(f"Task {current_task_index}/{total_tasks}: {task_step.objective}")
# Build TaskContext (mode-specific behavior is inside HandlingTasks)
from modules.datamodels.datamodelWorkflow import TaskContext
task_context = TaskContext(
task_step=task_step,
workflow=workflow,
@@ -298,7 +297,7 @@ class WorkflowManager:
"message": "🛑 Workflow stopped by user",
"status": "last",
"sequenceNr": len(workflow.messages) + 1,
"publishedAt": get_utc_timestamp(),
"publishedAt": self.services.utils.getUtcTimestamp(),
"documentsLabel": "workflow_stopped",
"documents": [],
# Add workflow context fields
@@ -315,7 +314,7 @@ class WorkflowManager:
# Update workflow status to stopped
workflow.status = "stopped"
workflow.lastActivity = get_utc_timestamp()
workflow.lastActivity = self.services.utils.getUtcTimestamp()
self.services.workflow.updateWorkflow(workflow.id, {
"status": "stopped",
"lastActivity": workflow.lastActivity
@@ -330,7 +329,7 @@ class WorkflowManager:
"message": "🛑 Workflow stopped by user",
"status": "last",
"sequenceNr": len(workflow.messages) + 1,
"publishedAt": get_utc_timestamp(),
"publishedAt": self.services.utils.getUtcTimestamp(),
"documentsLabel": "workflow_stopped",
"documents": [],
# Add workflow context fields
@@ -347,7 +346,7 @@ class WorkflowManager:
# Update workflow status to stopped
workflow.status = "stopped"
workflow.lastActivity = get_utc_timestamp()
workflow.lastActivity = self.services.utils.getUtcTimestamp()
self.services.workflow.updateWorkflow(workflow.id, {
"status": "stopped",
"lastActivity": workflow.lastActivity,
@@ -372,7 +371,7 @@ class WorkflowManager:
"message": f"Workflow failed: {workflow_result.error or 'Unknown error'}",
"status": "last",
"sequenceNr": len(workflow.messages) + 1,
"publishedAt": get_utc_timestamp(),
"publishedAt": self.services.utils.getUtcTimestamp(),
"documentsLabel": "workflow_failure",
"documents": [],
# Add workflow context fields
@@ -389,7 +388,7 @@ class WorkflowManager:
# Update workflow status to failed
workflow.status = "failed"
workflow.lastActivity = get_utc_timestamp()
workflow.lastActivity = self.services.utils.getUtcTimestamp()
self.services.workflow.updateWorkflow(workflow.id, {
"status": "failed",
"lastActivity": workflow.lastActivity,
@@ -419,7 +418,7 @@ class WorkflowManager:
"message": f"Error processing workflow results: {str(e)}",
"status": "last",
"sequenceNr": len(workflow.messages) + 1,
"publishedAt": get_utc_timestamp(),
"publishedAt": self.services.utils.getUtcTimestamp(),
"documentsLabel": "workflow_error",
"documents": [],
# Add workflow context fields
@@ -436,7 +435,7 @@ class WorkflowManager:
# Update workflow status to failed
workflow.status = "failed"
workflow.lastActivity = get_utc_timestamp()
workflow.lastActivity = self.services.utils.getUtcTimestamp()
self.services.workflow.updateWorkflow(workflow.id, {
"status": "failed",
"lastActivity": workflow.lastActivity,
@@ -462,7 +461,7 @@ class WorkflowManager:
"message": feedback,
"status": "last",
"sequenceNr": len(workflow.messages) + 1,
"publishedAt": get_utc_timestamp(),
"publishedAt": self.services.utils.getUtcTimestamp(),
"documentsLabel": "workflow_feedback",
"documents": [],
# Add workflow context fields
@@ -481,7 +480,7 @@ class WorkflowManager:
# Update workflow status to completed
workflow.status = "completed"
workflow.lastActivity = get_utc_timestamp()
workflow.lastActivity = self.services.utils.getUtcTimestamp()
# Update workflow in database
self.services.workflow.updateWorkflow(workflow.id, {