diff --git a/connectors/connector_aichat_anthropic.py b/connectors/connector_aichat_anthropic.py index de150895..15728755 100644 --- a/connectors/connector_aichat_anthropic.py +++ b/connectors/connector_aichat_anthropic.py @@ -4,10 +4,10 @@ from typing import Dict, Any, List, Optional, Union from fastapi import HTTPException from modules.configuration import APP_CONFIG -# Logger konfigurieren +# Configure logger logger = logging.getLogger(__name__) -# Konfigurationsdaten laden +# Load configuration data def load_config_data(): return { "api_key": APP_CONFIG.get('Connector_AiAnthropic_API_SECRET'), @@ -19,19 +19,19 @@ def load_config_data(): class ChatService: """ - Connector für die Kommunikation mit der Anthropic API. + Connector for communication with the Anthropic API. """ def __init__(self): - # Konfiguration laden + # Load configuration self.config = load_config_data() self.api_key = self.config["api_key"] self.api_url = self.config["api_url"] self.model_name = self.config["model_name"] - # HttpClient für API-Aufrufe + # HttpClient for API calls self.http_client = httpx.AsyncClient( - timeout=120.0, # Längeres Timeout für komplexe Anfragen + timeout=120.0, # Longer timeout for complex requests headers={ "x-api-key": self.api_key, "anthropic-version": "2023-06-01", # Anthropic API Version @@ -39,35 +39,35 @@ class ChatService: } ) - logger.info(f"Anthropic Connector initialisiert mit Modell: {self.model_name}") + logger.info(f"Anthropic Connector initialized with model: {self.model_name}") async def call_api(self, messages: List[Dict[str, Any]], temperature: float = None, max_tokens: int = None) -> Dict[str, Any]: """ - Ruft die Anthropic API mit den gegebenen Nachrichten auf. + Calls the Anthropic API with the given messages. 
Args: - messages: Liste von Nachrichten im OpenAI-Format (role, content) - temperature: Temperatur für die Antwortgenerierung (0.0-1.0) - max_tokens: Maximale Anzahl der Token in der Antwort + messages: List of messages in OpenAI format (role, content) + temperature: Temperature for response generation (0.0-1.0) + max_tokens: Maximum number of tokens in the response Returns: - Die Antwort umgewandelt ins OpenAI-Format + The response converted to OpenAI format Raises: - HTTPException: Bei Fehlern in der API-Kommunikation + HTTPException: For errors in API communication """ try: - # OpenAI-Format in Anthropic-Format umwandeln + # Convert OpenAI format to Anthropic format formatted_messages = self._convert_to_anthropic_format(messages) - # Verwende Parameter aus der Konfiguration, falls keine überschrieben wurden + # Use parameters from configuration if none were overridden if temperature is None: temperature = self.config.get("temperature", 0.2) if max_tokens is None: max_tokens = self.config.get("max_tokens", 2000) - # Anthropic API Payload erstellen + # Create Anthropic API payload payload = { "model": self.model_name, "messages": formatted_messages, @@ -81,58 +81,58 @@ class ChatService: ) if response.status_code != 200: - logger.error(f"Anthropic API-Fehler: {response.status_code} - {response.text}") - raise HTTPException(status_code=500, detail="Fehler bei der Kommunikation mit Anthropic API") + logger.error(f"Anthropic API error: {response.status_code} - {response.text}") + raise HTTPException(status_code=500, detail="Error communicating with Anthropic API") - # Antwort im Anthropic-Format in OpenAI-Format umwandeln + # Convert response from Anthropic format to OpenAI format anthropic_response = response.json() openai_formatted_response = self._convert_to_openai_format(anthropic_response) return openai_formatted_response except Exception as e: - logger.error(f"Fehler beim Aufruf der Anthropic API: {str(e)}") - raise HTTPException(status_code=500, 
detail=f"Fehler beim Aufruf der Anthropic API: {str(e)}") + logger.error(f"Error calling Anthropic API: {str(e)}") + raise HTTPException(status_code=500, detail=f"Error calling Anthropic API: {str(e)}") def _convert_to_anthropic_format(self, openai_messages: List[Dict[str, Any]]) -> List[Dict[str, Any]]: """ - Konvertiert Nachrichten vom OpenAI-Format ins Anthropic-Format. + Converts messages from OpenAI format to Anthropic format. - OpenAI verwendet: + OpenAI uses: [{"role": "system", "content": "..."}, {"role": "user", "content": "..."}, {"role": "assistant", "content": "..."}] - Anthropic verwendet: + Anthropic uses: [{"role": "user", "content": "..."}, {"role": "assistant", "content": "..."}] - Anmerkung: Anthropic hat kein direktes System-Message-Äquivalent, - daher fügen wir System-Nachrichten in die erste User-Nachricht ein. + Note: Anthropic has no direct system message equivalent, + so we add system messages to the first user message. """ anthropic_messages = [] system_content = "" - # Extrahiere zuerst alle System-Nachrichten + # First extract all system messages for msg in openai_messages: if msg.get("role") == "system": system_content += msg.get("content", "") + "\n\n" - # Konvertiere die restlichen Nachrichten + # Convert the remaining messages for i, msg in enumerate(openai_messages): role = msg.get("role") content = msg.get("content", "") - # System-Nachrichten überspringen (bereits extrahiert) + # Skip system messages (already extracted) if role == "system": continue - # Für die erste User-Nachricht: System-Inhalte voranstellen, falls vorhanden + # For the first user message: prepend system content if available if role == "user" and system_content and not any(m.get("role") == "user" for m in anthropic_messages): if isinstance(content, str): content = system_content + content elif isinstance(content, list): - # Wenn content ein Array ist (für Multimodal-Nachrichten) + # If content is an array (for multimodal messages) text_parts = [] for part in 
content: if part.get("type") == "text": @@ -141,7 +141,7 @@ class ChatService: if text_parts: text_parts[0]["text"] = system_content + text_parts[0].get("text", "") - # Anthropic unterstützt nur "user" und "assistant" als Rollen + # Anthropic only supports "user" and "assistant" roles if role not in ["user", "assistant"]: role = "user" @@ -151,21 +151,21 @@ class ChatService: def _convert_to_openai_format(self, anthropic_response: Dict[str, Any]) -> Dict[str, Any]: """ - Konvertiert eine Antwort vom Anthropic-Format ins OpenAI-Format. + Converts a response from Anthropic format to OpenAI format. """ - # Extrahiere Inhalt aus Anthropic-Antwort + # Extract content from Anthropic response content = "" if "content" in anthropic_response: if isinstance(anthropic_response["content"], list): - # Inhalt ist eine Liste von Teilen (bei neueren API-Versionen) + # Content is a list of parts (in newer API versions) for part in anthropic_response["content"]: if part.get("type") == "text": content += part.get("text", "") else: - # Direkter Inhalt als String (bei älteren API-Versionen) + # Direct content as string (in older API versions) content = anthropic_response["content"] - # Erstelle OpenAI-formatierte Antwort + # Create OpenAI-formatted response return { "id": anthropic_response.get("id", ""), "object": "chat.completion", @@ -185,33 +185,33 @@ class ChatService: async def analyze_image(self, image_data: Union[str, bytes], mime_type: str = None, prompt: str = "Describe this image") -> str: """ - Analysiert ein Bild mit der OpenAI Vision API. + Analyzes an image with the Anthropic API.
Args: - image_data: Entweder ein Dateipfad (str) oder Bilddaten (bytes) - mime_type: Der MIME-Typ des Bildes (optional, nur für Binärdaten) - prompt: Der Prompt für die Analyse + image_data: Either a file path (str) or image data (bytes) + mime_type: The MIME type of the image (optional, only for binary data) + prompt: The prompt for analysis Returns: - Die Antwort der OpenAI Vision API als Text + The response from the Anthropic API as text """ try: - # Unterscheide zwischen Dateipfad und Binärdaten + # Distinguish between file path and binary data if isinstance(image_data, str): - # Es ist ein Dateipfad - importiere filehandling nur bei Bedarf + # It's a file path - import filehandling only when needed from modules import agentservice_filemanager as file_handler base64_data, auto_mime_type = file_handler.encode_file_to_base64(image_data) mime_type = mime_type or auto_mime_type else: - # Es sind Binärdaten + # It's binary data import base64 base64_data = base64.b64encode(image_data).decode('utf-8') - # MIME-Typ muss angegeben sein für Binärdaten + # MIME type must be specified for binary data if not mime_type: - # Fallback auf generischen Bildtyp + # Fallback to generic image type mime_type = "image/png" - # Bereite den Payload für die Vision API vor + # Prepare the payload for the Vision API messages = [ { "role": "user", @@ -227,12 +227,12 @@ class ChatService: } ] - # Verwende die bestehende call_api Funktion mit dem Vision-Modell + # Use the existing call_api function with the Vision model response = await self.call_api(messages) - # Inhalt extrahieren und zurückgeben + # Extract and return content return response["choices"][0]["message"]["content"] except Exception as e: - logger.error(f"Fehler bei der Bildanalyse: {str(e)}", exc_info=True) - return f"[Fehler bei der Bildanalyse: {str(e)}]" \ No newline at end of file + logger.error(f"Error during image analysis: {str(e)}", exc_info=True) + return f"[Error during image analysis: {str(e)}]" \ No newline at 
end of file diff --git a/connectors/connector_aichat_openai.py b/connectors/connector_aichat_openai.py index a83aae25..456490a6 100644 --- a/connectors/connector_aichat_openai.py +++ b/connectors/connector_aichat_openai.py @@ -4,10 +4,10 @@ from typing import Dict, Any, List, Optional, Union from fastapi import HTTPException from modules.configuration import APP_CONFIG -# Logger konfigurieren +# Configure logger logger = logging.getLogger(__name__) -# Konfigurationsdaten laden +# Load configuration data def load_config_data(): return { "api_key": APP_CONFIG.get('Connector_AiOpenai_API_SECRET'), @@ -19,44 +19,44 @@ def load_config_data(): class ChatService: """ - Connector für die Kommunikation mit der OpenAI API. + Connector for communication with the OpenAI API. """ def __init__(self): - # Konfiguration laden + # Load configuration self.config = load_config_data() self.api_key = self.config["api_key"] self.api_url = self.config["api_url"] self.model_name = self.config["model_name"] - # HttpClient für API-Aufrufe + # HttpClient for API calls self.http_client = httpx.AsyncClient( - timeout=120.0, # Längeres Timeout für komplexe Anfragen + timeout=120.0, # Longer timeout for complex requests headers={ "Authorization": f"Bearer {self.api_key}", "Content-Type": "application/json" } ) - logger.info(f"OpenAI Connector initialisiert mit Modell: {self.model_name}") + logger.info(f"OpenAI Connector initialized with model: {self.model_name}") async def call_api(self, messages: List[Dict[str, Any]], temperature: float = None, max_tokens: int = None) -> str: """ - Ruft die OpenAI API mit den gegebenen Nachrichten auf. + Calls the OpenAI API with the given messages. 
Args: - messages: Liste von Nachrichten im OpenAI-Format (role, content) - temperature: Temperatur für die Antwortgenerierung (0.0-1.0) - max_tokens: Maximale Anzahl der Token in der Antwort + messages: List of messages in OpenAI format (role, content) + temperature: Temperature for response generation (0.0-1.0) + max_tokens: Maximum number of tokens in the response Returns: - Die Antwort der OpenAI API + The response from the OpenAI API Raises: - HTTPException: Bei Fehlern in der API-Kommunikation + HTTPException: For errors in API communication """ try: - # Verwende Parameter aus der Konfiguration, falls keine überschrieben wurden + # Use parameters from configuration if none were overridden if temperature is None: temperature = self.config.get("temperature", 0.2) @@ -76,51 +76,51 @@ class ChatService: ) if response.status_code != 200: - logger.error(f"OpenAI API-Fehler: {response.status_code} - {response.text}") - raise HTTPException(status_code=500, detail="Fehler bei der Kommunikation mit OpenAI API") + logger.error(f"OpenAI API error: {response.status_code} - {response.text}") + raise HTTPException(status_code=500, detail="Error communicating with OpenAI API") response_json = response.json() content = response_json["choices"][0]["message"]["content"] return content except Exception as e: - logger.error(f"Fehler beim Aufruf der OpenAI API: {str(e)}") - raise HTTPException(status_code=500, detail=f"Fehler beim Aufruf der OpenAI API: {str(e)}") + logger.error(f"Error calling OpenAI API: {str(e)}") + raise HTTPException(status_code=500, detail=f"Error calling OpenAI API: {str(e)}") async def close(self): - """Schließt den HTTP-Client beim Beenden der Anwendung""" + """Closes the HTTP client when the application exits""" await self.http_client.aclose() async def analyze_image(self, image_data: Union[str, bytes], mime_type: str = None, prompt: str = "Describe this image") -> str: """ - Analysiert ein Bild mit der OpenAI Vision API. 
+ Analyzes an image with the OpenAI Vision API. Args: - image_data: Entweder ein Dateipfad (str) oder Bilddaten (bytes) - mime_type: Der MIME-Typ des Bildes (optional, nur für Binärdaten) - prompt: Der Prompt für die Analyse + image_data: Either a file path (str) or image data (bytes) + mime_type: The MIME type of the image (optional, only for binary data) + prompt: The prompt for analysis Returns: - Die Antwort der OpenAI Vision API als Text + The response from the OpenAI Vision API as text """ try: logger.debug("Starting image analysis...") - # Unterscheide zwischen Dateipfad und Binärdaten + # Distinguish between file path and binary data if isinstance(image_data, str): - # Es ist ein Dateipfad - importiere filehandling nur bei Bedarf + # It's a file path - import filehandling only when needed from modules import agentservice_filemanager as file_handler base64_data, auto_mime_type = file_handler.encode_file_to_base64(image_data) mime_type = mime_type or auto_mime_type else: - # Es sind Binärdaten + # It's binary data import base64 base64_data = base64.b64encode(image_data).decode('utf-8') - # MIME-Typ muss angegeben sein für Binärdaten + # MIME type must be specified for binary data if not mime_type: - # Fallback auf generischen Bildtyp + # Fallback to generic image type mime_type = "image/png" - # Bereite den Payload für die Vision API vor + # Prepare the payload for the Vision API messages = [ { "role": "user", @@ -136,12 +136,12 @@ class ChatService: } ] - # Verwende die bestehende call_api Funktion mit dem Vision-Modell + # Use the existing call_api function with the Vision model response = await self.call_api(messages) - # Inhalt extrahieren und zurückgeben + # Extract and return content return response except Exception as e: - logger.error(f"Fehler bei der Bildanalyse: {str(e)}", exc_info=True) - return f"[Fehler bei der Bildanalyse: {str(e)}]" \ No newline at end of file + logger.error(f"Error during image analysis: {str(e)}", exc_info=True) + return 
f"[Error during image analysis: {str(e)}]" \ No newline at end of file diff --git a/connectors/connector_db_json.py b/connectors/connector_db_json.py index 183d0bc5..919c2bfa 100644 --- a/connectors/connector_db_json.py +++ b/connectors/connector_db_json.py @@ -278,7 +278,7 @@ class DatabaseConnector: # Public API - + def get_tables(self, filter_criteria: Dict[str, Any] = None) -> List[str]: """ Returns a list of all available tables. diff --git a/modules/auth.py b/modules/auth.py index fd4f159c..fecb7ab8 100644 --- a/modules/auth.py +++ b/modules/auth.py @@ -23,14 +23,14 @@ logger = logging.getLogger(__name__) def create_access_token(data: dict, expires_delta: Optional[timedelta] = None) -> str: """ - Erstellt ein JWT Access Token. + Creates a JWT Access Token. Args: - data: Zu kodierende Daten (meist Benutzer-ID oder Benutzername) - expires_delta: Gültigkeitsdauer des Tokens (optional) + data: Data to encode (usually user ID or username) + expires_delta: Validity duration of the token (optional) Returns: - JWT Token als String + JWT Token as string """ to_encode = data.copy() @@ -47,85 +47,85 @@ def create_access_token(data: dict, expires_delta: Optional[timedelta] = None) - async def get_current_user(token: str = Depends(oauth2_scheme)) -> Dict[str, Any]: """ - Extrahiert und validiert den aktuellen Benutzer aus dem JWT Token. + Extracts and validates the current user from the JWT token. 
Args: - token: JWT Token aus dem Authorization-Header + token: JWT Token from the Authorization header Returns: - Benutzerdaten + User data Raises: - HTTPException: Bei ungültigem Token oder Benutzer + HTTPException: For invalid token or user """ credentials_exception = HTTPException( status_code=status.HTTP_401_UNAUTHORIZED, - detail="Ungültige Authentifizierungsdaten", + detail="Invalid authentication credentials", headers={"WWW-Authenticate": "Bearer"}, ) try: - # Token dekodieren + # Decode token payload = jwt.decode(token, SECRET_KEY, algorithms=[ALGORITHM]) - # Benutzername aus dem Token extrahieren + # Extract username from token username: str = payload.get("sub") if username is None: raise credentials_exception - # Mandanten-ID aus dem Token extrahieren (falls vorhanden) - mandate_id: int = payload.get("mandate_id", 1) # Standard: Root-Mandant + # Extract mandate ID from token (if present) + mandate_id: int = payload.get("mandate_id", 1) # Default: Root mandate except JWTError: - logger.warning("Ungültiges JWT Token") + logger.warning("Invalid JWT Token") raise credentials_exception - # Gateway-Interface ohne Kontext initialisieren + # Initialize Gateway Interface without context gateway = get_gateway_interface() - # Benutzer aus der Datenbank abrufen + # Retrieve user from database user = gateway.get_user_by_username(username) if user is None: - logger.warning(f"Benutzer {username} nicht gefunden") + logger.warning(f"User {username} not found") raise credentials_exception if user.get("disabled", False): - logger.warning(f"Benutzer {username} ist deaktiviert") - raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Benutzer ist deaktiviert") + logger.warning(f"User {username} is disabled") + raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="User is disabled") return user async def get_current_active_user(current_user: Dict[str, Any] = Depends(get_current_user)) -> Dict[str, Any]: """ - Stellt sicher, dass der Benutzer aktiv 
ist. + Ensures that the user is active. Args: - current_user: Aktuelle Benutzerdaten + current_user: Current user data Returns: - Benutzerdaten + User data Raises: - HTTPException: Wenn der Benutzer deaktiviert ist + HTTPException: If the user is disabled """ if current_user.get("disabled", False): - raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Benutzer ist deaktiviert") + raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="User is disabled") return current_user async def get_user_context(current_user: Dict[str, Any]) -> Tuple[int, int]: """ - Extrahiert die Mandanten-ID und Benutzer-ID aus dem aktuellen Benutzer. + Extracts the mandate ID and user ID from the current user. Enhanced with better logging. Args: - current_user: Der aktuelle Benutzer + current_user: The current user Returns: - Tuple von (mandate_id, user_id) + Tuple of (mandate_id, user_id) """ # Default values default_mandate_id = 0 diff --git a/modules/chat.py b/modules/chat.py index d03eb971..442341f6 100644 --- a/modules/chat.py +++ b/modules/chat.py @@ -1,7 +1,7 @@ """ -ChatManager Modul zur Verwaltung von AI-Chat-Workflows. -Implementiert eine kompakte und modulare Architektur für die Verarbeitung -von Benutzeranfragen, Agentenausführung und Ergebnisformatierung. +ChatManager Module for managing AI-Chat workflows. +Implements a compact and modular architecture for processing +user requests, agent execution, and result formatting. 
""" import os @@ -13,28 +13,28 @@ import base64 from datetime import datetime from typing import Dict, Any, List, Optional, Union -# Notwendige Importe +# Required imports from connectors.connector_aichat_openai import ChatService from modules.chat_registry import get_agent_registry -from modules.lucydom_interface import get_lucydom_interface +from modules.lucydom_interface import get_lucydom_interface, GLOBAL_SETTINGS from modules.chat_content_extraction import get_document_contents -# Logger konfigurieren +# Configure logger logger = logging.getLogger(__name__) class ChatManager: """ - Verwaltet die Verarbeitung von Chat-Anfragen, Agentenausführung und - die Integration von Ergebnissen in den Workflow. + Manages the processing of chat requests, agent execution, and + the integration of results into the workflow. """ def __init__(self, mandate_id: int, user_id: int): """ - Initialisiert den ChatManager mit Mandanten- und Benutzerkontext. + Initializes the ChatManager with mandate and user context. Args: - mandate_id: ID des aktuellen Mandanten - user_id: ID des aktuellen Benutzers + mandate_id: ID of the current mandate + user_id: ID of the current user """ self.mandate_id = mandate_id self.user_id = user_id @@ -42,33 +42,42 @@ class ChatManager: self.lucy_interface = get_lucydom_interface(mandate_id, user_id) self.agent_registry = get_agent_registry() self.agent_registry.set_ai_service(self.ai_service) + + # Set AI service in lucy interface for language support + self.lucy_interface.set_ai_service(self.ai_service) ### Chat Management async def chat_run(self, user_input: Dict[str, Any], workflow_id: Optional[str] = None) -> Dict[str, Any]: """ - Hauptfunktion zur Integration von Benutzeranfragen in den Workflow. + Main function for integrating user requests into the workflow. 
Args: - user_input: Dictionary mit Benutzeranfrage und Datei-IDs - workflow_id: Optional - ID des Workflows (None für neue Workflows) + user_input: Dictionary with user request and file IDs + workflow_id: Optional - ID of the workflow (None for new workflows) Returns: - Workflow-Objekt mit aktualisiertem Zustand + Workflow object with updated state """ - # 1. Workflow initialisieren oder bestehenden laden + # 1. Initialize workflow or load existing one workflow = self.workflow_init(workflow_id) + self.log_add(workflow, "Starting workflow processing", level="info", progress=0) - # 2. User-Input in Message-Objekt transformieren und im Workflow speichern + # 2. Transform user input into a message object and save in workflow message_user = await self.chat_message_to_workflow("user", "", user_input, workflow) - # 3. Projektleiter-Prompt erstellen und Antwort analysieren + # 3. Create project manager prompt and analyze response + self.log_add(workflow, "Analyzing request and planning work", level="info", progress=10) project_manager_response = await self.chat_prompt(message_user, workflow) obj_final_documents = project_manager_response.get("obj_final_documents", []) obj_workplan = project_manager_response.get("obj_workplan", []) obj_user_response = project_manager_response.get("obj_user_response", "") - # 4. Speichere die Antwort als Message im Workflow und füge Log-Einträge hinzu + # Get detected language and set it in the lucy interface + user_language = project_manager_response.get("user_language", "en") + self.lucy_interface.set_user_language(user_language) + + # 4. 
Save the response as a message in the workflow and add log entries response_message = { "role": "assistant", "agent_name": "project_manager", @@ -76,50 +85,67 @@ class ChatManager: } self.message_add(workflow, response_message) - self.log_add(workflow, f"Geplante Ergebnisse: {self.parse_json2text(obj_final_documents)}") - self.log_add(workflow, f"Arbeitsplan: {self.parse_json2text(obj_workplan)}") - self.log_add(workflow, f"Info an den User: {obj_user_response}") - - # 5. Agenten gemäss Workplan ausführen + self.log_add(workflow, f"Planned outputs: {len(obj_final_documents)} documents", level="info", progress=20) + self.log_add(workflow, f"Work plan created with {len(obj_workplan)} steps", level="info", progress=25) + + # 5. Execute agents according to work plan obj_results = [] if obj_workplan: - for task in obj_workplan: + total_tasks = len(obj_workplan) + for task_index, task in enumerate(obj_workplan): + agent_name = task.get("agent", "unknown") + progress_value = 30 + int((task_index / total_tasks) * 60) # Progress from 30% to 90% + + progress_msg = f"Running task {task_index+1}/{total_tasks}: {agent_name}" + self.log_add(workflow, progress_msg, level="info", progress=progress_value) + task_results = await self.agent_processing(task, workflow) - obj_results.extend(task_results) + obj_results.extend(task_results) + + # Log completion of this task + self.log_add( + workflow, + f"Completed task {task_index+1}/{total_tasks}: {agent_name}", + level="info", + progress=progress_value + (60/total_tasks)/2 + ) - # 6. Erstelle die finale Antwort mit den relevanten Dokumenten aus obj_final_documents + # 6. Create the final response with relevant documents from obj_final_documents + self.log_add(workflow, "Creating final response", level="info", progress=90) final_message = await self.chat_final_message(obj_user_response, obj_final_documents, obj_results) self.message_add(workflow, final_message) - # 7. Finalisiere den Workflow + # 7. 
Finalize the workflow self.workflow_finish(workflow) + self.log_add(workflow, "Workflow completed successfully", level="info", progress=100) return workflow async def chat_prompt(self, message_user: Dict[str, Any], workflow: Dict[str, Any]) -> Dict[str, Any]: """ - Erstellt den Prompt für den Projektleiter und verarbeitet seine Antwort. + Creates the prompt for the project manager and processes the response. Args: - message_user: Message-Objekt mit Benutzeranfrage - workflow: Aktuelles Workflow-Objekt + message_user: Message object with user request + workflow: Current workflow object Returns: - Antwort des Projektleiters mit obj_final_documents, obj_workplan und obj_user_response + Project manager's response with obj_final_documents, obj_workplan and obj_user_response """ - # Verfügbare Agenten mit ihren Fähigkeiten abrufen + # Get available agents with their capabilities available_agents = self.agent_profiles() - # Erstelle eine Zusammenfassung des Workflows + # Create a workflow summary workflow_summary = await self.workflow_summarize(workflow, message_user) - # Liste der aktuell verfügbaren Dokumente aus User-Input oder bereits generierten Dokumenten erstellen + # Create a list of currently available documents from user input or previously generated documents available_documents = self.available_documents_get(workflow, message_user) available_docs_str = json.dumps(available_documents, indent=2) - # Erstelle den Prompt für den Projektleiter + # Create the prompt for the project manager with language detection requirement prompt = f""" Based on the user request and the provided documents, please analyze the requirements and create a processing plan. +Also, identify the language of the user's request and include it in your response. {message_user.get('content')} @@ -145,6 +171,7 @@ Please analyze the request and create: 1. A list of required result documents (obj_final_documents) 2. A plan for executing agents (obj_workplan) 3. 
A clear response to the user explaining what you're doing (obj_user_response) +4. Identified language of the user's request (user_language) ## IMPORTANT RULES FOR THE WORKPLAN: 1. Each input document must either already exist (provided by the user or previously created by an agent) or be created by an agent before it's used. @@ -176,7 +203,8 @@ JSON_OUTPUT = {{ }} # Multiple agent tasks can be added here and should build logically on each other ], - "obj_user_response": "Information to the user about how his request will be solved." + "obj_user_response": "Information to the user about how his request will be solved.", + "user_language": "en" # Language code (e.g., en, de, fr, es) based on the user's request }} ## RULES for input_documents: @@ -199,9 +227,9 @@ JSON_OUTPUT = {{ 4. If you use label for an existing file """ - # Rufe den AI-Service auf, um die Antwort des Projektleiters zu erhalten + # Call the AI service through lucy_interface for language support logger.debug(f"Planning prompt: {prompt}") - project_manager_output = await self.ai_service.call_api([ + project_manager_output = await self.lucy_interface.call_ai([ { "role": "system", "content": "You are an experienced project manager who analyzes user requests and creates work plans. You pay very careful attention to ensure that all document dependencies are correct and that no non-existent documents are defined as inputs. The output follows strictly the specified format." @@ -212,37 +240,40 @@ JSON_OUTPUT = {{ } ]) - # Parsen der JSON-Antwort + # Parse the JSON response return self.parse_json_response(project_manager_output) async def chat_message_to_workflow(self, role: str, agent_name: str, chat_message: Dict[str, Any], workflow: Dict[str, Any]) -> Dict[str, Any]: """ - Integriert Benutzereingaben in ein Message-Objekt inklusive Dateien mit vollständigen Inhalten. + Integrates user inputs into a Message object including files with complete contents. 
Args: - chat_message: Eingabedaten "prompt"=str, "list_file_id"=[] + role: Role of the message sender ('user' or 'assistant') + agent_name: Name of the agent, if message is from an agent + chat_message: Input data with "prompt"=str, "list_file_id"=[] + workflow: Current workflow object Returns: - Message-Objekt mit Inhalt und Dokumenten samt Inhalten + Message object with content and documents including contents """ logger.info(f"Message from {role} {agent_name} sent with {len(chat_message.get('list_file_id', []))} documents") logger.debug(f"message = {self.parse_json2text(chat_message)}.") - # Nachrichteninhalt überprüfen + # Check message content message_content = chat_message.get("prompt", "") if isinstance(message_content, dict) and "content" in message_content: message_content = message_content["content"] - # Wenn Nachrichteninhalt leer ist, kein Chat + # If message content is empty, no chat if role=="user" and (message_content is None or message_content.strip() == ""): logger.warning(f"Empty message, no chat") message_content = "(No user input received)" - # Zusätzliche Dateien verarbeiten mit vollständigen Inhalten + # Process additional files with complete contents additional_fileids = chat_message.get("list_file_id", []) additional_files = await self.process_file_ids(additional_fileids) - # Nachrichtenobjekt erstellen + # Create message object message_object = { "role": role, "agent_name": agent_name, @@ -250,11 +281,11 @@ JSON_OUTPUT = {{ "documents": additional_files } - message_object=self.message_add(workflow, message_object) + message_object = self.message_add(workflow, message_object) logger.debug(f"message_user = {self.parse_json2text(message_object)}.") return message_object - async def chat_final_message(self, obj_user_response: str, obj_final_documents: List[Dict[str, Any]], obj_results: List[Dict[str, Any]], ) -> Dict[str, Any]: + async def chat_final_message(self, obj_user_response: str, obj_final_documents: List[Dict[str, Any]], obj_results: 
List[Dict[str, Any]]) -> Dict[str, Any]: """ Creates the final response message with review of proposed and delivered. @@ -274,7 +305,7 @@ JSON_OUTPUT = {{ # Find matching document in results for doc in obj_results: - doc_name=self.get_filename(doc) + doc_name = self.get_filename(doc) # Check if this document matches the answer specification if doc_name == answer_label: content_ref = [] @@ -287,18 +318,19 @@ JSON_OUTPUT = {{ matching_documents.append(doc_ref) break - final_prompt = await self.ai_service.call_api([ + # Use the lucy_interface for language-aware AI calls + final_prompt = await self.lucy_interface.call_ai([ {"role": "system", "content": "You are a project manager, who delivers results to a user."}, {"role": "user", "content": f""" - Give the final short feedback to the user with reference to the initial statement (obj_user_response). Provide a list of delivered files (files_deliveded). If in the list of delivered files (files_delivered) some files from the original list (files_promised) are not available, then just give a comment on this, otherwise task is completed. + Give the final short feedback to the user with reference to the initial statement (obj_user_response). Provide a list of delivered files (files_delivered). If in the list of delivered files (files_delivered) some files from the original list (files_promised) are not available, then just give a comment on this, otherwise task is completed. 
Here the data: obj_user_response = {self.parse_json2text(obj_user_response)} files_promised = {self.parse_json2text(matching_documents)} - files_deliveded = {self.parse_json2text(obj_user_response)} + files_delivered = {self.parse_json2text(obj_user_response)} """ } - ]) + ], produce_user_answer=True) # Create basic message structure with proper fields logger.debug(f"FINAL PROMPT = {self.parse_json2text(final_prompt)}.") @@ -360,6 +392,7 @@ JSON_OUTPUT = {{ } self.lucy_interface.create_workflow(workflow_db) + self.log_add(workflow, GLOBAL_SETTINGS["workflow_status_messages"]["init"], level="info", progress=0) return workflow else: # Load existing workflow @@ -391,6 +424,7 @@ JSON_OUTPUT = {{ } self.lucy_interface.update_workflow(workflow_id, workflow_update) + self.log_add(workflow, GLOBAL_SETTINGS["workflow_status_messages"]["running"], level="info", progress=0) return workflow def workflow_finish(self, workflow: Dict[str, Any]) -> Dict[str, Any]: @@ -416,23 +450,24 @@ JSON_OUTPUT = {{ # Save workflow state to database - only relevant fields, not the messages list self.lucy_interface.update_workflow(workflow["id"], workflow_update) + self.log_add(workflow, GLOBAL_SETTINGS["workflow_status_messages"]["completed"], level="info", progress=100) return workflow async def workflow_summarize(self, workflow: Dict[str, Any], message_user: Dict[str, Any]) -> str: """ - Erstellt eine Zusammenfassung des Workflows ohne die aktuelle User-Message. + Creates a summary of the workflow without the current user message. 
Args: - workflow: Workflow-Objekt - prompt: Anweisungen zur Erstellung der Zusammenfassung + workflow: Workflow object + message_user: Current user message Returns: - Zusammenfassung des Workflows + Summary of the workflow """ if not workflow or "messages" not in workflow or not workflow["messages"]: - return "" # die erste Message + return "" # first message - # Nachrichten in umgekehrter Reihenfolge durchgehen (neueste zuerst) + # Go through messages in ascending sequence order (oldest first) messages = sorted(workflow["messages"], key=lambda m: m.get("sequence_no", 0), reverse=False) summary_parts = [] @@ -449,10 +484,10 @@ JSON_OUTPUT = {{ def agent_profiles(self) -> List[Dict[str, Any]]: """ - Ruft Informationen über alle verfügbaren Agenten ab. + Gets information about all available agents. Returns: - Liste mit Informationen über alle verfügbaren Agenten + List with information about all available agents """ return self.agent_registry.get_agent_infos() @@ -469,7 +504,7 @@ JSON_OUTPUT = {{ """ prepared_inputs = [] - # Sortiere die Workflow-Nachrichten nach Sequenznummer (absteigend) + # Sort workflow messages by sequence number (descending) sorted_messages = sorted( workflow.get("messages", []), key=lambda m: m.get("sequence_no", 0), @@ -548,8 +583,8 @@ JSON_OUTPUT = {{ # Extract and provide only the relevant information as requested. """ - # Call the AI service to process the content - processed_data = await self.ai_service.call_api([ + # Call the AI service through lucy_interface for language support + processed_data = await self.lucy_interface.call_ai([ {"role": "system", "content": "You are a document processing assistant. Extract only the relevant information as requested."}, {"role": "user", "content": ai_prompt} ]) @@ -582,14 +617,14 @@ JSON_OUTPUT = {{ Returns: List of document objects created by the agent """ - # Extract task information + # 1.
Extract task information agent_name = task.get("agent") agent_prompt = task.get("prompt", "") # Log the current step output_labels = [d.get("label", "unknown") for d in task.get("output_documents", [])] step_info = f"Agent '{agent_name}' to create {', '.join(output_labels)}." - self.log_add(workflow, step_info) + self.log_add(workflow, step_info, level="info") # Check if prompt is empty if agent_prompt == "": @@ -624,7 +659,8 @@ JSON_OUTPUT = {{ "context": { "workflow_round": workflow.get("current_round", 1), "agent_type": agent_name, - "timestamp": datetime.now().isoformat() + "timestamp": datetime.now().isoformat(), + "language": self.lucy_interface.user_language # Pass language to agent } } @@ -641,7 +677,8 @@ JSON_OUTPUT = {{ # Log the agent response self.log_add( workflow, - f"Agent '{agent_name}' completed task. Feedback: {agent_results.get('feedback', 'No feedback provided')}" + f"Agent '{agent_name}' completed task. Feedback: {agent_results.get('feedback', 'No feedback provided')}", + level="info" ) # Store produced files and prepare input object for message @@ -803,29 +840,29 @@ JSON_OUTPUT = {{ async def message_summarize(self, message: Dict[str, Any]) -> str: """ - Erstellt eine Zusammenfassung einer Nachricht einschließlich ihrer Dokumente. + Creates a summary of a message including its documents. Args: - message: Zu summarisierende Nachricht - prompt: Anweisungen zur Erstellung der Zusammenfassung + message: Message to summarize Returns: - Zusammenfassung der Nachricht + Summary of the message """ role = message.get("role", "undefined") agent_name = message.get("agent_name", "") content = message.get("content", "") try: - content_summary = await self.ai_service.call_api([ + # Use the lucy_interface for language-aware AI calls + content_summary = await self.lucy_interface.call_ai([ {"role": "system", "content": f"You are a chat message summarizer. 
Create a very concise summary (2-3 sentences, maximum 300 characters)"}, {"role": "user", "content": content} ]) except Exception as e: - logger.error(f"Fehler bei der Zusammenfassung: {str(e)}") + logger.error(f"Error creating summary: {str(e)}") content_summary = content[:200] + "..." - # Dokumente zusammenfassen + # Summarize documents docs_summary = "" if "documents" in message and message["documents"]: docs_list = [] @@ -853,7 +890,8 @@ JSON_OUTPUT = {{ is_text = content.get("metadata", {}).get("is_text", False) try: - summary = await self.ai_service.call_api([ + # Use the lucy_interface for language-aware AI calls + summary = await self.lucy_interface.call_ai([ {"role": "system", "content": "You are a content summarizer. Create very concise summary (1-2 sentences, maximum 200 characters) about this file."}, {"role": "user", "content": f"Summarize this {content_type} content briefly:\n\n{data}"} ]) @@ -985,57 +1023,57 @@ JSON_OUTPUT = {{ def save_document_to_file(self, document: Dict[str, Any]) -> Optional[int]: """ - Speichert ein Document als Datei in der Datenbank und gibt die File-ID zurück. + Saves a Document as a file in the database and returns the File-ID. 
Args: - document: Document-Objekt mit Inhalten + document: Document object with contents Returns: - File-ID oder None bei Fehler + File-ID or None on error """ try: if not document or "contents" not in document or not document["contents"]: - logger.warning("Dokument hat keine Inhalte zum Speichern") + logger.warning("Document has no contents to save") return None - # Nimm den ersten Inhalt als Hauptinhalt + # Take the first content as main content main_content = document["contents"][0] name = main_content.get("name", "document") content_type = main_content.get("content_type", "text/plain") data = main_content.get("data", b"") - # Binäre Daten sicherstellen + # Ensure binary data if isinstance(data, str): data = data.encode('utf-8') - # Datei in der Datenbank speichern + # Save file in the database file_meta = self.lucy_interface.save_uploaded_file(data, name) if file_meta and "id" in file_meta: - # Aktualisiere das Document mit der File-ID + # Update the Document with the File-ID document["file_id"] = file_meta["id"] return file_meta["id"] return None except Exception as e: - logger.error(f"Fehler beim Speichern des Dokuments als Datei: {str(e)}") + logger.error(f"Error saving document as file: {str(e)}") return None def add_document_to_message(self, message: Dict[str, Any], document: Dict[str, Any]) -> Dict[str, Any]: """ - Fügt ein Document zu einer Nachricht hinzu. + Adds a Document to a message. 
Args: - message: Nachricht, zu der das Dokument hinzugefügt werden soll - document: Hinzuzufügendes Document + message: Message to which the document should be added + document: Document to add Returns: - Aktualisierte Nachricht + Updated message """ - # Sicherstellen, dass die Dokumente-Liste existiert + # Ensure the documents list exists if "documents" not in message: message["documents"] = [] - # Document hinzufügen + # Add Document message["documents"].append(document) return message @@ -1044,6 +1082,15 @@ JSON_OUTPUT = {{ ### Tools def get_filename(self, document: Dict[str, Any]) -> str: + """ + Gets the filename from a document by combining name and extension. + + Args: + document: Document object + + Returns: + Filename with extension + """ name = document.get("name", "unnamed") ext = document.get("ext", "") if ext: @@ -1051,45 +1098,55 @@ JSON_OUTPUT = {{ return name def log_add(self, workflow: Dict[str, Any], message: str, level: str = "info", - agent_id: Optional[str] = None, agent_name: Optional[str] = None) -> str: + progress: Optional[int] = None) -> str: """ - Fügt einen Log-Eintrag zum Workflow hinzu und loggt diesen auch im Logger. + Adds a log entry to the workflow and also logs it in the logger. + Enhanced with standardized formatting and workflow status tracking. 
Args: - workflow: Workflow-Objekt - message: Log-Nachricht - level: Log-Level (info, warning, error) - agent_id: Optional - ID des Agenten - agent_name: Optional - Name des Agenten + workflow: Workflow object + message: Log message + level: Log level (info, warning, error) + progress: Optional - Progress value (0-100) Returns: - ID des erstellten Log-Eintrags + ID of the created log entry """ - # Sicherstellen, dass Logs-Liste existiert + # Ensure logs list exists if "logs" not in workflow: workflow["logs"] = [] - # Log-ID generieren + # Generate log ID log_id = f"log_{str(uuid.uuid4())}" - # Log-Eintrag erstellen + # Get workflow status + workflow_status = workflow.get("status", "running") + + # Set agent_name from global settings + agent_name = GLOBAL_SETTINGS.get("system_name", "AI Assistant") + + # Create log entry log_entry = { "id": log_id, "workflow_id": workflow["id"], "message": message, "type": level, "timestamp": datetime.now().isoformat(), - "agent_id": agent_id, - "agent_name": agent_name + "agent_name": agent_name, + "status": workflow_status } - # Log zum Workflow hinzufügen + # Add progress if provided + if progress is not None: + log_entry["progress"] = progress + + # Add log to workflow workflow["logs"].append(log_entry) - # In Datenbank speichern + # Save in database self.lucy_interface.create_workflow_log(log_entry) - # Auch im Logger loggen + # Also log in logger if level == "info": logger.info(f"Workflow {workflow['id']}: {message}") elif level == "warning": @@ -1101,36 +1158,36 @@ JSON_OUTPUT = {{ def parse_json2text(self, json_obj: Any) -> str: """ - Konvertiert ein JSON-Objekt in eine lesbare Textdarstellung. + Converts a JSON object to a readable text representation. 
Args: - json_obj: Zu konvertierendes JSON-Objekt + json_obj: JSON object to convert Returns: - Formatierte Textdarstellung + Formatted text representation """ if not json_obj: - return "Keine Daten vorhanden" + return "No data available" try: - # Formatieren mit Einrückung für bessere Lesbarkeit + # Format with indentation for better readability return json.dumps(json_obj, indent=2, ensure_ascii=False) except Exception as e: - logger.error(f"Fehler bei JSON-Konvertierung: {str(e)}") + logger.error(f"Error in JSON conversion: {str(e)}") return str(json_obj) def parse_json_response(self, response_text: str) -> Dict[str, Any]: """ - Parst die JSON-Antwort aus einem Text. + Parses the JSON response from a text. Args: - response_text: Text mit JSON-Inhalt + response_text: Text with JSON content Returns: - Geparste JSON-Daten + Parsed JSON data """ try: - # Extrahiere JSON aus dem Text (falls mit anderen Inhalten vermischt) + # Extract JSON from the text (if mixed with other content) json_start = response_text.find('{') json_end = response_text.rfind('}') + 1 @@ -1138,34 +1195,35 @@ JSON_OUTPUT = {{ json_str = response_text[json_start:json_end] return json.loads(json_str) else: - # Versuche den gesamten Text zu parsen + # Try to parse the entire text return json.loads(response_text) except json.JSONDecodeError as e: - logger.error(f"JSON-Parse-Fehler: {str(e)}") - # Fallback: Leere Struktur zurückgeben + logger.error(f"JSON parsing error: {str(e)}") + # Fallback: Return empty structure return { "obj_final_documents": [], "obj_workplan": [], - "obj_user_response": "Sorry, I could not parse your data." + "obj_user_response": "Sorry, I could not parse your data.", + "user_language": "en" } -# Singleton-Factory für den ChatManager +# Singleton factory for the ChatManager _chat_managers = {} def get_chat_manager(mandate_id: int = 0, user_id: int = 0) -> ChatManager: """ - Gibt einen ChatManager für den angegebenen Kontext zurück. - Wiederverwendet bestehende Instanzen. 
+ Returns a ChatManager for the specified context. + Reuses existing instances. Args: - mandate_id: ID des Mandanten - user_id: ID des Benutzers + mandate_id: ID of the mandate + user_id: ID of the user Returns: - ChatManager-Instanz + ChatManager instance """ context_key = f"{mandate_id}_{user_id}" if context_key not in _chat_managers: _chat_managers[context_key] = ChatManager(mandate_id, user_id) - return _chat_managers[context_key] \ No newline at end of file + return _chat_managers[context_key] \ No newline at end of file diff --git a/modules/chat_content_extraction.py b/modules/chat_content_extraction.py index 3fa5485a..27d0829e 100644 --- a/modules/chat_content_extraction.py +++ b/modules/chat_content_extraction.py @@ -1,6 +1,6 @@ """ -Modul zur Extraktion von Inhalten aus verschiedenen Dateiformaten. -Bietet spezialisierte Funktionen für die Verarbeitung von Text, PDF, Office-Dokumenten, Bildern usw. +Module for extracting content from various file formats. +Provides specialized functions for processing text, PDF, Office documents, images, etc. """ import logging @@ -9,7 +9,7 @@ import io from typing import Dict, Any, List, Optional, Union, Tuple import base64 -# Logger konfigurieren +# Configure logger logger = logging.getLogger(__name__) # Optional imports - only loaded when needed @@ -19,26 +19,26 @@ image_processor_loaded = False def get_document_contents(file_metadata: Dict[str, Any], file_content: bytes) -> List[Dict[str, Any]]: """ - Hauptfunktion zur Extraktion von Inhalten aus einer Datei basierend auf dem MIME-Typ. - Delegiert an spezialisierte Extraktionsfunktionen. + Main function for extracting content from a file based on its MIME type. + Delegates to specialized extraction functions. Args: - file_metadata: Metadaten der Datei (Name, MIME-Typ, etc.) - file_content: Binärdaten der Datei + file_metadata: File metadata (Name, MIME type, etc.) 
+ file_content: Binary data of the file Returns: - Liste von Document-Content-Objekten mit metadata und is_text Flag + List of Document-Content objects with metadata and is_text flag """ try: mime_type = file_metadata.get("mime_type", "application/octet-stream") file_name = file_metadata.get("name", "unknown") - logger.info(f"Extrahiere Inhalte aus Datei '{file_name}' (MIME-Typ: {mime_type})") + logger.info(f"Extracting content from file '{file_name}' (MIME type: {mime_type})") - # Inhalte basierend auf MIME-Typ extrahieren + # Extract content based on MIME type contents = [] - # Text-basierte Formate + # Text-based formats if mime_type.startswith("text/") or mime_type in [ "application/json", "application/xml", @@ -51,42 +51,42 @@ def get_document_contents(file_metadata: Dict[str, Any], file_content: bytes) -> elif mime_type == "text/csv": contents.extend(extract_csv_content(file_name, file_content)) - # Bilder + # Images elif mime_type.startswith("image/"): contents.extend(extract_image_content(file_name, file_content, mime_type)) - # PDF Dokumente + # PDF Documents elif mime_type == "application/pdf": contents.extend(extract_pdf_content(file_name, file_content)) - # Word-Dokumente + # Word Documents elif mime_type in [ "application/vnd.openxmlformats-officedocument.wordprocessingml.document", "application/msword" ]: contents.extend(extract_word_content(file_name, file_content, mime_type)) - # Excel-Dokumente + # Excel Documents elif mime_type in [ "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet", "application/vnd.ms-excel" ]: contents.extend(extract_excel_content(file_name, file_content, mime_type)) - # PowerPoint-Dokumente + # PowerPoint Documents elif mime_type in [ "application/vnd.openxmlformats-officedocument.presentationml.presentation", "application/vnd.ms-powerpoint" ]: contents.extend(extract_powerpoint_content(file_name, file_content, mime_type)) - # Binärdaten als Fallback für unbekannte Formate + # Binary data as fallback for 
unknown formats else: contents.extend(extract_binary_content(file_name, file_content, mime_type)) - # Fallback, wenn keine Inhalte extrahiert werden konnten + # Fallback when no content could be extracted if not contents: - logger.warning(f"Keine Inhalte aus Datei '{file_name}' extrahiert, verwende Binär-Fallback") + logger.warning(f"No content extracted from file '{file_name}', using binary fallback") contents.append({ "sequence_nr": 1, "name": '1_undefined', @@ -99,7 +99,6 @@ def get_document_contents(file_metadata: Dict[str, Any], file_content: bytes) -> }) # Add generic attributes for all documents - for content in contents: if isinstance(content.get("data"), bytes): content["data"] = base64.b64encode(content["data"]).decode('utf-8') @@ -108,12 +107,12 @@ def get_document_contents(file_metadata: Dict[str, Any], file_content: bytes) -> content["metadata"] = {} content["metadata"]["base64_encoded"] = True - logger.info(f"Erfolgreich {len(contents)} Inhalte aus Datei '{file_name}' extrahiert") + logger.info(f"Successfully extracted {len(contents)} content items from file '{file_name}'") return contents except Exception as e: - logger.error(f"Fehler bei der Inhaltsextraktion: {str(e)}") - # Fallback bei Fehler - Originaldaten zurückgeben + logger.error(f"Error during content extraction: {str(e)}") + # Fallback on error - return original data return [{ "sequence_nr": 1, "name": file_metadata.get("name", "unknown"), @@ -127,60 +126,60 @@ def get_document_contents(file_metadata: Dict[str, Any], file_content: bytes) -> def _load_pdf_extractor(): - """Lädt die PDF-Extraktions-Bibliotheken bei Bedarf""" + """Loads PDF extraction libraries when needed""" global pdf_extractor_loaded if not pdf_extractor_loaded: try: global PyPDF2, fitz import PyPDF2 - import fitz # PyMuPDF für umfangreichere PDF-Verarbeitung + import fitz # PyMuPDF for more extensive PDF processing pdf_extractor_loaded = True - logger.info("PDF-Extraktions-Bibliotheken erfolgreich geladen") + 
logger.info("PDF extraction libraries successfully loaded") except ImportError as e: - logger.warning(f"PDF-Extraktions-Bibliotheken konnten nicht geladen werden: {e}") + logger.warning(f"PDF extraction libraries could not be loaded: {e}") def _load_office_extractor(): - """Lädt die Office-Dokument-Extraktions-Bibliotheken bei Bedarf""" + """Loads Office document extraction libraries when needed""" global office_extractor_loaded if not office_extractor_loaded: try: global docx, openpyxl - import docx # python-docx für Word-Dokumente - import openpyxl # für Excel-Dateien + import docx # python-docx for Word documents + import openpyxl # for Excel files office_extractor_loaded = True - logger.info("Office-Extraktions-Bibliotheken erfolgreich geladen") + logger.info("Office extraction libraries successfully loaded") except ImportError as e: - logger.warning(f"Office-Extraktions-Bibliotheken konnten nicht geladen werden: {e}") + logger.warning(f"Office extraction libraries could not be loaded: {e}") def _load_image_processor(): - """Lädt die Bild-Verarbeitungs-Bibliotheken bei Bedarf""" + """Loads image processing libraries when needed""" global image_processor_loaded if not image_processor_loaded: try: global PIL, Image from PIL import Image image_processor_loaded = True - logger.info("Bild-Verarbeitungs-Bibliotheken erfolgreich geladen") + logger.info("Image processing libraries successfully loaded") except ImportError as e: - logger.warning(f"Bild-Verarbeitungs-Bibliotheken konnten nicht geladen werden: {e}") + logger.warning(f"Image processing libraries could not be loaded: {e}") def extract_text_content(file_name: str, file_content: bytes, mime_type: str) -> List[Dict[str, Any]]: """ - Extrahiert Text aus Textdateien. + Extracts text from text files. 
Args: - file_name: Name der Datei - file_content: Binärdaten der Datei - mime_type: MIME-Typ der Datei + file_name: Name of the file + file_content: Binary data of the file + mime_type: MIME type of the file Returns: - Liste von Text-Content-Objekten mit metadata.is_text = True + List of Text-Content objects with metadata.is_text = True """ try: - # Originaldateiendung beibehalten + # Keep original file extension file_extension = os.path.splitext(file_name)[1][1:] if os.path.splitext(file_name)[1] else "txt" - # Text-Inhalt extrahieren + # Extract text content text_content = file_content.decode('utf-8') return [{ "sequence_nr": 1, @@ -193,13 +192,13 @@ def extract_text_content(file_name: str, file_content: bytes, mime_type: str) -> } }] except UnicodeDecodeError: - logger.warning(f"Konnte Text aus Datei '{file_name}' nicht als UTF-8 decodieren, versuche andere Kodierungen") + logger.warning(f"Could not decode text from file '{file_name}' as UTF-8, trying alternative encodings") try: - # Versuche alternative Kodierungen + # Try alternative encodings for encoding in ['latin-1', 'cp1252', 'iso-8859-1']: try: text_content = file_content.decode(encoding) - logger.info(f"Text erfolgreich mit Kodierung {encoding} decodiert") + logger.info(f"Text successfully decoded with encoding {encoding}") return [{ "sequence_nr": 1, "name": "1_text", # Simplified naming @@ -214,8 +213,8 @@ def extract_text_content(file_name: str, file_content: bytes, mime_type: str) -> except UnicodeDecodeError: continue - # Fallback auf Binärdaten, wenn keine Kodierung funktioniert - logger.warning(f"Konnte Text nicht decodieren, verwende Binärdaten") + # Fallback to binary data if no encoding works + logger.warning(f"Could not decode text, using binary data") return [{ "sequence_nr": 1, "name": "1_binary", # Simplified naming @@ -227,8 +226,8 @@ def extract_text_content(file_name: str, file_content: bytes, mime_type: str) -> } }] except Exception as e: - logger.error(f"Fehler bei der alternativen 
Textdekodierung: {str(e)}") - # Binärdaten als Fallback zurückgeben + logger.error(f"Error in alternative text decoding: {str(e)}") + # Return binary data as fallback return [{ "sequence_nr": 1, "name": "1_binary", # Simplified naming @@ -242,17 +241,17 @@ def extract_text_content(file_name: str, file_content: bytes, mime_type: str) -> def extract_csv_content(file_name: str, file_content: bytes) -> List[Dict[str, Any]]: """ - Extrahiert Inhalt aus CSV-Dateien. + Extracts content from CSV files. Args: - file_name: Name der Datei - file_content: Binärdaten der Datei + file_name: Name of the file + file_content: Binary data of the file Returns: - Liste von CSV-Content-Objekten mit metadata.is_text = True + List of CSV-Content objects with metadata.is_text = True """ try: - # Text-Inhalt extrahieren + # Extract text content csv_content = file_content.decode('utf-8') return [{ "sequence_nr": 1, @@ -266,13 +265,13 @@ def extract_csv_content(file_name: str, file_content: bytes) -> List[Dict[str, A } }] except UnicodeDecodeError: - logger.warning(f"Konnte CSV aus Datei '{file_name}' nicht als UTF-8 decodieren, versuche andere Kodierungen") + logger.warning(f"Could not decode CSV from file '{file_name}' as UTF-8, trying alternative encodings") try: - # Versuche alternative Kodierungen für CSV + # Try alternative encodings for CSV for encoding in ['latin-1', 'cp1252', 'iso-8859-1']: try: csv_content = file_content.decode(encoding) - logger.info(f"CSV erfolgreich mit Kodierung {encoding} decodiert") + logger.info(f"CSV successfully decoded with encoding {encoding}") return [{ "sequence_nr": 1, "name": "1_csv", # Simplified naming @@ -288,7 +287,7 @@ def extract_csv_content(file_name: str, file_content: bytes) -> List[Dict[str, A except UnicodeDecodeError: continue - # Fallback auf Binärdaten + # Fallback to binary data return [{ "sequence_nr": 1, "name": "1_binary", # Simplified naming @@ -300,7 +299,7 @@ def extract_csv_content(file_name: str, file_content: bytes) -> 
List[Dict[str, A } }] except Exception as e: - logger.error(f"Fehler bei der alternativen CSV-Dekodierung: {str(e)}") + logger.error(f"Error in alternative CSV decoding: {str(e)}") return [{ "sequence_nr": 1, "name": "1_binary", # Simplified naming @@ -314,23 +313,23 @@ def extract_csv_content(file_name: str, file_content: bytes) -> List[Dict[str, A def extract_image_content(file_name: str, file_content: bytes, mime_type: str) -> List[Dict[str, Any]]: """ - Extrahiert Inhalt aus Bilddateien und erzeugt ggf. Metadaten-Beschreibungen. + Extracts content from image files and optionally generates metadata descriptions. Args: - file_name: Name der Datei - file_content: Binärdaten der Datei - mime_type: MIME-Typ der Datei + file_name: Name of the file + file_content: Binary data of the file + mime_type: MIME type of the file Returns: - Liste von Image-Content-Objekten mit metadata.is_text = False + List of Image-Content objects with metadata.is_text = False """ - # Dateiendung aus MIME-Typ oder Dateinamen extrahieren + # Extract file extension from MIME type or filename file_extension = mime_type.split('/')[-1] if file_extension == "jpeg": file_extension = "jpg" - # Wenn möglich, Bild analysieren und Metadaten extrahieren + # If possible, analyze image and extract metadata image_metadata = { "is_text": False, "format": "image" @@ -343,9 +342,9 @@ def extract_image_content(file_name: str, file_content: bytes, mime_type: str) - with io.BytesIO(file_content) as img_stream: try: img = Image.open(img_stream) - # Überprüfe, ob das Bild tatsächlich geladen wurde + # Check if the image was actually loaded img.verify() - # Um sicher weiterzuarbeiten, neu laden + # To safely continue working, reload img_stream.seek(0) img = Image.open(img_stream) image_metadata.update({ @@ -354,7 +353,7 @@ def extract_image_content(file_name: str, file_content: bytes, mime_type: str) - "width": img.width, "height": img.height }) - # Extrahiere EXIF-Daten, falls vorhanden + # Extract EXIF data if 
available if hasattr(img, '_getexif') and callable(img._getexif): exif = img._getexif() if exif: @@ -363,18 +362,18 @@ def extract_image_content(file_name: str, file_content: bytes, mime_type: str) - exif_data[f"tag_{tag_id}"] = str(value) image_metadata["exif"] = exif_data - # Erzeuge Bildbeschreibung + # Generate image description image_description = f"Image ({img.width}x{img.height}, {img.format}, {img.mode})" except Exception as inner_e: - logger.warning(f"Fehler beim Verarbeiten des Bildes: {str(inner_e)}") + logger.warning(f"Error processing image: {str(inner_e)}") image_metadata["error"] = str(inner_e) image_description = f"Image (unable to process: {str(inner_e)})" except Exception as e: - logger.warning(f"Konnte Bildmetadaten nicht extrahieren: {str(e)}") + logger.warning(f"Could not extract image metadata: {str(e)}") image_metadata["error"] = str(e) - # Bild-Inhalt zurückgeben + # Return image content contents = [{ "sequence_nr": 1, "name": "1_image", # Simplified naming @@ -384,7 +383,7 @@ def extract_image_content(file_name: str, file_content: bytes, mime_type: str) - "metadata": image_metadata }] - # Falls Bildbeschreibung vorhanden, als zusätzlichen Text-Content hinzufügen + # If image description available, add as additional text content if image_description: contents.append({ "sequence_nr": 2, @@ -402,24 +401,24 @@ def extract_image_content(file_name: str, file_content: bytes, mime_type: str) - def extract_pdf_content(file_name: str, file_content: bytes) -> List[Dict[str, Any]]: """ - Extrahiert Text und Bilder aus PDF-Dateien. + Extracts text and images from PDF files. 
Args: - file_name: Name der Datei - file_content: Binärdaten der Datei + file_name: Name of the file + file_content: Binary data of the file Returns: - Liste von PDF-Content-Objekten (Text und Bilder) mit metadata.is_text Flag + List of PDF-Content objects (text and images) with metadata.is_text flag """ contents = [] extracted_content_found = False try: - # PDF-Extraktions-Bibliotheken laden + # Load PDF extraction libraries _load_pdf_extractor() if not pdf_extractor_loaded: - logger.warning("PDF-Extraktion nicht möglich: Bibliotheken nicht verfügbar") - # Originaldatei als binären Inhalt hinzufügen + logger.warning("PDF extraction not possible: Libraries not available") + # Add original file as binary content contents.append({ "sequence_nr": 1, "name": "1_pdf", # Simplified naming @@ -433,13 +432,13 @@ def extract_pdf_content(file_name: str, file_content: bytes) -> List[Dict[str, A }) return contents - # Text mit PyPDF2 extrahieren + # Extract text with PyPDF2 extracted_text = "" pdf_metadata = {} with io.BytesIO(file_content) as pdf_stream: pdf_reader = PyPDF2.PdfReader(pdf_stream) - # Metadaten extrahieren + # Extract metadata pdf_info = pdf_reader.metadata or {} for key, value in pdf_info.items(): if key.startswith('/'): @@ -447,14 +446,14 @@ def extract_pdf_content(file_name: str, file_content: bytes) -> List[Dict[str, A else: pdf_metadata[key] = value - # Text aus allen Seiten extrahieren + # Extract text from all pages for page_num in range(len(pdf_reader.pages)): page = pdf_reader.pages[page_num] page_text = page.extract_text() if page_text: - extracted_text += f"--- Seite {page_num + 1} ---\n{page_text}\n\n" + extracted_text += f"--- Page {page_num + 1} ---\n{page_text}\n\n" - # Wenn Text gefunden wurde, als eigenen Content hinzufügen + # If text was found, add as separate content if extracted_text.strip(): extracted_content_found = True contents.append({ @@ -471,7 +470,7 @@ def extract_pdf_content(file_name: str, file_content: bytes) -> List[Dict[str, A 
} }) - # Bilder mit PyMuPDF (fitz) extrahieren + # Extract images with PyMuPDF (fitz) try: with io.BytesIO(file_content) as pdf_stream: doc = fitz.open(stream=pdf_stream, filetype="pdf") @@ -489,7 +488,7 @@ def extract_pdf_content(file_name: str, file_content: bytes) -> List[Dict[str, A image_bytes = base_image["image"] image_ext = base_image["ext"] - # Bild als Content hinzufügen + # Add image as content extracted_content_found = True contents.append({ "sequence_nr": len(contents) + 1, @@ -505,18 +504,18 @@ def extract_pdf_content(file_name: str, file_content: bytes) -> List[Dict[str, A } }) except Exception as img_e: - logger.warning(f"Fehler bei der Extraktion von Bild {img_index} auf Seite {page_num + 1}: {str(img_e)}") + logger.warning(f"Error extracting image {img_index} on page {page_num + 1}: {str(img_e)}") - # Dokument schließen + # Close document doc.close() except Exception as img_extract_e: - logger.warning(f"Fehler bei der Bildextraktion aus PDF: {str(img_extract_e)}") + logger.warning(f"Error extracting images from PDF: {str(img_extract_e)}") except Exception as e: - logger.error(f"Fehler bei der PDF-Extraktion: {str(e)}") + logger.error(f"Error in PDF extraction: {str(e)}") - # Wenn keine Inhalte extrahiert wurden, füge das Original-PDF hinzu + # If no content was extracted, add the original PDF if not extracted_content_found: contents.append({ "sequence_nr": 1, @@ -534,28 +533,28 @@ def extract_pdf_content(file_name: str, file_content: bytes) -> List[Dict[str, A def extract_word_content(file_name: str, file_content: bytes, mime_type: str) -> List[Dict[str, Any]]: """ - Extrahiert Text und Bilder aus Word-Dokumenten. + Extracts text and images from Word documents. Args: - file_name: Name der Datei - file_content: Binärdaten der Datei - mime_type: MIME-Typ der Datei + file_name: Name of the file + file_content: Binary data of the file + mime_type: MIME type of the file Returns: - Liste von Word-Content-Objekten (Text und ggf. 
Bilder) mit metadata.is_text Flag + List of Word-Content objects (text and possibly images) with metadata.is_text flag """ contents = [] extracted_content_found = False - # Dateiendung bestimmen + # Determine file extension file_extension = "docx" if mime_type == "application/vnd.openxmlformats-officedocument.wordprocessingml.document" else "doc" try: - # Office-Extraktions-Bibliotheken laden + # Load Office extraction libraries _load_office_extractor() if not office_extractor_loaded: - logger.warning("Word-Extraktion nicht möglich: Bibliotheken nicht verfügbar") - # Originaldatei als binären Inhalt hinzufügen + logger.warning("Word extraction not possible: Libraries not available") + # Add original file as binary content contents.append({ "sequence_nr": 1, "name": "1_word", # Simplified naming @@ -569,17 +568,17 @@ def extract_word_content(file_name: str, file_content: bytes, mime_type: str) -> }) return contents - # Unterstützt nur DOCX (neueres Format) + # Only supports DOCX (newer format) if mime_type == "application/vnd.openxmlformats-officedocument.wordprocessingml.document": with io.BytesIO(file_content) as docx_stream: doc = docx.Document(docx_stream) - # Text extrahieren + # Extract text full_text = [] for para in doc.paragraphs: full_text.append(para.text) - # Tabellen extrahieren + # Extract tables for table in doc.tables: for row in table.rows: row_text = [] @@ -589,7 +588,7 @@ def extract_word_content(file_name: str, file_content: bytes, mime_type: str) -> extracted_text = "\n\n".join(full_text) - # Extrahierten Text als Content hinzufügen + # Add extracted text as content if extracted_text.strip(): extracted_content_found = True contents.append({ @@ -606,12 +605,12 @@ def extract_word_content(file_name: str, file_content: bytes, mime_type: str) -> } }) else: - logger.warning(f"Extraktion aus altem Word-Format (DOC) nicht unterstützt") + logger.warning(f"Extraction from old Word format (DOC) not supported") except Exception as e: - 
logger.error(f"Fehler bei der Word-Extraktion: {str(e)}") + logger.error(f"Error in Word extraction: {str(e)}") - # Wenn keine Inhalte extrahiert wurden, füge das Original-Dokument hinzu + # If no content was extracted, add the original document if not extracted_content_found: contents.append({ "sequence_nr": 1, @@ -629,28 +628,28 @@ def extract_word_content(file_name: str, file_content: bytes, mime_type: str) -> def extract_excel_content(file_name: str, file_content: bytes, mime_type: str) -> List[Dict[str, Any]]: """ - Extrahiert Tabellendaten aus Excel-Dateien. + Extracts table data from Excel files. Args: - file_name: Name der Datei - file_content: Binärdaten der Datei - mime_type: MIME-Typ der Datei + file_name: Name of the file + file_content: Binary data of the file + mime_type: MIME type of the file Returns: - Liste von Excel-Content-Objekten mit metadata.is_text Flag + List of Excel-Content objects with metadata.is_text flag """ contents = [] extracted_content_found = False - # Dateiendung bestimmen + # Determine file extension file_extension = "xlsx" if mime_type == "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet" else "xls" try: - # Office-Extraktions-Bibliotheken laden + # Load Office extraction libraries _load_office_extractor() if not office_extractor_loaded: - logger.warning("Excel-Extraktion nicht möglich: Bibliotheken nicht verfügbar") - # Originaldatei als binären Inhalt hinzufügen + logger.warning("Excel extraction not possible: Libraries not available") + # Add original file as binary content contents.append({ "sequence_nr": 1, "name": "1_excel", # Simplified naming @@ -664,16 +663,16 @@ def extract_excel_content(file_name: str, file_content: bytes, mime_type: str) - }) return contents - # Unterstützt nur XLSX (neueres Format) + # Only supports XLSX (newer format) if mime_type == "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet": with io.BytesIO(file_content) as xlsx_stream: workbook = 
openpyxl.load_workbook(xlsx_stream, data_only=True) - # Jedes Arbeitsblatt als separaten CSV-Content extrahieren + # Extract each worksheet as separate CSV content for sheet_index, sheet_name in enumerate(workbook.sheetnames): sheet = workbook[sheet_name] - # Daten als CSV formatieren + # Format data as CSV csv_rows = [] for row in sheet.iter_rows(): csv_row = [] @@ -687,7 +686,7 @@ def extract_excel_content(file_name: str, file_content: bytes, mime_type: str) - csv_content = "\n".join(csv_rows) - # Als CSV-Content hinzufügen + # Add as CSV content if csv_content.strip(): extracted_content_found = True sheet_safe_name = sheet_name.replace(" ", "_").replace("/", "_").replace("\\", "_") @@ -705,12 +704,12 @@ def extract_excel_content(file_name: str, file_content: bytes, mime_type: str) - } }) else: - logger.warning(f"Extraktion aus altem Excel-Format (XLS) nicht unterstützt") + logger.warning(f"Extraction from old Excel format (XLS) not supported") except Exception as e: - logger.error(f"Fehler bei der Excel-Extraktion: {str(e)}") + logger.error(f"Error in Excel extraction: {str(e)}") - # Wenn keine Inhalte extrahiert wurden, füge das Original-Dokument hinzu + # If no content was extracted, add the original document if not extracted_content_found: contents.append({ "sequence_nr": 1, @@ -728,18 +727,18 @@ def extract_excel_content(file_name: str, file_content: bytes, mime_type: str) - def extract_powerpoint_content(file_name: str, file_content: bytes, mime_type: str) -> List[Dict[str, Any]]: """ - Extrahiert Inhalte aus PowerPoint-Präsentationen. + Extracts content from PowerPoint presentations. 
Args: - file_name: Name der Datei - file_content: Binärdaten der Datei - mime_type: MIME-Typ der Datei + file_name: Name of the file + file_content: Binary data of the file + mime_type: MIME type of the file Returns: - Liste von PowerPoint-Content-Objekten mit metadata.is_text = False + List of PowerPoint-Content objects with metadata.is_text = False """ - # Für PowerPoint geben wir aktuell nur die originale Binärdatei zurück - # Eine vollständige Extraktion würde mehr spezialisierte Bibliotheken erfordern + # For PowerPoint, we currently only return the original binary file + # A complete extraction would require more specialized libraries file_extension = "pptx" if mime_type == "application/vnd.openxmlformats-officedocument.presentationml.presentation" else "ppt" return [{ "sequence_nr": 1, @@ -755,15 +754,15 @@ def extract_powerpoint_content(file_name: str, file_content: bytes, mime_type: s def extract_binary_content(file_name: str, file_content: bytes, mime_type: str) -> List[Dict[str, Any]]: """ - Fallback für binäre Dateien, bei denen keine spezifische Extraktion möglich ist. + Fallback for binary files where no specific extraction is possible. 
Args: - file_name: Name der Datei - file_content: Binärdaten der Datei - mime_type: MIME-Typ der Datei + file_name: Name of the file + file_content: Binary data of the file + mime_type: MIME type of the file Returns: - Liste mit einem binären Content-Objekt mit metadata.is_text = False + List with a binary Content object with metadata.is_text = False """ file_extension = os.path.splitext(file_name)[1][1:] if os.path.splitext(file_name)[1] else "bin" return [{ diff --git a/modules/def_attributes.py b/modules/def_attributes.py index b3eac248..6a60019c 100644 --- a/modules/def_attributes.py +++ b/modules/def_attributes.py @@ -1,7 +1,7 @@ from pydantic import BaseModel, Field from typing import List, Dict, Any, Optional -# Definiere das Modell für Attributdefinitionen +# Define the model for attribute definitions class AttributeDefinition(BaseModel): name: str label: str @@ -16,7 +16,7 @@ class AttributeDefinition(BaseModel): validation: Optional[Dict[str, Any]] = None help_text: Optional[str] = None -# Hilfsklassen für Typzuordnung +# Helper classes for type mapping type_mappings = { "int": "number", "str": "string", @@ -30,7 +30,7 @@ type_mappings = { "Optional[Dict[str, Any]]": "object" } -# Spezielle Feldtypen basierend auf Namenskonventionen +# Special field types based on naming conventions special_field_types = { "content": "textarea", "description": "textarea", @@ -42,59 +42,59 @@ special_field_types = { "type": "select" } -# Funktion zum Konvertieren eines Pydantic-Modells in Attributdefinitionen +# Function to convert a Pydantic model into attribute definitions def get_model_attributes(model_class, user_language="de"): """ - Konvertiert ein Pydantic-Modell in eine Liste von AttributeDefinition-Objekten + Converts a Pydantic model into a list of AttributeDefinition objects """ attributes = [] - # Gehe alle Felder im Modell durch + # Go through all fields in the model for i, (field_name, field) in enumerate(model_class.__fields__.items()): - # Überspringe 
interne Felder + # Skip internal fields if field_name.startswith('_') or field_name in ["label", "field_labels"]: continue - # Bestimme den Feldtyp + # Determine the field type field_type = type_mappings.get(str(field.type_), "string") - # Prüfe auf spezielle Feldtypen + # Check for special field types if field_name in special_field_types: field_type = special_field_types[field_name] - # Hole das Label (falls vorhanden) + # Get the label (if available) field_label = field_name.replace('_', ' ').capitalize() if hasattr(model_class, 'field_labels') and field_name in model_class.field_labels: label_obj = model_class.field_labels[field_name] field_label = label_obj.get_label(user_language) - # Standardwerte und Required-Status ermitteln + # Determine default values and required status required = field.required default_value = field.default if not field.required else None - # Hinweise auf Validierungsregeln + # Check for validation rules validation = None if field.validators: validation = {"has_validators": True} - # Platzhaltertext - placeholder = f"Bitte {field_label} eingeben" + # Placeholder text + placeholder = f"Please enter {field_label}" - # Spezielle Optionen für Select-Felder + # Special options for Select fields options = None if field_type == "select": if field_name == "type" and model_class.__name__ == "Agent": options = [ - {"value": "Analyse", "label": "Analyse"}, + {"value": "Analysis", "label": "Analysis"}, {"value": "Transformation", "label": "Transformation"}, - {"value": "Generierung", "label": "Generierung"}, - {"value": "Klassifikation", "label": "Klassifikation"}, - {"value": "Benutzerdefiniert", "label": "Benutzerdefiniert"} + {"value": "Generation", "label": "Generation"}, + {"value": "Classification", "label": "Classification"}, + {"value": "Custom", "label": "Custom"} ] - # Extrahiere die Beschreibung aus dem Field-Objekt + # Extract description from Field object description = None - # Versuche, die description aus verschiedenen möglichen 
Quellen zu holen + # Try to get description from various possible sources if hasattr(field, 'field_info') and hasattr(field.field_info, 'description'): description = field.field_info.description elif hasattr(field, 'description'): @@ -102,7 +102,7 @@ def get_model_attributes(model_class, user_language="de"): elif hasattr(field, 'schema') and hasattr(field.schema, 'description'): description = field.schema.description - # Attributdefinition erstellen + # Create attribute definition attr_def = AttributeDefinition( name=field_name, label=field_label, @@ -115,7 +115,7 @@ def get_model_attributes(model_class, user_language="de"): visible=field_name not in ["hashed_password", "mandate_id", "user_id"], order=i, validation=validation, - help_text=description or "" # Setze leeren String als Standardwert, wenn keine Beschreibung gefunden wurde + help_text=description or "" # Set empty string as default value if no description found ) attributes.append(attr_def) diff --git a/modules/gateway_interface.py b/modules/gateway_interface.py index 96830b28..73a59379 100644 --- a/modules/gateway_interface.py +++ b/modules/gateway_interface.py @@ -15,36 +15,36 @@ pwd_context = CryptContext(schemes=["argon2"], deprecated="auto") class GatewayInterface: """ - Interface zum Gateway-System. - Verwaltet Benutzer und Mandanten. + Interface to the Gateway system. + Manages users and mandates. """ def __init__(self, mandate_id: int = None, user_id: int = None): """ - Initialisiert das Gateway-Interface mit optionalem Mandanten- und Benutzerkontext. + Initializes the Gateway Interface with optional mandate and user context. 
Args: - mandate_id: ID des aktuellen Mandanten (optional) - user_id: ID des aktuellen Benutzers (optional) + mandate_id: ID of the current mandate (optional) + user_id: ID of the current user (optional) """ - # Bei der Initialisierung kann der Kontext leer sein + # Context can be empty during initialization self.mandate_id = mandate_id self.user_id = user_id - # Datenmodell-Modul importieren + # Import data model module try: self.model_module = importlib.import_module("modules.gateway_model") - logger.info("gateway_model erfolgreich importiert") + logger.info("gateway_model successfully imported") except ImportError as e: - logger.error(f"Fehler beim Importieren von gateway_model: {e}") + logger.error(f"Error importing gateway_model: {e}") raise - # Datenbank initialisieren + # Initialize database self._initialize_database() def _initialize_database(self): """ - Initialisiert die Datenbank mit minimalen Objekten + Initializes the database with minimal objects """ self.db = DatabaseConnector( @@ -56,23 +56,23 @@ class GatewayInterface: user_id=self.user_id if self.user_id else 0 ) - # Erstelle den Root-Mandanten, falls nötig + # Create Root mandate if needed existing_mandate_id = self.get_initial_id("mandates") mandates = self.db.get_recordset("mandates") if existing_mandate_id is None or not mandates: - logger.info("Erstelle Root-Mandant") + logger.info("Creating Root mandate") root_mandate = { "name": "Root", "language": "de" } created_mandate = self.db.record_create("mandates", root_mandate) - logger.info(f"Root-Mandant wurde erstellt mit ID {created_mandate['id']}") + logger.info(f"Root mandate created with ID {created_mandate['id']}") - # Aktualisiere den Mandanten-Kontext + # Update mandate context self.mandate_id = created_mandate['id'] self.user_id = created_mandate['user_id'] - # Konnektor mit korrektem Kontext neu erstellen + # Recreate connector with correct context self.db = DatabaseConnector( db_host=APP_CONFIG.get("DB_SYSTEM_HOST"), 
db_database=APP_CONFIG.get("DB_SYSTEM_DATABASE"), @@ -82,11 +82,11 @@ class GatewayInterface: user_id=self.user_id ) - # Erstelle den Admin-Benutzer, falls nötig + # Create Admin user if needed existing_user_id = self.get_initial_id("users") users = self.db.get_recordset("users") if existing_user_id is None or not users: - logger.info("Erstelle Admin-Benutzer") + logger.info("Creating Admin user") admin_user = { "mandate_id": self.mandate_id, "username": "admin", @@ -94,16 +94,16 @@ class GatewayInterface: "full_name": "Administrator", "disabled": False, "language": "de", - "privilege": "sysadmin", # SysAdmin-Berechtigung - "hashed_password": self._get_password_hash("admin") # In der Produktion ein sicheres Passwort verwenden! + "privilege": "sysadmin", # SysAdmin privilege + "hashed_password": self._get_password_hash("admin") # Use a secure password in production! } created_user = self.db.record_create("users", admin_user) - logger.info(f"Admin-Benutzer wurde erstellt mit ID {created_user['id']}") + logger.info(f"Admin user created with ID {created_user['id']}") - # Aktualisiere den Benutzer-Kontext + # Update user context self.user_id = created_user['id'] - # Konnektor mit korrektem Kontext neu erstellen + # Recreate connector with correct context self.db = DatabaseConnector( db_host=APP_CONFIG.get("DB_SYSTEM_HOST"), db_database=APP_CONFIG.get("DB_SYSTEM_DATABASE"), @@ -113,37 +113,37 @@ class GatewayInterface: user_id=self.user_id ) def get_initial_id(self, table: str) -> Optional[int]: - """ Gibt die initiale ID für eine Tabelle zurück """ + """Returns the initial ID for a table""" return self.db.get_initial_id(table) def _get_password_hash(self, password: str) -> str: - """Erstellt einen Hash für ein Passwort""" + """Creates a hash for a password""" return pwd_context.hash(password) def _verify_password(self, plain_password: str, hashed_password: str) -> bool: - """Überprüft, ob das Passwort zum Hash passt""" + """Checks if the password matches the hash""" 
return pwd_context.verify(plain_password, hashed_password) def _get_current_timestamp(self) -> str: - """Gibt den aktuellen Zeitstempel im ISO-Format zurück""" + """Returns the current timestamp in ISO format""" from datetime import datetime return datetime.now().isoformat() - # Mandanten-Methoden + # Mandate methods def get_all_mandates(self) -> List[Dict[str, Any]]: - """Gibt alle Mandanten zurück""" + """Returns all mandates""" return self.db.get_recordset("mandates") def get_mandate(self, mandate_id: int) -> Optional[Dict[str, Any]]: - """Gibt einen Mandanten anhand seiner ID zurück""" + """Returns a mandate by its ID""" mandates = self.db.get_recordset("mandates", record_filter={"id": mandate_id}) if mandates: return mandates[0] return None def create_mandate(self, name: str, language: str = "de") -> Dict[str, Any]: - """Erstellt einen neuen Mandanten""" + """Creates a new mandate""" mandate_data = { "name": name, "language": language @@ -153,72 +153,72 @@ class GatewayInterface: def update_mandate(self, mandate_id: int, mandate_data: Dict[str, Any]) -> Dict[str, Any]: """ - Aktualisiert einen bestehenden Mandanten + Updates an existing mandate Args: - mandate_id: Die ID des zu aktualisierenden Mandanten - mandate_data: Die zu aktualisierenden Mandantendaten + mandate_id: The ID of the mandate to update + mandate_data: The mandate data to update Returns: - Dict[str, Any]: Die aktualisierten Mandantendaten + Dict[str, Any]: The updated mandate data Raises: - ValueError: Wenn der Mandant nicht gefunden wurde + ValueError: If the mandate is not found """ - # Prüfe, ob der Mandant existiert + # Check if the mandate exists mandate = self.get_mandate(mandate_id) if not mandate: - raise ValueError(f"Mandant mit ID {mandate_id} nicht gefunden") + raise ValueError(f"Mandate with ID {mandate_id} not found") - # Aktualisiere den Mandanten + # Update the mandate updated_mandate = self.db.record_modify("mandates", mandate_id, mandate_data) return updated_mandate def 
delete_mandate(self, mandate_id: int) -> bool: """ - Löscht einen Mandanten und alle damit verbundenen Benutzer und Daten + Deletes a mandate and all associated users and data Args: - mandate_id: Die ID des zu löschenden Mandanten + mandate_id: The ID of the mandate to delete Returns: - bool: True, wenn der Mandant erfolgreich gelöscht wurde, sonst False + bool: True if the mandate was successfully deleted, otherwise False """ - # Prüfe, ob der Mandant existiert + # Check if the mandate exists mandate = self.get_mandate(mandate_id) if not mandate: return False - # Prüfe, ob es der initiale Mandant ist + # Check if it's the initial mandate initial_mandate_id = self.get_initial_id("mandates") if initial_mandate_id is not None and mandate_id == initial_mandate_id: - logger.warning(f"Versuch, den Root-Mandanten zu löschen, wurde verhindert") + logger.warning(f"Attempt to delete the Root mandate was prevented") return False - # Finde alle Benutzer des Mandanten + # Find all users of the mandate users = self.get_users_by_mandate(mandate_id) - # Lösche alle Benutzer des Mandanten und ihre zugehörigen Daten + # Delete all users of the mandate and their associated data for user in users: self.delete_user(user["id"]) - # Lösche den Mandanten + # Delete the mandate success = self.db.record_delete("mandates", mandate_id) if success: - logger.info(f"Mandant mit ID {mandate_id} wurde erfolgreich gelöscht") + logger.info(f"Mandate with ID {mandate_id} was successfully deleted") else: - logger.error(f"Fehler beim Löschen des Mandanten mit ID {mandate_id}") + logger.error(f"Error deleting mandate with ID {mandate_id}") return success - # Benutzer-Methoden + # User methods def get_all_users(self) -> List[Dict[str, Any]]: - """Gibt alle Benutzer zurück""" + """Returns all users""" users = self.db.get_recordset("users") - # Entferne die Passwort-Hashes aus der Rückgabe + # Remove password hashes from the response for user in users: if "hashed_password" in user: del 
user["hashed_password"] @@ -226,23 +226,23 @@ class GatewayInterface: def get_users_by_mandate(self, mandate_id: int) -> List[Dict[str, Any]]: """ - Gibt alle Benutzer eines bestimmten Mandanten zurück + Returns all users of a specific mandate Args: - mandate_id: Die ID des Mandanten + mandate_id: The ID of the mandate Returns: - List[Dict[str, Any]]: Liste der Benutzer des Mandanten + List[Dict[str, Any]]: List of users in the mandate """ users = self.db.get_recordset("users", record_filter={"mandate_id": mandate_id}) - # Entferne die Passwort-Hashes aus der Rückgabe + # Remove password hashes from the response for user in users: if "hashed_password" in user: del user["hashed_password"] return users def get_user_by_username(self, username: str) -> Optional[Dict[str, Any]]: - """Gibt einen Benutzer anhand seines Benutzernamens zurück""" + """Returns a user by username""" users = self.db.get_recordset("users") for user in users: if user.get("username") == username: @@ -250,11 +250,11 @@ class GatewayInterface: return None def get_user(self, user_id: int) -> Optional[Dict[str, Any]]: - """Gibt einen Benutzer anhand seiner ID zurück""" + """Returns a user by ID""" users = self.db.get_recordset("users", record_filter={"id": user_id}) if users: user = users[0] - # Entferne das Passwort-Hash aus der Rückgabe für die API + # Remove password hash from the API response if "hashed_password" in user: user_copy = user.copy() del user_copy["hashed_password"] @@ -266,30 +266,30 @@ class GatewayInterface: full_name: str = None, language: str = "de", mandate_id: int = None, disabled: bool = False, privilege: str = "user") -> Dict[str, Any]: """ - Erstellt einen neuen Benutzer + Creates a new user Args: - username: Der Benutzername - password: Das Passwort - email: Die E-Mail-Adresse (optional) - full_name: Der vollständige Name (optional) - language: Die bevorzugte Sprache (Standard: "de") - mandate_id: Die ID des Mandanten (optional) - disabled: Ob der Benutzer deaktiviert ist 
(Standard: False) - privilege: Die Berechtigungsstufe (Standard: "user") + username: The username + password: The password + email: The email address (optional) + full_name: The full name (optional) + language: The preferred language (default: "de") + mandate_id: The ID of the mandate (optional) + disabled: Whether the user is disabled (default: False) + privilege: The privilege level (default: "user") Returns: - Dict[str, Any]: Die erstellten Benutzerdaten + Dict[str, Any]: The created user data Raises: - ValueError: Wenn der Benutzername bereits existiert + ValueError: If the username already exists """ - # Prüfe, ob der Benutzername bereits existiert + # Check if the username already exists existing_user = self.get_user_by_username(username) if existing_user: - raise ValueError(f"Benutzer '{username}' existiert bereits") + raise ValueError(f"User '{username}' already exists") - # Verwende den übergebenen mandate_id oder den aktuellen Kontext + # Use the provided mandate_id or the current context user_mandate_id = mandate_id if mandate_id is not None else self.mandate_id user_data = { @@ -305,7 +305,7 @@ class GatewayInterface: created_user = self.db.record_create("users", user_data) - # Entferne das Passwort-Hash aus der Rückgabe + # Remove password hash from the response if "hashed_password" in created_user: del created_user["hashed_password"] @@ -313,14 +313,14 @@ class GatewayInterface: def authenticate_user(self, username: str, password: str) -> Optional[Dict[str, Any]]: """ - Authentifiziert einen Benutzer anhand von Benutzername und Passwort + Authenticates a user by username and password Args: - username: Der Benutzername - password: Das Passwort + username: The username + password: The password Returns: - Optional[Dict[str, Any]]: Die Benutzerdaten oder None, wenn die Authentifizierung fehlschlägt + Optional[Dict[str, Any]]: The user data or None if authentication fails """ user = self.get_user_by_username(username) @@ -330,11 +330,11 @@ class 
GatewayInterface: if not self._verify_password(password, user.get("hashed_password", "")): return None - # Prüfe, ob der Benutzer deaktiviert ist + # Check if the user is disabled if user.get("disabled", False): return None - # Erstelle eine Kopie ohne Passwort-Hash + # Create a copy without password hash authenticated_user = {**user} if "hashed_password" in authenticated_user: del authenticated_user["hashed_password"] @@ -343,112 +343,111 @@ class GatewayInterface: def update_user(self, user_id: int, user_data: Dict[str, Any]) -> Dict[str, Any]: """ - Aktualisiert einen Benutzer + Updates a user Args: - user_id: Die ID des zu aktualisierenden Benutzers - user_data: Die zu aktualisierenden Benutzerdaten + user_id: The ID of the user to update + user_data: The user data to update Returns: - Dict[str, Any]: Die aktualisierten Benutzerdaten + Dict[str, Any]: The updated user data Raises: - ValueError: Wenn der Benutzer nicht gefunden wurde + ValueError: If the user is not found """ - # Hole den aktuellen Benutzer mit Hash-Passwort (direkt aus der DB) + # Get the current user with password hash (directly from DB) users = self.db.get_recordset("users", record_filter={"id": user_id}) if not users: - raise ValueError(f"Benutzer mit ID {user_id} nicht gefunden") + raise ValueError(f"User with ID {user_id} not found") user = users[0] - # Wenn das Passwort geändert werden soll, hashe es + # If the password is being changed, hash it if "password" in user_data: user_data["hashed_password"] = self._get_password_hash(user_data["password"]) del user_data["password"] - # Aktualisiere den Benutzer + # Update the user updated_user = self.db.record_modify("users", user_id, user_data) - # Entferne das Passwort-Hash aus der Rückgabe + # Remove password hash from the response if "hashed_password" in updated_user: del updated_user["hashed_password"] return updated_user def disable_user(self, user_id: int) -> Dict[str, Any]: - """Deaktiviert einen Benutzer""" + """Disables a user""" 
return self.update_user(user_id, {"disabled": True}) def enable_user(self, user_id: int) -> Dict[str, Any]: - """Aktiviert einen Benutzer""" + """Enables a user""" return self.update_user(user_id, {"disabled": False}) def _delete_user_referenced_data(self, user_id: int) -> None: """ - Löscht alle Daten, die mit einem Benutzer verbunden sind + Deletes all data associated with a user Args: - user_id: Die ID des Benutzers + user_id: The ID of the user """ - # Hier werden alle Tabellen durchsucht und alle Einträge gelöscht, - # die auf diesen Benutzer verweisen + # Here all tables are searched and all entries referencing this user are deleted - # Attribute des Benutzers löschen + # Delete user attributes try: attributes = self.db.get_recordset("attributes", record_filter={"user_id": user_id}) for attribute in attributes: self.db.record_delete("attributes", attribute["id"]) except Exception as e: - logger.error(f"Fehler beim Löschen der Attribute für Benutzer {user_id}: {e}") + logger.error(f"Error deleting attributes for user {user_id}: {e}") - # Weitere Tabellen, die auf den Benutzer verweisen könnten - # (Je nach Datenbankstruktur der Anwendung) + # Other tables that might reference the user + # (Depending on the application's database structure) - logger.info(f"Alle referenzierten Daten für Benutzer {user_id} wurden gelöscht") + logger.info(f"All referenced data for user {user_id} has been deleted") def delete_user(self, user_id: int) -> bool: """ - Löscht einen Benutzer und alle damit verbundenen Daten + Deletes a user and all associated data Args: - user_id: Die ID des zu löschenden Benutzers + user_id: The ID of the user to delete Returns: - bool: True, wenn der Benutzer erfolgreich gelöscht wurde, sonst False + bool: True if the user was successfully deleted, otherwise False """ - # Prüfe, ob der Benutzer existiert + # Check if the user exists users = self.db.get_recordset("users", record_filter={"id": user_id}) if not users: return False - # Prüfe, ob es der 
initiale Benutzer ist + # Check if it's the initial user initial_user_id = self.get_initial_id("users") if initial_user_id is not None and user_id == initial_user_id: - logger.warning("Versuch, den Root-Admin zu löschen, wurde verhindert") + logger.warning("Attempt to delete the Root Admin was prevented") return False - # Lösche alle mit dem Benutzer verbundenen Daten + # Delete all data associated with the user self._delete_user_referenced_data(user_id) - # Lösche den Benutzer + # Delete the user success = self.db.record_delete("users", user_id) if success: - logger.info(f"Benutzer mit ID {user_id} wurde erfolgreich gelöscht") + logger.info(f"User with ID {user_id} was successfully deleted") else: - logger.error(f"Fehler beim Löschen des Benutzers mit ID {user_id}") + logger.error(f"Error deleting user with ID {user_id}") return success -# Singleton-Factory für GatewayInterface-Instanzen pro Kontext +# Singleton Factory for GatewayInterface instances per context _gateway_interfaces = {} def get_gateway_interface(mandate_id: int = None, user_id: int = None) -> GatewayInterface: """ - Gibt eine GatewayInterface-Instanz für den angegebenen Kontext zurück. - Wiederverwendet bestehende Instanzen. + Returns a GatewayInterface instance for the specified context. + Reuses existing instances. 
""" context_key = f"{mandate_id}_{user_id}" if context_key not in _gateway_interfaces: diff --git a/modules/gateway_model.py b/modules/gateway_model.py index 976aa146..52f3e0ea 100644 --- a/modules/gateway_model.py +++ b/modules/gateway_model.py @@ -3,91 +3,91 @@ from typing import List, Dict, Any, Optional class Label(BaseModel): - """Label für ein Attribut oder eine Klasse mit Unterstützung für mehrere Sprachen""" + """Label for an attribute or a class with support for multiple languages""" default: str translations: Dict[str, str] = {} def get_label(self, language: str = None): - """Gibt das Label in der angegebenen Sprache zurück, oder den Standardwert wenn nicht verfügbar""" + """Returns the label in the specified language, or the default value if not available""" if language and language in self.translations: return self.translations[language] return self.default class Mandate(BaseModel): - """Datenmodell für einen Mandanten""" - id: int = Field(description="Eindeutige ID des Mandanten") - name: str = Field(description="Name des Mandanten") - language: str = Field(description="Standardsprache des Mandanten") + """Data model for a mandate""" + id: int = Field(description="Unique ID of the mandate") + name: str = Field(description="Name of the mandate") + language: str = Field(description="Default language of the mandate") label: Label = Field( - default=Label(default="Mandant", translations={"en": "Mandate", "fr": "Mandat"}), - description="Label für die Klasse" + default=Label(default="Mandate", translations={"en": "Mandate", "fr": "Mandat"}), + description="Label for the class" ) - # Labels für Attribute + # Labels for attributes field_labels: Dict[str, Label] = { "id": Label(default="ID", translations={}), - "name": Label(default="Name des Mandanten", translations={"en": "Mandate name", "fr": "Nom du mandat"}), - "language": Label(default="Sprache", translations={"en": "Language", "fr": "Langue"}) + "name": Label(default="Name of the mandate", 
translations={"en": "Mandate name", "fr": "Nom du mandat"}), + "language": Label(default="Language", translations={"en": "Language", "fr": "Langue"}) } class User(BaseModel): - """Datenmodell für einen Benutzer""" - id: int = Field(description="Eindeutige ID des Benutzers") - mandate_id: int = Field(description="ID des zugehörigen Mandanten") - username: str = Field(description="Benutzername für die Anmeldung") - email: Optional[str] = Field(None, description="E-Mail-Adresse des Benutzers") - full_name: Optional[str] = Field(None, description="Vollständiger Name des Benutzers") - language: str = Field(description="Bevorzugte Sprache des Benutzers") - disabled: Optional[bool] = Field(False, description="Gibt an, ob der Benutzer deaktiviert ist") - privilege: str = Field(description="Berechtigungsstufe") #sysadmin,admin,user + """Data model for a user""" + id: int = Field(description="Unique ID of the user") + mandate_id: int = Field(description="ID of the associated mandate") + username: str = Field(description="Username for login") + email: Optional[str] = Field(None, description="Email address of the user") + full_name: Optional[str] = Field(None, description="Full name of the user") + language: str = Field(description="Preferred language of the user") + disabled: Optional[bool] = Field(False, description="Indicates whether the user is disabled") + privilege: str = Field(description="Permission level") #sysadmin,admin,user label: Label = Field( - default=Label(default="Benutzer", translations={"en": "User", "fr": "Utilisateur"}), - description="Label für die Klasse" + default=Label(default="User", translations={"en": "User", "fr": "Utilisateur"}), + description="Label for the class" ) - # Labels für Attribute + # Labels for attributes field_labels: Dict[str, Label] = { "id": Label(default="ID", translations={}), - "mandate_id": Label(default="Mandanten-ID", translations={"en": "Mandate ID", "fr": "ID de mandat"}), - "username": Label(default="Benutzername", 
translations={"en": "Username", "fr": "Nom d'utilisateur"}), - "email": Label(default="E-Mail", translations={"en": "Email", "fr": "E-mail"}), - "full_name": Label(default="Vollständiger Name", translations={"en": "Full name", "fr": "Nom complet"}), - "language": Label(default="Sprache", translations={"en": "Language", "fr": "Langue"}), - "disabled": Label(default="Deaktiviert", translations={"en": "Disabled", "fr": "Désactivé"}), - "privilege": Label(default="Berechtigungsstufe", translations={"en": "Access level", "fr": "Niveau d'accès"}), + "mandate_id": Label(default="Mandate ID", translations={"en": "Mandate ID", "fr": "ID de mandat"}), + "username": Label(default="Username", translations={"en": "Username", "fr": "Nom d'utilisateur"}), + "email": Label(default="Email", translations={"en": "Email", "fr": "E-mail"}), + "full_name": Label(default="Full name", translations={"en": "Full name", "fr": "Nom complet"}), + "language": Label(default="Language", translations={"en": "Language", "fr": "Langue"}), + "disabled": Label(default="Disabled", translations={"en": "Disabled", "fr": "Désactivé"}), + "privilege": Label(default="Permission level", translations={"en": "Access level", "fr": "Niveau d'accès"}), } class UserInDB(User): - """Erweiterte Benutzerklasse mit Passwort-Hash""" - hashed_password: str = Field(description="Hash des Benutzerpassworts") + """Extended user class with password hash""" + hashed_password: str = Field(description="Hash of the user password") label: Label = Field( - default=Label(default="Benutzer Zugriff", translations={"en": "User Access", "fr": "Accès de l'utilisateur"}), - description="Label für die Klasse" + default=Label(default="User Access", translations={"en": "User Access", "fr": "Accès de l'utilisateur"}), + description="Label for the class" ) - # Zusätzliches Label für das Passwort-Feld + # Additional label for the password field field_labels: Dict[str, Label] = { - "hashed_password": Label(default="Passwort-Hash", 
translations={"en": "Password hash", "fr": "Hachage de mot de passe"}) + "hashed_password": Label(default="Password hash", translations={"en": "Password hash", "fr": "Hachage de mot de passe"}) } class Token(BaseModel): - """Datenmodell für ein Authentifizierungstoken""" - access_token: str = Field(description="Das ausgestellte Zugriffstoken") - token_type: str = Field(description="Typ des Tokens (meist 'bearer')") + """Data model for an authentication token""" + access_token: str = Field(description="The issued access token") + token_type: str = Field(description="Type of token (usually 'bearer')") label: Label = Field( default=Label(default="Token", translations={"en": "Token", "fr": "Jeton"}), - description="Label für die Klasse" + description="Label for the class" ) - # Labels für Attribute + # Labels for attributes field_labels: Dict[str, Label] = { - "access_token": Label(default="Zugriffstoken", translations={"en": "Access token", "fr": "Jeton d'accès"}), - "token_type": Label(default="Token-Typ", translations={"en": "Token type", "fr": "Type de jeton"}) + "access_token": Label(default="Access token", translations={"en": "Access token", "fr": "Jeton d'accès"}), + "token_type": Label(default="Token type", translations={"en": "Token type", "fr": "Type de jeton"}) } \ No newline at end of file diff --git a/modules/lucydom_interface.py b/modules/lucydom_interface.py index 0b0d70a2..aed9ec44 100644 --- a/modules/lucydom_interface.py +++ b/modules/lucydom_interface.py @@ -1,3 +1,8 @@ +""" +Interface to LucyDOM database. +Uses the JSON connector for data access with added language support. +""" + import os import logging import uuid @@ -36,37 +41,41 @@ class FileDeletionError(FileError): class LucyDOMInterface: """ - Interface zur LucyDOM-Datenbank. - Verwendet den JSON-Konnektor für den Datenzugriff. + Interface to the LucyDOM database. + Uses the JSON connector for data access. 
""" def __init__(self, mandate_id: int, user_id: int): """ - Initialisiert das LucyDOM-Interface mit Mandanten- und Benutzerkontext. + Initializes the LucyDOM Interface with mandate and user context. Args: - mandate_id: ID des aktuellen Mandanten - user_id: ID des aktuellen Benutzers + mandate_id: ID of the current mandate + user_id: ID of the current user """ self.mandate_id = mandate_id self.user_id = user_id - # Datenmodell-Modul importieren + # Add language settings + self.user_language = "en" # Default user language + self.ai_service = None # Will be set externally + + # Import data model module try: self.model_module = importlib.import_module("modules.lucydom_model") - logger.info("lucydom_model erfolgreich importiert") + logger.info("lucydom_model successfully imported") except ImportError as e: - logger.error(f"Fehler beim Importieren von lucydom_model: {e}") + logger.error(f"Error importing lucydom_model: {e}") raise - # Datenbank initialisieren, falls nötig + # Initialize database if needed self._initialize_database() def _initialize_database(self): """ - Initialisiert die Datenbank mit minimalen Objekten für den angemeldeten Benutzer im Mandanten, falls sie noch nicht existiert. - Ohne gültigen Benutzer keine Initialisierung. - Erstellt für jede im Datenmodell definierte Tabelle einen initialen Datensatz. + Initializes the database with minimal objects for the logged-in user in the mandate, if it doesn't exist yet. + No initialization without a valid user. + Creates an initial dataset for each table defined in the data model. 
""" effective_mandate_id = self.mandate_id effective_user_id = self.user_id @@ -83,79 +92,125 @@ class LucyDOMInterface: user_id=self.user_id ) - # Initialisierung von Standard-Prompts für verschiedene Bereiche + # Initialize standard prompts for different areas prompts = self.db.get_recordset("prompts") if not prompts: - logger.info("Erstelle Standard-Prompts") + logger.info("Creating standard prompts") - # Standard-Prompts definieren + # Define standard prompts standard_prompts = [ { "mandate_id": effective_mandate_id, "user_id": effective_user_id, - "content": "Recherchiere die aktuellen Markttrends und Entwicklungen im Bereich [THEMA]. Sammle Informationen zu führenden Unternehmen, innovativen Produkten oder Dienstleistungen und aktuellen Herausforderungen. Präsentiere die Ergebnisse in einer strukturierten Übersicht mit relevanten Daten und Quellen.", - "name": "Web Research: Marktforschung" + "content": "Research the current market trends and developments in [TOPIC]. Collect information about leading companies, innovative products or services, and current challenges. Present the results in a structured overview with relevant data and sources.", + "name": "Web Research: Market Research" }, { "mandate_id": effective_mandate_id, "user_id": effective_user_id, - "content": "Analysiere den beigefügten Datensatz zu [THEMA] und identifiziere die wichtigsten Trends, Muster und Auffälligkeiten. Führe statistische Berechnungen durch, um deine Erkenntnisse zu untermauern. Stelle die Ergebnisse in einer klar strukturierten Analyse dar und ziehe relevante Schlussfolgerungen.", - "name": "Analyse: Datenanalyse" + "content": "Analyze the attached dataset on [TOPIC] and identify the most important trends, patterns, and anomalies. Perform statistical calculations to support your findings. 
Present the results in a clearly structured analysis and draw relevant conclusions.", + "name": "Analysis: Data Analysis" }, { "mandate_id": effective_mandate_id, "user_id": effective_user_id, - "content": "Erstelle ein detailliertes Protokoll unserer Besprechung zum Thema [THEMA]. Erfasse alle besprochenen Punkte, getroffenen Entscheidungen und vereinbarten Maßnahmen. Strukturiere das Protokoll übersichtlich mit Tagesordnungspunkten, Teilnehmerliste und klaren Verantwortlichkeiten für die Follow-up-Aktionen.", - "name": "Protokoll: Besprechungsprotokoll" + "content": "Create a detailed protocol of our meeting on [TOPIC]. Capture all discussed points, decisions made, and agreed measures. Structure the protocol clearly with agenda items, participant list, and clear responsibilities for follow-up actions.", + "name": "Protocol: Meeting Minutes" }, { "mandate_id": effective_mandate_id, "user_id": effective_user_id, - "content": "Entwickle ein UI/UX-Designkonzept für [ANWENDUNG/WEBSITE]. Berücksichtige die Zielgruppe, Hauptfunktionen und die Markenidentität. Beschreibe die visuelle Gestaltung, Navigation, Interaktionsmuster und Informationsarchitektur. Erläutere, wie das Design die Benutzerfreundlichkeit und das Nutzererlebnis optimiert.", + "content": "Develop a UI/UX design concept for [APPLICATION/WEBSITE]. Consider the target audience, main functions, and brand identity. Describe the visual design, navigation, interaction patterns, and information architecture. 
Explain how the design optimizes user-friendliness and user experience.", "name": "Design: UI/UX Design" } ] - # Prompts erstellen + # Create prompts for prompt_data in standard_prompts: created_prompt = self.db.record_create("prompts", prompt_data) - logger.info(f"Prompt '{prompt_data.get('name', 'Standard')}' wurde erstellt mit ID {created_prompt['id']}") + logger.info(f"Prompt '{prompt_data.get('name', 'Standard')}' was created with ID {created_prompt['id']}") + # Language support methods + + def set_ai_service(self, ai_service): + """Set the AI service for API calls""" + self.ai_service = ai_service + + def set_user_language(self, language_code: str): + """Set the user's preferred language""" + self.user_language = language_code + logger.info(f"User language set to: {language_code}") + + async def call_ai(self, messages: List[Dict[str, str]], + produce_user_answer: bool = False, + temperature: float = None) -> str: + """ + Enhanced AI service call with language support + + Args: + messages: List of message dictionaries + produce_user_answer: Whether this response is for the end-user + temperature: Optional temperature setting + + Returns: + AI response text + """ + if not self.ai_service: + logger.error("AI service not set in LucyDOMInterface") + return "Error: AI service not available" + + # Add language instruction for user-facing responses + if produce_user_answer and self.user_language: + if messages and messages[0]["role"] == "system": + if "language" not in messages[0]["content"].lower(): + messages[0]["content"] = f"Please respond in {self.user_language} language. {messages[0]['content']}" + else: + # Insert a system message with language instruction + messages.insert(0, { + "role": "system", + "content": f"Please respond in {self.user_language} language." 
+ }) + + # Call the AI service + if temperature is not None: + return await self.ai_service.call_api(messages, temperature=temperature) + else: + return await self.ai_service.call_api(messages) # Utilities def get_initial_id(self, table: str) -> Optional[int]: """ - Gibt die initiale ID für eine Tabelle zurück. + Returns the initial ID for a table. Args: - table: Name der Tabelle + table: Name of the table Returns: - Die initiale ID oder None, wenn nicht vorhanden + The initial ID or None if not present """ return self.db.get_initial_id(table) def _get_current_timestamp(self) -> str: - """Gibt den aktuellen Zeitstempel im ISO-Format zurück""" + """Returns the current timestamp in ISO format""" return datetime.now().isoformat() - # Prompt-Methoden + # Prompt methods def get_all_prompts(self) -> List[Dict[str, Any]]: - """Gibt alle Prompts des aktuellen Mandanten zurück""" + """Returns all prompts for the current mandate""" return self.db.get_recordset("prompts") def get_prompt(self, prompt_id: int) -> Optional[Dict[str, Any]]: - """Gibt einen Prompt anhand seiner ID zurück""" + """Returns a prompt by its ID""" prompts = self.db.get_recordset("prompts", record_filter={"id": prompt_id}) if prompts: return prompts[0] return None def create_prompt(self, content: str, name: str) -> Dict[str, Any]: - """Erstellt einen neuen Prompt""" + """Creates a new prompt""" prompt_data = { "mandate_id": self.mandate_id, "user_id": self.user_id, @@ -168,21 +223,21 @@ class LucyDOMInterface: def update_prompt(self, prompt_id: int, content: str = None, name: str = None) -> Dict[str, Any]: """ - Aktualisiert einen vorhandenen Prompt + Updates an existing prompt Args: - prompt_id: ID des zu aktualisierenden Prompts - content: Neuer Inhalt des Prompts + prompt_id: ID of the prompt to update + content: New content for the prompt Returns: - Das aktualisierte Prompt-Objekt + The updated prompt object """ - # Prüfen, ob der Prompt existiert + # Check if the prompt exists prompt = 
self.get_prompt(prompt_id) if not prompt: return None - # Daten für die Aktualisierung vorbereiten + # Prepare data for update prompt_data = {} if content is not None: @@ -190,18 +245,18 @@ class LucyDOMInterface: if name is not None: prompt_data["name"] = name - # Prompt aktualisieren + # Update prompt return self.db.record_modify("prompts", prompt_id, prompt_data) def delete_prompt(self, prompt_id: int) -> bool: """ - Löscht einen Prompt aus der Datenbank + Deletes a prompt from the database Args: - prompt_id: ID des zu löschenden Prompts + prompt_id: ID of the prompt to delete Returns: - True, wenn der Prompt erfolgreich gelöscht wurde, sonst False + True if the prompt was successfully deleted, otherwise False """ return self.db.record_delete("prompts", prompt_id) @@ -209,18 +264,18 @@ class LucyDOMInterface: # File Utilities def calculate_file_hash(self, file_content: bytes) -> str: - """Berechnet einen SHA-256-Hash für den Dateiinhalt""" + """Calculates a SHA-256 hash for the file content""" return hashlib.sha256(file_content).hexdigest() def check_for_duplicate_file(self, file_hash: str) -> Optional[Dict[str, Any]]: - """Prüft, ob bereits eine Datei mit demselben Hash existiert""" + """Checks if a file with the same hash already exists""" files = self.db.get_recordset("files", record_filter={"file_hash": file_hash}) if files: return files[0] return None def get_mime_type(self, filename: str) -> str: - """Ermittelt den MIME-Typ basierend auf der Dateiendung""" + """Determines the MIME type based on the file extension""" import os ext = os.path.splitext(filename)[1].lower()[1:] extension_to_mime = { @@ -250,27 +305,27 @@ class LucyDOMInterface: return extension_to_mime.get(ext.lower(), "application/octet-stream") - # File Methoden - Metadaten-basierte Operationen + # File methods - metadata-based operations def get_all_files(self) -> List[Dict[str, Any]]: """ - Gibt alle Dateien des aktuellen Mandanten zurück ohne Binärdaten. 
+ Returns all files for the current mandate without binary data. Returns: - Liste von FileItem-Objekten ohne Binärdaten + List of FileItem objects without binary data """ files = self.db.get_recordset("files") return files def get_file(self, file_id: int) -> Optional[Dict[str, Any]]: """ - Gibt eine Datei anhand ihrer ID zurück, ohne Binärdaten. + Returns a file by its ID, without binary data. Args: - file_id: ID der gesuchten Datei + file_id: ID of the file Returns: - FileItem ohne Binärdaten oder None, wenn nicht gefunden + FileItem without binary data or None if not found """ files = self.db.get_recordset("files", record_filter={"id": file_id}) if files: @@ -279,17 +334,17 @@ class LucyDOMInterface: def create_file(self, name: str, mime_type: str, size: int = None, file_hash: str = None) -> Dict[str, Any]: """ - Erstellt einen neuen Dateieintrag in der Datenbank ohne Inhalt. - Der eigentliche Dateiinhalt wird separat in der FileData-Tabelle gespeichert. + Creates a new file entry in the database without content. + The actual file content is stored separately in the FileData table. Args: - name: Name der Datei - mime_type: MIME-Typ der Datei - size: Größe der Datei in Bytes - file_hash: Hash-Wert der Datei für Deduplizierung + name: Name of the file + mime_type: MIME type of the file + size: Size of the file in bytes + file_hash: Hash value of the file for deduplication Returns: - Das erstellte FileItem-Objekt + The created FileItem object """ file_data = { "mandate_id": self.mandate_id, @@ -304,43 +359,43 @@ class LucyDOMInterface: def update_file(self, file_id: int, update_data: Dict[str, Any]) -> Dict[str, Any]: """ - Aktualisiert die Metadaten einer vorhandenen Datei ohne die Binärdaten zu beeinflussen. + Updates the metadata of an existing file without affecting the binary data. 
Args: - file_id: ID der zu aktualisierenden Datei - update_data: Dictionary mit zu aktualisierenden Feldern + file_id: ID of the file to update + update_data: Dictionary with fields to update Returns: - Das aktualisierte FileItem-Objekt + The updated FileItem object """ - # Prüfen, ob die Datei existiert + # Check if the file exists file = self.get_file(file_id) if not file: - raise FileNotFoundError(f"Datei mit ID {file_id} nicht gefunden") + raise FileNotFoundError(f"File with ID {file_id} not found") - # Datei aktualisieren + # Update file return self.db.record_modify("files", file_id, update_data) def delete_file(self, file_id: int) -> bool: """ - Löscht eine Datei aus der Datenbank (Metadaten und Inhalt). + Deletes a file from the database (metadata and content). Args: - file_id: ID der Datei + file_id: ID of the file Returns: - True bei Erfolg, False bei Fehler + True on success, False on error """ try: - # Suche die Datei in der Datenbank + # Find the file in the database file = self.get_file(file_id) if not file: - raise FileNotFoundError(f"Datei mit ID {file_id} nicht gefunden") + raise FileNotFoundError(f"File with ID {file_id} not found") - # Prüfe, ob die Datei zum aktuellen Mandanten gehört + # Check if the file belongs to the current mandate if file.get("mandate_id") != self.mandate_id: - raise FilePermissionError(f"Keine Berechtigung zum Löschen der Datei {file_id}") + raise FilePermissionError(f"No permission to delete file {file_id}") # Check for other references to this file (by hash) file_hash = file.get("file_hash") @@ -350,18 +405,18 @@ class LucyDOMInterface: # If other files reference this content, only delete the database entry for FileItem if other_references: - logger.info(f"Andere Referenzen auf den Dateiinhalt gefunden, nur FileItem wird gelöscht: {file_id}") + logger.info(f"Other references to the file content found, only FileItem will be deleted: {file_id}") else: - # Lösche auch den Dateiinhalt in der FileData-Tabelle + # Also delete 
the file content in the FileData table try: file_data_entries = self.db.get_recordset("file_data", record_filter={"id": file_id}) if file_data_entries: self.db.record_delete("file_data", file_id) - logger.info(f"FileData für Datei {file_id} gelöscht") + logger.info(f"FileData for file {file_id} deleted") except Exception as e: - logger.warning(f"Fehler beim Löschen des FileData für Datei {file_id}: {str(e)}") + logger.warning(f"Error deleting FileData for file {file_id}: {str(e)}") - # Lösche den FileItem-Eintrag + # Delete the FileItem entry return self.db.record_delete("files", file_id) except FileNotFoundError as e: @@ -371,22 +426,22 @@ class LucyDOMInterface: # Pass through FilePermissionError raise except Exception as e: - logger.error(f"Fehler beim Löschen der Datei {file_id}: {str(e)}") - raise FileDeletionError(f"Fehler beim Löschen der Datei: {str(e)}") + logger.error(f"Error deleting file {file_id}: {str(e)}") + raise FileDeletionError(f"Error deleting file: {str(e)}") - # FileData Methoden - Binärdaten-basierte Operationen + # FileData methods - binary data operations def create_file_data(self, file_id: int, data: bytes) -> bool: """ - Speichert die Binärdaten einer Datei in der Datenbank als Base64-String. + Stores the binary data of a file in the database as a Base64 string. Args: - file_id: ID der zugehörigen Datei - data: Binärdaten + file_id: ID of the associated file + data: Binary data Returns: - True bei Erfolg, False bei Fehler + True on success, False on error """ try: import base64 @@ -425,19 +480,19 @@ class LucyDOMInterface: logger.info(f"Successfully stored encoded data for file {file_id}") return True except Exception as e: - logger.error(f"Fehler beim Speichern der Binärdaten für Datei {file_id}: {str(e)}") + logger.error(f"Error storing binary data for file {file_id}: {str(e)}") return False def get_file_data(self, file_id: int) -> Optional[bytes]: """ - Gibt die Binärdaten einer Datei zurück. 
- Konvertiert Base64-String aus der Datenbank zurück zu bytes. + Returns the binary data of a file. + Converts Base64 string from the database back to bytes. Args: - file_id: ID der Datei + file_id: ID of the file Returns: - Binärdaten oder None, wenn nicht gefunden + Binary data or None if not found """ import base64 @@ -473,15 +528,15 @@ class LucyDOMInterface: def update_file_data(self, file_id: int, data: Union[bytes, str]) -> bool: """ - Aktualisiert die Binärdaten einer Datei in der Datenbank. - Konvertiert bytes zu Base64-String für die Speicherung. + Updates the binary data of a file in the database. + Converts bytes to Base64 string for storage. Args: - file_id: ID der Datei - data: Neue Binärdaten oder kodierte Daten + file_id: ID of the file + data: New binary data or encoded data Returns: - True bei Erfolg, False bei Fehler + True on success, False on error """ try: import base64 @@ -525,21 +580,21 @@ class LucyDOMInterface: return True except Exception as e: - logger.error(f"Fehler beim Aktualisieren der Binärdaten für Datei {file_id}: {str(e)}") + logger.error(f"Error updating binary data for file {file_id}: {str(e)}") return False def save_uploaded_file(self, file_content: bytes, file_name: str) -> Dict[str, Any]: """ - Speichert eine hochgeladene Datei in der Datenbank. - Metadaten werden in der 'files'-Tabelle gespeichert, - Binärdaten in der 'file_data'-Tabelle als Base64-String. + Saves an uploaded file in the database. + Metadata is stored in the 'files' table, + Binary data in the 'file_data' table as a Base64 string. 
Args: - file_content: Binärdaten der Datei - file_name: Name der Datei + file_content: Binary data of the file + file_name: Name of the file Returns: - Dictionary mit Metadaten der gespeicherten Datei + Dictionary with metadata of the saved file """ try: # Debug: Log the start of the file upload process @@ -558,16 +613,16 @@ class LucyDOMInterface: existing_file = self.check_for_duplicate_file(file_hash) if existing_file: # Simply return the existing file metadata - logger.info(f"Duplikat gefunden für {file_name}: {existing_file['id']}") + logger.info(f"Duplicate found for {file_name}: {existing_file['id']}") return existing_file - # MIME-Typ bestimmen + # Determine MIME type mime_type = self.get_mime_type(file_name) - # Dateigröße bestimmen + # Determine file size file_size = len(file_content) - # 1. Speichere Metadaten in der 'files'-Tabelle + # 1. Save metadata in the 'files' table logger.info(f"Saving file metadata to database for file: {file_name}") db_file = self.create_file( name=file_name, @@ -576,7 +631,7 @@ class LucyDOMInterface: file_hash=file_hash ) - # 2. Speichere Binärdaten als Base64-String in der 'file_data'-Tabelle + # 2. Save binary data as Base64 string in the 'file_data' table logger.info(f"Saving file content to database for file: {file_name}") self.create_file_data(db_file["id"], file_content) @@ -594,30 +649,30 @@ class LucyDOMInterface: except Exception as e: logger.error(f"Error in save_uploaded_file for {file_name}: {str(e)}", exc_info=True) - raise FileStorageError(f"Fehler beim Speichern der Datei: {str(e)}") + raise FileStorageError(f"Error saving file: {str(e)}") def download_file(self, file_id: int) -> Optional[Dict[str, Any]]: """ - Gibt eine Datei zum Download zurück, einschließlich Binärdaten. + Returns a file for download, including binary data. 
Args: - file_id: ID der Datei + file_id: ID of the file Returns: - Dictionary mit Dateidaten und -metadaten oder None, wenn nicht gefunden + Dictionary with file data and metadata or None if not found """ try: - # 1. Metadaten aus der 'files'-Tabelle holen + # 1. Get metadata from the 'files' table file = self.get_file(file_id) if not file: - raise FileNotFoundError(f"Datei mit ID {file_id} nicht gefunden") + raise FileNotFoundError(f"File with ID {file_id} not found") - # 2. Binärdaten aus der 'file_data'-Tabelle holen + # 2. Get binary data from the 'file_data' table file_content = self.get_file_data(file_id) if file_content is None: - raise FileNotFoundError(f"Binärdaten für Datei mit ID {file_id} nicht gefunden") + raise FileNotFoundError(f"Binary data for file with ID {file_id} not found") return { "id": file_id, @@ -630,41 +685,41 @@ class LucyDOMInterface: # Re-raise FileNotFoundError as is raise except Exception as e: - logger.error(f"Fehler beim Herunterladen der Datei {file_id}: {str(e)}") - raise FileError(f"Fehler beim Herunterladen der Datei: {str(e)}") + logger.error(f"Error downloading file {file_id}: {str(e)}") + raise FileError(f"Error downloading file: {str(e)}") def _export_file_to_static(self, file_content: bytes, file_id: int, file_name: str): debug_filename = f"{file_id}_{file_name}" with open(f"./static/{debug_filename}", 'wb') as f: f.write(file_content) - # Workflow Methoden + # Workflow methods def get_all_workflows(self) -> List[Dict[str, Any]]: - """Gibt alle Workflows des aktuellen Mandanten zurück""" + """Returns all workflows for the current mandate""" return self.db.get_recordset("workflows") def get_workflows_by_user(self, user_id: int) -> List[Dict[str, Any]]: - """Gibt alle Workflows eines Benutzers zurück""" + """Returns all workflows for a user""" return self.db.get_recordset("workflows", record_filter={"user_id": user_id}) def get_workflow(self, workflow_id: str) -> Optional[Dict[str, Any]]: - """Gibt einen Workflow anhand 
seiner ID zurück""" + """Returns a workflow by its ID""" workflows = self.db.get_recordset("workflows", record_filter={"id": workflow_id}) if workflows: return workflows[0] return None def create_workflow(self, workflow_data: Dict[str, Any]) -> Dict[str, Any]: - """Erstellt einen neuen Workflow in der Datenbank""" - # Stellen Sie sicher, dass mandate_id und user_id gesetzt sind + """Creates a new workflow in the database""" + # Make sure mandate_id and user_id are set if "mandate_id" not in workflow_data: workflow_data["mandate_id"] = self.mandate_id if "user_id" not in workflow_data: workflow_data["user_id"] = self.user_id - # Zeitstempel setzen, falls nicht vorhanden + # Set timestamp if not present current_time = self._get_current_timestamp() if "started_at" not in workflow_data: workflow_data["started_at"] = current_time @@ -676,54 +731,54 @@ class LucyDOMInterface: def update_workflow(self, workflow_id: str, workflow_data: Dict[str, Any]) -> Dict[str, Any]: """ - Aktualisiert einen vorhandenen Workflow. + Updates an existing workflow. Args: - workflow_id: ID des zu aktualisierenden Workflows - workflow_data: Neue Daten für den Workflow + workflow_id: ID of the workflow to update + workflow_data: New data for the workflow Returns: - Das aktualisierte Workflow-Objekt + The updated workflow object """ - # Prüfen, ob der Workflow existiert + # Check if the workflow exists workflow = self.get_workflow(workflow_id) if not workflow: return None - # Aktualisierungszeit setzen + # Set update time workflow_data["last_activity"] = self._get_current_timestamp() - # Workflow aktualisieren + # Update workflow return self.db.record_modify("workflows", workflow_id, workflow_data) def delete_workflow(self, workflow_id: str) -> bool: """ - Löscht einen Workflow aus der Datenbank. + Deletes a workflow from the database. 
Args: - workflow_id: ID des zu löschenden Workflows + workflow_id: ID of the workflow to delete Returns: - True bei Erfolg, False wenn der Workflow nicht existiert + True on success, False if the workflow doesn't exist """ - # Prüfen, ob der Workflow existiert + # Check if the workflow exists workflow = self.get_workflow(workflow_id) if not workflow: return False - # Prüfen, ob der Benutzer der Eigentümer ist oder Admin-Rechte hat + # Check if the user is the owner or has admin rights if workflow.get("user_id") != self.user_id: - # Hier könnte eine Prüfung auf Admin-Rechte erfolgen + # Here could be a check for admin rights return False - # Workflow löschen + # Delete workflow return self.db.record_delete("workflows", workflow_id) # Workflow Messages def get_workflow_messages(self, workflow_id: str) -> List[Dict[str, Any]]: - """Gibt alle Nachrichten eines Workflows zurück""" + """Returns all messages of a workflow""" return self.db.get_recordset("workflow_messages", record_filter={"workflow_id": workflow_id}) def create_workflow_message(self, message_data: Dict[str, Any]) -> Dict[str, Any]: @@ -865,43 +920,43 @@ class LucyDOMInterface: def delete_workflow_message(self, workflow_id: str, message_id: str) -> bool: """ - Löscht eine Nachricht aus einem Workflow in der Datenbank. + Deletes a message from a workflow in the database. 
Args: - workflow_id: ID des zugehörigen Workflows - message_id: ID der zu löschenden Nachricht + workflow_id: ID of the associated workflow + message_id: ID of the message to delete Returns: - True bei Erfolg, False bei Fehler + True on success, False on error """ try: - # Prüfen, ob die Nachricht existiert + # Check if the message exists messages = self.get_workflow_messages(workflow_id) message = next((m for m in messages if m.get("id") == message_id), None) if not message: - logger.warning(f"Nachricht {message_id} für Workflow {workflow_id} nicht gefunden") + logger.warning(f"Message {message_id} for workflow {workflow_id} not found") return False - # Nachricht aus der Datenbank löschen + # Delete the message from the database return self.db.record_delete("workflow_messages", message_id) except Exception as e: - logger.error(f"Fehler beim Löschen der Nachricht {message_id}: {str(e)}") + logger.error(f"Error deleting message {message_id}: {str(e)}") return False def delete_file_from_message(self, workflow_id: str, message_id: str, file_id: int) -> bool: """ - Entfernt eine Dateireferenz aus einer Nachricht. - Die Datei selbst wird nicht gelöscht, nur die Referenz in der Nachricht. + Removes a file reference from a message. + The file itself is not deleted, only the reference in the message. Enhanced version with improved file matching. 
Args: - workflow_id: ID des zugehörigen Workflows - message_id: ID der Nachricht - file_id: ID der zu entfernenden Datei + workflow_id: ID of the associated workflow + message_id: ID of the message + file_id: ID of the file to remove Returns: - True bei Erfolg, False bei Fehler + True on success, False on error """ try: # Log operation @@ -996,15 +1051,32 @@ class LucyDOMInterface: # Workflow Logs def get_workflow_logs(self, workflow_id: str) -> List[Dict[str, Any]]: - """Gibt alle Log-Einträge eines Workflows zurück""" + """Returns all log entries for a workflow""" return self.db.get_recordset("workflow_logs", record_filter={"workflow_id": workflow_id}) def create_workflow_log(self, log_data: Dict[str, Any]) -> Dict[str, Any]: - """Erstellt einen neuen Log-Eintrag für einen Workflow""" - # Stellen Sie sicher, dass die benötigten Felder vorhanden sind + """Creates a new log entry for a workflow""" + # Make sure required fields are present if "timestamp" not in log_data: log_data["timestamp"] = self._get_current_timestamp() + # Add status information if not present + if "status" not in log_data and "type" in log_data: + if log_data["type"] == "error": + log_data["status"] = "error" + else: + log_data["status"] = "running" + + # Add progress information if not present + if "progress" not in log_data: + # Default progress values based on log type + if log_data.get("type") == "info": + log_data["progress"] = 50 # Default middle progress + elif log_data.get("type") == "error": + log_data["progress"] = -1 # Error state + elif log_data.get("type") == "warning": + log_data["progress"] = 50 # Default middle progress + return self.db.record_create("workflow_logs", log_data) @@ -1102,8 +1174,9 @@ class LucyDOMInterface: "message": log.get("message", ""), "type": log.get("type", "info"), "timestamp": log.get("timestamp", self._get_current_timestamp()), - "agent_id": log.get("agent_id"), - "agent_name": log.get("agent_name") + "agent_name": log.get("agent_name", 
GLOBAL_SETTINGS.get("system_name", "AI Assistant")), + "status": log.get("status", "running"), + "progress": log.get("progress", 50) } # Create or update log @@ -1177,13 +1250,25 @@ class LucyDOMInterface: logger.error(f"Error loading workflow state: {str(e)}") return None -# Singleton-Factory für LucyDOMInterface-Instanzen pro Kontext +# Global settings for the LucyDOM interface +GLOBAL_SETTINGS = { + "system_name": "AI Assistant", # Default system name for logs + "workflow_status_messages": { + "init": "Workflow initialized", + "running": "Running workflow", + "waiting": "Waiting for input", + "completed": "Workflow completed", + "error": "Error in workflow" + } +} + +# Singleton factory for LucyDOMInterface instances per context _lucydom_interfaces = {} def get_lucydom_interface(mandate_id: int = 0, user_id: int = 0) -> LucyDOMInterface: """ - Gibt eine LucyDOMInterface-Instanz für den angegebenen Kontext zurück. - Wiederverwendet bestehende Instanzen. + Returns a LucyDOMInterface instance for the specified context. + Reuses existing instances. """ context_key = f"{mandate_id}_{user_id}" if context_key not in _lucydom_interfaces: diff --git a/notes/changelog.txt b/notes/changelog.txt index d1be7304..5bd7b8b3 100644 --- a/notes/changelog.txt +++ b/notes/changelog.txt @@ -1,27 +1,6 @@ ....................... TASKS -can you do following adaptions for the workflow management for the frontend: -- german comments in logs and prompts to translate to english. where to adapt what? -- can you enhance all ai prompts to include, that the output is delivered in the language of the user? Perhaps an option to have a global variable for this, which is also transferred with the task to the agents? Perhaps to do simple ai call with some words to ask AI? I want a solution with minimum impact to the code and simple to use. -- can you check all self.log_add(...) statements and rearrange them. They are for the progress of a workflow to show in the front-end. 
I want all messages to be in a standardizes format and organized along the workflow, that user understands the logical progress. Not too much information, but the relevant steps to show. Within loops to tell progress in percent by having a log_add in the loops (so to add progress attribute to the function call) - -topics for log_add log_entry object: -- always to include workflow "status" for frontend polling support -- agent_id to remove -- agent_name to take from a global variable in the according module (same global variables set like global user language) -- add atrribute to show progress - -please deliver adapted modules when more than 3 parts have to be adapted, otherwise the parts to adapt. - - - - - - - - - FRONTEND: - General: Adapt to backend changes and simplify polling and frontend objects status, remove unnecessary elements. - Workflow object has only one attribute for status, this is "status" with value "completed" or "running". All other status objects for workflow to remove. @@ -60,6 +39,18 @@ frontend: no labels definition ----------------------- DONE + + + +can you do following adaptions for the workflow management for the frontend: +- german comments in logs and prompts to translate to english. where to adapt what? +- ai calls to adapt for user language if necessary (additional parameter in the lucydom ai call) + +- can you check all self.log_add(...) statements and rearrange them for the revised function call. They are for the progress of a workflow to show in the front-end. I want all messages to be in a standardized format and organized along the workflow, that user understands the logical progress. Not too much information, but the relevant steps to show. Within loops to tell progress in percent by having a log_add in the loops (so to add progress attribute to the function call) + +please deliver adapted modules when more than 3 parts have to be adapted, otherwise the parts to adapt.
+ + can you do following adaptions for document class: diff --git a/routes/attributes.py b/routes/attributes.py index 406a3896..5cbcb676 100644 --- a/routes/attributes.py +++ b/routes/attributes.py @@ -4,10 +4,10 @@ from fastapi import status from modules.auth import get_current_active_user, get_user_context -# Importiere die Attributdefinition und Hilfsfunktionen +# Import the attribute definition and helper functions from gateway.modules.def_attributes import AttributeDefinition, get_model_attributes -# Importiere die Modellmodule (ohne spezifische Klassen) +# Import the model modules (without specific classes) import modules.gateway_model as gateway_model import modules.lucydom_model as lucydom_model @@ -30,7 +30,7 @@ model_classes = { } -# Erstelle einen Router für die Attribute-Endpunkte +# Create a router for the attribute endpoints router = APIRouter( prefix="/api/attributes", tags=["Attributes"], @@ -39,36 +39,35 @@ router = APIRouter( @router.get("/{entity_type}", response_model=List[AttributeDefinition]) async def get_entity_attributes( - entity_type: str = Path(..., description="Typ der Entität (z.B. prompt)"), + entity_type: str = Path(..., description="Type of entity (e.g. prompt)"), current_user: Dict[str, Any] = Depends(get_current_active_user) ): """ - Ruft die Attributdefinitionen für eine bestimmte Entität ab. - Dies kann für die dynamische Generierung von Formularen verwendet werden. + Retrieves the attribute definitions for a specific entity. + This can be used for dynamic form generation. 
""" - # Authentifizierung und Benutzerkontext + # Authentication and user context mandate_id, user_id = await get_user_context(current_user) - # Bevorzugte Sprache des Benutzers ermitteln + # Determine preferred language of the user user_language = current_user.get("language", "de") - # Prüfen, ob Entitätstyp bekannt ist + # Check if entity type is known if entity_type not in model_classes: raise HTTPException( status_code=status.HTTP_404_NOT_FOUND, - detail=f"Entitätstyp '{entity_type}' nicht gefunden." + detail=f"Entity type '{entity_type}' not found." ) - # Model-Klasse abrufen und Attribute daraus ableiten + # Get model class and derive attributes from it model_class = model_classes[entity_type] attributes = get_model_attributes(model_class, user_language) - # Nur bearbeitbare und sichtbare Attribute zurückgeben + # Return only editable and visible attributes visible_attributes = [attr for attr in attributes if attr.visible] @router.options("/{entity_type}") async def options_entity_attributes( - entity_type: str = Path(..., description="Typ der Entität (z.B. prompt)") + entity_type: str = Path(..., description="Type of entity (e.g. 
prompt)") ): - return Response(status_code=200) - + return Response(status_code=200) \ No newline at end of file diff --git a/routes/mandates.py b/routes/mandates.py index 5b01d92a..e89b486a 100644 --- a/routes/mandates.py +++ b/routes/mandates.py @@ -9,7 +9,7 @@ from modules.auth import get_current_active_user, get_user_context from modules.gateway_interface import get_gateway_interface from modules.gateway_model import Mandate -# Alle Attribute des Models ermitteln (außer interne/spezielle Attribute) +# Determine all attributes of the model (except internal/special attributes) def get_model_attributes(model_class): return [attr for attr in dir(model_class) if not callable(getattr(model_class, attr)) @@ -20,25 +20,25 @@ def get_model_attributes(model_class): and attr != 'label' and attr != 'field_labels'] -# Modell-Attribute für Mandate +# Model attributes for Mandate mandate_attributes = get_model_attributes(Mandate) @dataclass class AppContext: - """Kontext-Objekt für alle benötigten Verbindungen und Benutzerinformationen""" + """Context object for all required connections and user information""" mandate_id: int user_id: int interface_data: Any # Gateway Interface async def get_context(current_user: Dict[str, Any]) -> AppContext: """ - Erstellt ein zentrales Kontext-Objekt mit allen benötigten Interfaces + Creates a central context object with all required interfaces Args: - current_user: Aktueller Benutzer aus der Authentifizierung + current_user: Current user from authentication Returns: - AppContext-Objekt mit allen benötigten Verbindungen + AppContext object with all required connections """ mandate_id, user_id = await get_user_context(current_user) interface_data = get_gateway_interface(mandate_id, user_id) @@ -49,7 +49,7 @@ async def get_context(current_user: Dict[str, Any]) -> AppContext: interface_data=interface_data ) -# Router für Mandanten-Endpunkte erstellen +# Create router for mandate endpoints router = APIRouter( prefix="/api/mandates", 
tags=["Mandates"], @@ -58,17 +58,17 @@ router = APIRouter( @router.get("", response_model=List[Dict[str, Any]]) async def get_mandates(current_user: Dict[str, Any] = Depends(get_current_active_user)): - """Alle verfügbaren Mandanten abrufen (nur für SysAdmin-Benutzer)""" + """Get all available mandates (only for SysAdmin users)""" context = await get_context(current_user) - # Berechtigungsprüfung + # Permission check if current_user.get("privilege") != "sysadmin": raise HTTPException( status_code=status.HTTP_403_FORBIDDEN, - detail="Nur System-Administratoren können alle Mandanten abrufen" + detail="Only system administrators can access all mandates" ) - # Mandanten generisch abrufen + # Get mandates generically return context.interface_data.get_all_mandates() @@ -77,29 +77,29 @@ async def create_mandate( mandate: Dict[str, Any] = Body(...), current_user: Dict[str, Any] = Depends(get_current_active_user) ): - """Einen neuen Mandanten erstellen (nur für SysAdmin-Benutzer)""" + """Create a new mandate (only for SysAdmin users)""" context = await get_context(current_user) - # Berechtigungsprüfung + # Permission check if current_user.get("privilege") != "sysadmin": raise HTTPException( status_code=status.HTTP_403_FORBIDDEN, - detail="Nur System-Administratoren können Mandanten erstellen" + detail="Only system administrators can create mandates" ) - # Attribute aus dem Request dynamisch setzen + # Set attributes from the request dynamically mandate_data = {} for attr in mandate_attributes: if attr in mandate: mandate_data[attr] = mandate[attr] - # Standardwerte für fehlende Felder + # Default values for missing fields if "name" not in mandate_data: - mandate_data["name"] = "Neuer Mandant" + mandate_data["name"] = "New Mandate" if "language" not in mandate_data: mandate_data["language"] = "de" - # Mandant erstellen + # Create mandate new_mandate = context.interface_data.create_mandate(**mandate_data) return new_mandate @@ -110,11 +110,11 @@ async def get_mandate( 
mandate_id: int, current_user: Dict[str, Any] = Depends(get_current_active_user) ): - """Einen bestimmten Mandanten abrufen""" + """Get a specific mandate""" context = await get_context(current_user) - # Berechtigungsprüfung - # Admin darf nur seinen eigenen Mandanten sehen, SysAdmin alle + # Permission check + # Admin can only see their own mandate, SysAdmin can see all is_admin = current_user.get("privilege") == "admin" is_sysadmin = current_user.get("privilege") == "sysadmin" is_own_mandate = context.mandate_id == mandate_id @@ -122,37 +122,37 @@ async def get_mandate( if (is_admin and not is_own_mandate) and not is_sysadmin: raise HTTPException( status_code=status.HTTP_403_FORBIDDEN, - detail="Keine Berechtigung zum Abrufen dieses Mandanten" + detail="No permission to access this mandate" ) - # Mandant generisch abrufen + # Get mandate generically mandate = context.interface_data.get_mandate(mandate_id) if not mandate: raise HTTPException( status_code=status.HTTP_404_NOT_FOUND, - detail=f"Mandant mit ID {mandate_id} nicht gefunden" + detail=f"Mandate with ID {mandate_id} not found" ) return mandate @router.put("/{mandate_id}", response_model=Dict[str, Any]) async def update_mandate( - mandate_id: int = Path(..., description="ID des zu aktualisierenden Mandanten"), - mandate_data: Dict[str, Any] = Body(..., description="Aktualisierte Mandantendaten"), + mandate_id: int = Path(..., description="ID of the mandate to update"), + mandate_data: Dict[str, Any] = Body(..., description="Updated mandate data"), current_user: Dict[str, Any] = Depends(get_current_active_user) ): - """Einen bestehenden Mandanten aktualisieren""" + """Update an existing mandate""" context = await get_context(current_user) - # Mandant existiert? + # Mandate exists? 
mandate = context.interface_data.get_mandate(mandate_id) if not mandate: raise HTTPException( status_code=status.HTTP_404_NOT_FOUND, - detail=f"Mandant mit ID {mandate_id} nicht gefunden" + detail=f"Mandate with ID {mandate_id} not found" ) - # Berechtigungsprüfung + # Permission check is_admin = current_user.get("privilege") == "admin" is_sysadmin = current_user.get("privilege") == "sysadmin" is_own_mandate = context.mandate_id == mandate_id @@ -160,16 +160,16 @@ async def update_mandate( if (is_admin and not is_own_mandate) and not is_sysadmin: raise HTTPException( status_code=status.HTTP_403_FORBIDDEN, - detail="Keine Berechtigung zum Aktualisieren dieses Mandanten" + detail="No permission to update this mandate" ) - # Attribute aus dem Request dynamisch in update_data filtern + # Dynamically filter attributes from the request into update_data update_data = {} for attr in mandate_attributes: if attr in mandate_data: update_data[attr] = mandate_data[attr] - # Mandant aktualisieren + # Update mandate updated_mandate = context.interface_data.update_mandate( mandate_id=mandate_id, mandate_data=update_data @@ -179,21 +179,21 @@ async def update_mandate( @router.delete("/{mandate_id}", status_code=status.HTTP_204_NO_CONTENT) async def delete_mandate( - mandate_id: int = Path(..., description="ID des zu löschenden Mandanten"), + mandate_id: int = Path(..., description="ID of the mandate to delete"), current_user: Dict[str, Any] = Depends(get_current_active_user) ): - """Einen Mandanten löschen, inklusive aller zugehörigen Benutzer und referenzierten Objekte""" + """Delete a mandate, including all associated users and referenced objects""" context = await get_context(current_user) - # Mandant existiert? + # Mandate exists? 
mandate = context.interface_data.get_mandate(mandate_id) if not mandate: raise HTTPException( status_code=status.HTTP_404_NOT_FOUND, - detail=f"Mandant mit ID {mandate_id} nicht gefunden" + detail=f"Mandate with ID {mandate_id} not found" ) - # Berechtigungsprüfung + # Permission check is_admin = current_user.get("privilege") == "admin" is_sysadmin = current_user.get("privilege") == "sysadmin" is_own_mandate = context.mandate_id == mandate_id @@ -201,16 +201,16 @@ async def delete_mandate( if (is_admin and not is_own_mandate) and not is_sysadmin: raise HTTPException( status_code=status.HTTP_403_FORBIDDEN, - detail="Keine Berechtigung zum Löschen dieses Mandanten" + detail="No permission to delete this mandate" ) - # Mandant löschen + # Delete mandate success = context.interface_data.delete_mandate(mandate_id) if not success: raise HTTPException( status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, - detail=f"Fehler beim Löschen des Mandanten mit ID {mandate_id}" + detail=f"Error deleting mandate with ID {mandate_id}" ) - # Kein Inhalt zurückgeben bei erfolgreichem Löschen + # Return no content on successful deletion return None \ No newline at end of file diff --git a/routes/users.py b/routes/users.py index f60ea108..3f88fb31 100644 --- a/routes/users.py +++ b/routes/users.py @@ -11,7 +11,7 @@ from modules.auth import get_current_active_user, get_user_context from modules.gateway_interface import get_gateway_interface from modules.gateway_model import User -# Alle Attribute des Models ermitteln (außer interne/spezielle Attribute) +# Determine all attributes of the model (except internal/special attributes) def get_model_attributes(model_class): return [attr for attr in dir(model_class) if not callable(getattr(model_class, attr)) @@ -22,25 +22,25 @@ def get_model_attributes(model_class): and attr != 'label' and attr != 'field_labels'] -# Modell-Attribute für User +# Model attributes for User user_attributes = get_model_attributes(User) @dataclass class AppContext: - 
"""Kontext-Objekt für alle benötigten Verbindungen und Benutzerinformationen""" + """Context object for all required connections and user information""" mandate_id: int user_id: int interface_data: Any # Gateway Interface async def get_context(current_user: Dict[str, Any]) -> AppContext: """ - Erstellt ein zentrales Kontext-Objekt mit allen benötigten Interfaces + Creates a central context object with all required interfaces Args: - current_user: Aktueller Benutzer aus der Authentifizierung + current_user: Current user from authentication Returns: - AppContext-Objekt mit allen benötigten Verbindungen + AppContext object with all required connections """ mandate_id, user_id = await get_user_context(current_user) interface_data = get_gateway_interface(mandate_id, user_id) @@ -51,7 +51,7 @@ async def get_context(current_user: Dict[str, Any]) -> AppContext: interface_data=interface_data ) -# Router für Benutzer-Endpunkte erstellen +# Create router for user endpoints router = APIRouter( prefix="/api/users", tags=["Users"], @@ -60,17 +60,17 @@ router = APIRouter( @router.get("", response_model=List[Dict[str, Any]]) async def get_users(current_user: Dict[str, Any] = Depends(get_current_active_user)): - """Alle verfügbaren Benutzer abrufen (nur für Admin/SysAdmin-Benutzer)""" + """Get all available users (only for Admin/SysAdmin users)""" context = await get_context(current_user) - # Berechtigungsprüfung + # Permission check if current_user.get("privilege") not in ["admin", "sysadmin"]: raise HTTPException( status_code=status.HTTP_403_FORBIDDEN, - detail="Keine Berechtigung zum Abrufen der Benutzerliste" + detail="No permission to access the user list" ) - # Admin sieht nur Benutzer des eigenen Mandanten, SysAdmin sieht alle + # Admin sees only users of own mandate, SysAdmin sees all if current_user.get("privilege") == "admin": return context.interface_data.get_users_by_mandate(context.mandate_id) else: # sysadmin @@ -78,34 +78,34 @@ async def get_users(current_user: 
Dict[str, Any] = Depends(get_current_active_us @router.post("/register", response_model=Dict[str, Any]) async def register_user(user_data: dict = Body(...)): - """Neuen Benutzer registrieren""" - # Bei der Registrierung keinen Benutzerkontext verwenden + """Register a new user""" + # Don't use user context for registration gateway = get_gateway_interface() if "username" not in user_data or "password" not in user_data: - raise HTTPException(status_code=400, detail="Benutzername und Passwort erforderlich") + raise HTTPException(status_code=400, detail="Username and password required") try: - # Attribute dynamisch filtern + # Dynamically filter attributes mandate_data = { - "name": f"Mandant von {user_data['username']}", + "name": f"Mandate of {user_data['username']}", "language": user_data.get("language", "de") } new_mandate = gateway.create_mandate(**mandate_data) - # User-Attribute aus dem Request filtern + # Filter user attributes from the request user_create_data = {} for attr in user_attributes: - if attr in user_data and attr not in ["id"]: # ID wird automatisch vergeben + if attr in user_data and attr not in ["id"]: # ID is auto-assigned user_create_data[attr] = user_data[attr] - # Pflichtfelder + # Required fields user_create_data["username"] = user_data["username"] user_create_data["password"] = user_data["password"] user_create_data["mandate_id"] = new_mandate["id"] - # Standardwerte für optionale Felder + # Default values for optional fields if "disabled" not in user_create_data: user_create_data["disabled"] = False if "privilege" not in user_create_data: @@ -123,143 +123,143 @@ async def get_user( user_id: int, current_user: Dict[str, Any] = Depends(get_current_active_user) ): - """Einen bestimmten Benutzer abrufen""" + """Get a specific user""" context = await get_context(current_user) - # Gateway-Interface mit Benutzerkontext initialisieren + # Initialize gateway interface with user context user_to_get = context.interface_data.get_user(user_id) if not 
user_to_get: raise HTTPException( status_code=status.HTTP_404_NOT_FOUND, - detail=f"Benutzer mit ID {user_id} nicht gefunden" + detail=f"User with ID {user_id} not found" ) - # Berechtigungsprüfung - # Benutzer darf nur sich selbst abrufen, Admin nur Benutzer des eigenen Mandanten, SysAdmin alle + # Permission check + # User can only view themselves, Admin only users of their own mandate, SysAdmin all if user_id == context.user_id: - # Benutzer darf sich selbst abrufen + # User can view themselves pass elif current_user.get("privilege") == "admin" and user_to_get.get("mandate_id") == context.mandate_id: - # Admin darf Benutzer des eigenen Mandanten abrufen + # Admin can view users of their own mandate pass elif current_user.get("privilege") == "sysadmin": - # SysAdmin darf alle Benutzer abrufen + # SysAdmin can view all users pass else: raise HTTPException( status_code=status.HTTP_403_FORBIDDEN, - detail="Keine Berechtigung zum Abrufen dieses Benutzers" + detail="No permission to view this user" ) return user_to_get @router.put("/{user_id}", response_model=Dict[str, Any]) async def update_user( - user_id: int = Path(..., description="ID des zu aktualisierenden Benutzers"), - user_data: Dict[str, Any] = Body(..., description="Aktualisierte Benutzerdaten"), + user_id: int = Path(..., description="ID of the user to update"), + user_data: Dict[str, Any] = Body(..., description="Updated user data"), current_user: Dict[str, Any] = Depends(get_current_active_user) ): - """Einen bestehenden Benutzer aktualisieren""" + """Update an existing user""" context = await get_context(current_user) - # Benutzer existiert? + # User exists? 
user_to_update = context.interface_data.get_user(user_id) if not user_to_update: raise HTTPException( status_code=status.HTTP_404_NOT_FOUND, - detail=f"Benutzer mit ID {user_id} nicht gefunden" + detail=f"User with ID {user_id} not found" ) - # Berechtigungsprüfung + # Permission check is_self_update = user_id == context.user_id is_admin = current_user.get("privilege") == "admin" is_sysadmin = current_user.get("privilege") == "sysadmin" same_mandate = user_to_update.get("mandate_id") == context.mandate_id - # Filtere erlaubte Felder je nach Berechtigungsstufe + # Filter allowed fields based on permission level allowed_fields = {"username", "email", "full_name", "language"} sensitive_fields = {"mandate_id", "disabled", "privilege"} - # Prüfe, ob sensitive Felder geändert werden sollen + # Check if sensitive fields should be changed sensitive_update = any(field in user_data for field in sensitive_fields) if is_self_update and sensitive_update: - # Normale Benutzer dürfen ihre sensitiven Daten nicht ändern + # Normal users cannot change their sensitive data raise HTTPException( status_code=status.HTTP_403_FORBIDDEN, - detail="Keine Berechtigung zum Ändern sensitiver Benutzerdaten" + detail="No permission to change sensitive user data" ) elif is_admin and sensitive_update and not same_mandate: - # Admins dürfen sensitive Daten nur für Benutzer des eigenen Mandanten ändern + # Admins can only change sensitive data for users of their own mandate raise HTTPException( status_code=status.HTTP_403_FORBIDDEN, - detail="Keine Berechtigung zum Ändern sensitiver Daten für Benutzer anderer Mandanten" + detail="No permission to change sensitive data for users of other mandates" ) elif not (is_self_update or (is_admin and same_mandate) or is_sysadmin): - # Keine Berechtigung für andere Fälle + # No permission for other cases raise HTTPException( status_code=status.HTTP_403_FORBIDDEN, - detail="Keine Berechtigung zum Aktualisieren dieses Benutzers" + detail="No permission to update 
this user" ) - # Attribute aus dem Request dynamisch filtern + # Dynamically filter attributes from the request update_data = {} for attr in user_attributes: - if attr in user_data and attr not in ["id"]: # ID kann nicht geändert werden + if attr in user_data and attr not in ["id"]: # ID cannot be changed update_data[attr] = user_data[attr] - # Entferne nicht erlaubte Felder für normale Benutzer + # Remove disallowed fields for normal users if not (is_admin or is_sysadmin): update_data = {k: v for k, v in update_data.items() if k in allowed_fields} - # User-Daten aktualisieren + # Update user data updated_user = context.interface_data.update_user(user_id, update_data) return updated_user @router.delete("/{user_id}", status_code=status.HTTP_204_NO_CONTENT) async def delete_user( - user_id: int = Path(..., description="ID des zu löschenden Benutzers"), + user_id: int = Path(..., description="ID of the user to delete"), current_user: Dict[str, Any] = Depends(get_current_active_user) ): - """Einen Benutzer löschen""" + """Delete a user""" context = await get_context(current_user) - # Benutzer existiert? + # User exists? 
user_to_delete = context.interface_data.get_user(user_id) if not user_to_delete: raise HTTPException( status_code=status.HTTP_404_NOT_FOUND, - detail=f"Benutzer mit ID {user_id} nicht gefunden" + detail=f"User with ID {user_id} not found" ) - # Berechtigungsprüfung + # Permission check is_self_delete = user_id == context.user_id is_admin = current_user.get("privilege") == "admin" is_sysadmin = current_user.get("privilege") == "sysadmin" same_mandate = user_to_delete.get("mandate_id") == context.mandate_id if is_self_delete: - # Benutzer darf sich selbst löschen + # User can delete themselves pass elif is_admin and same_mandate: - # Admin darf Benutzer des eigenen Mandanten löschen + # Admin can delete users of their own mandate pass elif is_sysadmin: - # SysAdmin darf alle Benutzer löschen + # SysAdmin can delete all users pass else: raise HTTPException( status_code=status.HTTP_403_FORBIDDEN, - detail="Keine Berechtigung zum Löschen dieses Benutzers" + detail="No permission to delete this user" ) - # Benutzer und alle referenzierten Objekte löschen + # Delete user and all referenced objects success = context.interface_data.delete_user(user_id) if not success: raise HTTPException( status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, - detail=f"Fehler beim Löschen des Benutzers mit ID {user_id}" + detail=f"Error deleting user with ID {user_id}" ) - # Kein Inhalt zurückgeben bei erfolgreichem Löschen + # Return no content on successful deletion return None \ No newline at end of file