From b7c2fa86474274ad618f17f6f18783c77b3506c2 Mon Sep 17 00:00:00 2001 From: ValueOn AG Date: Wed, 7 May 2025 02:08:09 +0200 Subject: [PATCH] prod azure 1.0.11 --- app.py | 3 + config.ini | 5 + env_dev.env | 7 +- env_prod.env | 7 +- modules/BACKUP-gatewayInterface.py | 471 ------ modules/BACKUP-lucydomInterface.py | 1343 ----------------- modules/agentEmail.py | 656 ++++++++ modules/lucydomInterface.py | 8 +- requirements.txt | 5 +- routes/routeMsft.py | 405 +++++ static/10_email_preview.html | 42 + static/11_email_template.json | 6 + static/12_email_preview.html | 42 + static/13_email_template.json | 6 + static/14_microsoft_authentication.html | 47 + static/15_microsoft_authentication.html | 28 + static/16_email_preview.html | 42 + static/17_email_template.json | 6 + static/18_generated_code.py | 48 + static/19_execution_history.json | 19 + static/20_prime_numbers.csv | 1000 ++++++++++++ static/21_email_preview.html | 42 + static/22_email_template.json | 6 + static/23_documentProcessor.py | 933 ++++++++++++ static/24_defAttributes.py | 123 ++ static/25_email_preview.html | 42 + static/26_email_template.json | 6 + static/27_email_preview.html | 42 + static/28_email_template.json | 6 + static/6_email_preview.html | 42 + static/7_email_template.json | 6 + static/8_email_preview.html | 74 + static/9_email_template.json | 6 + .../7d08aab9-a170-4975-8898-bc7e0a95488e.json | 1 + 34 files changed, 3705 insertions(+), 1820 deletions(-) delete mode 100644 modules/BACKUP-gatewayInterface.py delete mode 100644 modules/BACKUP-lucydomInterface.py create mode 100644 modules/agentEmail.py create mode 100644 routes/routeMsft.py create mode 100644 static/10_email_preview.html create mode 100644 static/11_email_template.json create mode 100644 static/12_email_preview.html create mode 100644 static/13_email_template.json create mode 100644 static/14_microsoft_authentication.html create mode 100644 static/15_microsoft_authentication.html create mode 100644 static/16_email_preview.html 
create mode 100644 static/17_email_template.json create mode 100644 static/18_generated_code.py create mode 100644 static/19_execution_history.json create mode 100644 static/20_prime_numbers.csv create mode 100644 static/21_email_preview.html create mode 100644 static/22_email_template.json create mode 100644 static/23_documentProcessor.py create mode 100644 static/24_defAttributes.py create mode 100644 static/25_email_preview.html create mode 100644 static/26_email_template.json create mode 100644 static/27_email_preview.html create mode 100644 static/28_email_template.json create mode 100644 static/6_email_preview.html create mode 100644 static/7_email_template.json create mode 100644 static/8_email_preview.html create mode 100644 static/9_email_template.json create mode 100644 token_storage/7d08aab9-a170-4975-8898-bc7e0a95488e.json diff --git a/app.py b/app.py index 046d105f..038e2af5 100644 --- a/app.py +++ b/app.py @@ -197,5 +197,8 @@ app.include_router(promptRouter) from routes.routeWorkflows import router as workflowRouter app.include_router(workflowRouter) +from routes.routeMsft import router as msftRouter +app.include_router(msftRouter) + #if __name__ == "__main__": # uvicorn.run("app:app", host="0.0.0.0", port=8000, reload=True) \ No newline at end of file diff --git a/config.ini b/config.ini index e4b85267..564a0fa4 100644 --- a/config.ini +++ b/config.ini @@ -45,3 +45,8 @@ Agent_Webcrawler_SERPAPI_USER_AGENT = Mozilla/5.0 (Windows NT 10.0; Win64; x64) Agent_Coder_INSTALL_TIMEOUT = 180 Agent_Coder_EXECUTION_TIMEOUT = 60 Agent_Coder_EXECUTION_RETRY = 5 + +# Agent Mail configuration +Agent_Mail_MSFT_CLIENT_ID = c7e7112d-61dc-4f3a-8cd3-08cc4cd7504c +Agent_Mail_MSFT_CLIENT_SECRET = Kxf8Q~2lJIteZ~JaI32kMf1lfaWKATqxXiNiFbzV +Agent_Mail_MSFT_TENANT_ID = common diff --git a/env_dev.env b/env_dev.env index 5b56945b..8ecb6aed 100644 --- a/env_dev.env +++ b/env_dev.env @@ -3,7 +3,7 @@ # System Configuration APP_ENV_TYPE = dev APP_ENV_LABEL = Development Instance 
Patrick -APP_CALL=uvicorn app:app --host 0.0.0.0 --port 8000 +APP_API_URL = http://localhost:8080 # Database Configuration System DB_SYSTEM_HOST=D:/Temp/_powerondb @@ -32,4 +32,7 @@ APP_LOGGING_DATE_FORMAT = %Y-%m-%d %H:%M:%S APP_LOGGING_CONSOLE_ENABLED = True APP_LOGGING_FILE_ENABLED = True APP_LOGGING_ROTATION_SIZE = 10485760 -APP_LOGGING_BACKUP_COUNT = 5 \ No newline at end of file +APP_LOGGING_BACKUP_COUNT = 5 + +# Agent Mail +Agent_Mail_MSFT_REDIRECT_URI = http://localhost:8000/api/msft/auth/callback \ No newline at end of file diff --git a/env_prod.env b/env_prod.env index 8a2e2aaf..57b1da4b 100644 --- a/env_prod.env +++ b/env_prod.env @@ -3,7 +3,7 @@ # System Configuration APP_ENV_TYPE = prod APP_ENV_LABEL = Production Instance -APP_CALL=uvicorn app:app --host 0.0.0.0 --port 8000 +APP_API_URL = https://gateway.poweron-center.net # Database Configuration System DB_SYSTEM_HOST=/home/_powerondb @@ -32,4 +32,7 @@ APP_LOGGING_DATE_FORMAT = %Y-%m-%d %H:%M:%S APP_LOGGING_CONSOLE_ENABLED = True APP_LOGGING_FILE_ENABLED = True APP_LOGGING_ROTATION_SIZE = 10485760 -APP_LOGGING_BACKUP_COUNT = 5 \ No newline at end of file +APP_LOGGING_BACKUP_COUNT = 5 + +# Agent Mail +Agent_Mail_MSFT_REDIRECT_URI = https://gateway.poweron-center.net/api/msft/auth/callback diff --git a/modules/BACKUP-gatewayInterface.py b/modules/BACKUP-gatewayInterface.py deleted file mode 100644 index 3e1120c7..00000000 --- a/modules/BACKUP-gatewayInterface.py +++ /dev/null @@ -1,471 +0,0 @@ -""" -Interface to the Gateway system. -Manages users and mandates for authentication. 
-""" - -import os -import logging -from typing import Dict, Any, List, Optional, Union -import importlib -from passlib.context import CryptContext - -from connectors.connectorDbJson import DatabaseConnector -from modules.configuration import APP_CONFIG - -logger = logging.getLogger(__name__) - -# Password-Hashing -pwdContext = CryptContext(schemes=["argon2"], deprecated="auto") - - -class GatewayInterface: - """ - Interface to the Gateway system. - Manages users and mandates. - """ - - def __init__(self, mandateId: int = None, userId: int = None): - """ - Initializes the Gateway Interface with optional mandate and user context. - - Args: - mandateId: ID of the current mandate (optional) - userId: ID of the current user (optional) - """ - # Context can be empty during initialization - self.mandateId = mandateId - self.userId = userId - - # Import data model module - try: - self.modelModule = importlib.import_module("modules.gatewayModel") - logger.info("gatewayModel successfully imported") - except ImportError as e: - logger.error(f"Error importing gatewayModel: {e}") - raise - - # Initialize database - self._initializeDatabase() - - def _initializeDatabase(self): - """ - Initializes the database with minimal objects - """ - - self.db = DatabaseConnector( - dbHost=APP_CONFIG.get("DB_SYSTEM_HOST"), - dbDatabase=APP_CONFIG.get("DB_SYSTEM_DATABASE"), - dbUser=APP_CONFIG.get("DB_SYSTEM_USER"), - dbPassword=APP_CONFIG.get("DB_SYSTEM_PASSWORD_SECRET"), - mandateId=self.mandateId if self.mandateId else 0, - userId=self.userId if self.userId else 0 - ) - - # Create Root mandate if needed - existingMandateId = self.getInitialId("mandates") - mandates = self.db.getRecordset("mandates") - if existingMandateId is None or not mandates: - logger.info("Creating Root mandate") - rootMandate = { - "name": "Root", - "language": "de" - } - createdMandate = self.db.recordCreate("mandates", rootMandate) - logger.info(f"Root mandate created with ID {createdMandate['id']}") - - # Update 
mandate context - self.mandateId = createdMandate['id'] - self.userId = createdMandate['userId'] - - # Recreate connector with correct context - self.db = DatabaseConnector( - dbHost=APP_CONFIG.get("DB_SYSTEM_HOST"), - dbDatabase=APP_CONFIG.get("DB_SYSTEM_DATABASE"), - dbUser=APP_CONFIG.get("DB_SYSTEM_USER"), - dbPassword=APP_CONFIG.get("DB_SYSTEM_PASSWORD_SECRET"), - mandateId=self.mandateId, - userId=self.userId - ) - - # Create Admin user if needed - existingUserId = self.getInitialId("users") - users = self.db.getRecordset("users") - if existingUserId is None or not users: - logger.info("Creating Admin user") - adminUser = { - "mandateId": self.mandateId, - "username": "admin", - "email": "admin@example.com", - "fullName": "Administrator", - "disabled": False, - "language": "de", - "privilege": "sysadmin", # SysAdmin privilege - "hashedPassword": self._getPasswordHash("admin") # Use a secure password in production! - } - createdUser = self.db.recordCreate("users", adminUser) - logger.info(f"Admin user created with ID {createdUser['id']}") - - # Update user context - self.userId = createdUser['id'] - - # Recreate connector with correct context - self.db = DatabaseConnector( - dbHost=APP_CONFIG.get("DB_SYSTEM_HOST"), - dbDatabase=APP_CONFIG.get("DB_SYSTEM_DATABASE"), - dbUser=APP_CONFIG.get("DB_SYSTEM_USER"), - dbPassword=APP_CONFIG.get("DB_SYSTEM_PASSWORD_SECRET"), - mandateId=self.mandateId, - userId=self.userId - ) - - def getInitialId(self, table: str) -> Optional[int]: - """Returns the initial ID for a table""" - return self.db.getInitialId(table) - - def _getPasswordHash(self, password: str) -> str: - """Creates a hash for a password""" - return pwdContext.hash(password) - - def _verifyPassword(self, plainPassword: str, hashedPassword: str) -> bool: - """Checks if the password matches the hash""" - return pwdContext.verify(plainPassword, hashedPassword) - - def _getCurrentTimestamp(self) -> str: - """Returns the current timestamp in ISO format""" - from 
datetime import datetime - return datetime.now().isoformat() - - # Mandate methods - - def getAllMandates(self) -> List[Dict[str, Any]]: - """Returns all mandates""" - return self.db.getRecordset("mandates") - - def getMandate(self, mandateId: int) -> Optional[Dict[str, Any]]: - """Returns a mandate by its ID""" - mandates = self.db.getRecordset("mandates", recordFilter={"id": mandateId}) - if mandates: - return mandates[0] - return None - - def createMandate(self, name: str, language: str = "de") -> Dict[str, Any]: - """Creates a new mandate""" - mandateData = { - "name": name, - "language": language - } - - return self.db.recordCreate("mandates", mandateData) - - def updateMandate(self, mandateId: int, mandateData: Dict[str, Any]) -> Dict[str, Any]: - """ - Updates an existing mandate - - Args: - mandateId: The ID of the mandate to update - mandateData: The mandate data to update - - Returns: - Dict[str, Any]: The updated mandate data - - Raises: - ValueError: If the mandate is not found - """ - # Check if the mandate exists - mandate = self.getMandate(mandateId) - if not mandate: - raise ValueError(f"Mandate with ID {mandateId} not found") - - # Update the mandate - updatedMandate = self.db.recordModify("mandates", mandateId, mandateData) - - return updatedMandate - - def deleteMandate(self, mandateId: int) -> bool: - """ - Deletes a mandate and all associated users and data - - Args: - mandateId: The ID of the mandate to delete - - Returns: - bool: True if the mandate was successfully deleted, otherwise False - """ - # Check if the mandate exists - mandate = self.getMandate(mandateId) - if not mandate: - return False - - # Check if it's the initial mandate - initialMandateId = self.getInitialId("mandates") - if initialMandateId is not None and mandateId == initialMandateId: - logger.warning(f"Attempt to delete the Root mandate was prevented") - return False - - # Find all users of the mandate - users = self.getUsersByMandate(mandateId) - - # Delete all users of 
the mandate and their associated data - for user in users: - self.deleteUser(user["id"]) - - # Delete the mandate - success = self.db.recordDelete("mandates", mandateId) - - if success: - logger.info(f"Mandate with ID {mandateId} was successfully deleted") - else: - logger.error(f"Error deleting mandate with ID {mandateId}") - - return success - - # User methods - - def getAllUsers(self) -> List[Dict[str, Any]]: - """Returns all users""" - users = self.db.getRecordset("users") - # Remove password hashes from the response - for user in users: - if "hashedPassword" in user: - del user["hashedPassword"] - return users - - def getUsersByMandate(self, mandateId: int) -> List[Dict[str, Any]]: - """ - Returns all users of a specific mandate - - Args: - mandateId: The ID of the mandate - - Returns: - List[Dict[str, Any]]: List of users in the mandate - """ - users = self.db.getRecordset("users", recordFilter={"mandateId": mandateId}) - # Remove password hashes from the response - for user in users: - if "hashedPassword" in user: - del user["hashedPassword"] - return users - - def getUserByUsername(self, username: str) -> Optional[Dict[str, Any]]: - """Returns a user by username""" - users = self.db.getRecordset("users") - for user in users: - if user.get("username") == username: - return user - return None - - def getUser(self, userId: int) -> Optional[Dict[str, Any]]: - """Returns a user by ID""" - users = self.db.getRecordset("users", recordFilter={"id": userId}) - if users: - user = users[0] - # Remove password hash from the API response - if "hashedPassword" in user: - userCopy = user.copy() - del userCopy["hashedPassword"] - return userCopy - return user - return None - - def createUser(self, username: str, password: str, email: str = None, - fullName: str = None, language: str = "de", mandateId: int = None, - disabled: bool = False, privilege: str = "user") -> Dict[str, Any]: - """ - Creates a new user - - Args: - username: The username - password: The password - 
email: The email address (optional) - fullName: The full name (optional) - language: The preferred language (default: "de") - mandateId: The ID of the mandate (optional) - disabled: Whether the user is disabled (default: False) - privilege: The privilege level (default: "user") - - Returns: - Dict[str, Any]: The created user data - - Raises: - ValueError: If the username already exists - """ - # Check if the username already exists - existingUser = self.getUserByUsername(username) - if existingUser: - raise ValueError(f"User '{username}' already exists") - - # Use the provided mandateId or the current context - userMandateId = mandateId if mandateId is not None else self.mandateId - - userData = { - "mandateId": userMandateId, - "username": username, - "email": email, - "fullName": fullName, - "disabled": disabled, - "language": language, - "privilege": privilege, - "hashedPassword": self._getPasswordHash(password) - } - - createdUser = self.db.recordCreate("users", userData) - - # Remove password hash from the response - if "hashedPassword" in createdUser: - del createdUser["hashedPassword"] - - return createdUser - - def authenticateUser(self, username: str, password: str) -> Optional[Dict[str, Any]]: - """ - Authenticates a user by username and password - - Args: - username: The username - password: The password - - Returns: - Optional[Dict[str, Any]]: The user data or None if authentication fails - """ - user = self.getUserByUsername(username) - - if not user: - return None - - if not self._verifyPassword(password, user.get("hashedPassword", "")): - return None - - # Check if the user is disabled - if user.get("disabled", False): - return None - - # Create a copy without password hash - authenticatedUser = {**user} - if "hashedPassword" in authenticatedUser: - del authenticatedUser["hashedPassword"] - - return authenticatedUser - - def updateUser(self, userId: int, userData: Dict[str, Any]) -> Dict[str, Any]: - """ - Updates a user - - Args: - userId: The ID of 
the user to update - userData: The user data to update - - Returns: - Dict[str, Any]: The updated user data - - Raises: - ValueError: If the user is not found - """ - # Get the current user with password hash (directly from DB) - users = self.db.getRecordset("users", recordFilter={"id": userId}) - if not users: - raise ValueError(f"User with ID {userId} not found") - - user = users[0] - - # If the password is being changed, hash it - if "password" in userData: - userData["hashedPassword"] = self._getPasswordHash(userData["password"]) - del userData["password"] - - # Update the user - updatedUser = self.db.recordModify("users", userId, userData) - - # Remove password hash from the response - if "hashedPassword" in updatedUser: - del updatedUser["hashedPassword"] - - return updatedUser - - def disableUser(self, userId: int) -> Dict[str, Any]: - """Disables a user""" - return self.updateUser(userId, {"disabled": True}) - - def enableUser(self, userId: int) -> Dict[str, Any]: - """Enables a user""" - return self.updateUser(userId, {"disabled": False}) - - def _deleteUserReferencedData(self, userId: int) -> None: - """ - Deletes all data associated with a user - - Args: - userId: The ID of the user - """ - # Here all tables are searched and all entries referencing this user are deleted - - # Delete user attributes - try: - attributes = self.db.getRecordset("attributes", recordFilter={"userId": userId}) - for attribute in attributes: - self.db.recordDelete("attributes", attribute["id"]) - except Exception as e: - logger.error(f"Error deleting attributes for user {userId}: {e}") - - # Other tables that might reference the user - # (Depending on the application's database structure) - - logger.info(f"All referenced data for user {userId} has been deleted") - - def deleteUser(self, userId: int) -> bool: - """ - Deletes a user and all associated data - - Args: - userId: The ID of the user to delete - - Returns: - bool: True if the user was successfully deleted, otherwise 
False - """ - # Check if the user exists - users = self.db.getRecordset("users", recordFilter={"id": userId}) - if not users: - return False - - # Check if it's the initial user - initialUserId = self.getInitialId("users") - if initialUserId is not None and userId == initialUserId: - logger.warning("Attempt to delete the Root Admin was prevented") - return False - - # Delete all data associated with the user - self._deleteUserReferencedData(userId) - - # Delete the user - success = self.db.recordDelete("users", userId) - - if success: - logger.info(f"User with ID {userId} was successfully deleted") - else: - logger.error(f"Error deleting user with ID {userId}") - - return success - - -# Singleton factory for GatewayInterface instances per context -_gatewayInterfaces = {} - -def getGatewayInterface(mandateId: int = None, userId: int = None) -> GatewayInterface: - """ - Returns a GatewayInterface instance for the specified context. - Reuses existing instances. - - Args: - mandateId: ID of the mandate - userId: ID of the user - - Returns: - GatewayInterface instance - """ - contextKey = f"{mandateId}_{userId}" - if contextKey not in _gatewayInterfaces: - _gatewayInterfaces[contextKey] = GatewayInterface(mandateId, userId) - return _gatewayInterfaces[contextKey] - -# Initialize the interface -getGatewayInterface() \ No newline at end of file diff --git a/modules/BACKUP-lucydomInterface.py b/modules/BACKUP-lucydomInterface.py deleted file mode 100644 index 50ae9fc1..00000000 --- a/modules/BACKUP-lucydomInterface.py +++ /dev/null @@ -1,1343 +0,0 @@ -""" -Interface to LucyDOM database and AI Connectors. -Uses the JSON connector for data access with added language support. 
-""" - -import os -import logging -import uuid -from datetime import datetime -from typing import Dict, Any, List, Optional, Union - -import importlib -import hashlib - -from modules.mimeUtils import isTextMimeType, determineContentEncoding - -# DYNAMIC PART: Connectors to the Interface -from connectors.connectorDbJson import DatabaseConnector -from connectors.connectorAiOpenai import ChatService - -# Basic Configurations -from modules.configuration import APP_CONFIG -logger = logging.getLogger(__name__) - -# Custom exceptions for file handling -class FileError(Exception): - """Base class for file handling exceptions.""" - pass - -class FileNotFoundError(FileError): - """Exception raised when a file is not found.""" - pass - -class FileStorageError(FileError): - """Exception raised when there's an error storing a file.""" - pass - -class FilePermissionError(FileError): - """Exception raised when there's a permission issue with a file.""" - pass - -class FileDeletionError(FileError): - """Exception raised when there's an error deleting a file.""" - pass - - -class LucyDOMInterface: - """ - Interface to the LucyDOM database. - Uses the JSON connector for data access. - """ - - def __init__(self, mandateId: int, userId: int): - """ - Initializes the LucyDOM Interface with mandate and user context. 
- - Args: - mandateId: ID of the current mandate - userId: ID of the current user - """ - self.mandateId = mandateId - self.userId = userId - - # Add language settings - self.userLanguage = "en" # Default user language - self.aiService = None # Will be set externally - - # Import data model module - try: - self.modelModule = importlib.import_module("modules.lucydomModel") - logger.info("lucydomModel successfully imported") - except ImportError as e: - logger.error(f"Error importing lucydomModel: {e}") - raise - - # Initialize database if needed - self._initializeDatabase() - - def _initializeDatabase(self): - """ - Initializes the database with minimal objects for the logged-in user in the mandate, if it doesn't exist yet. - No initialization without a valid user. - Creates an initial dataset for each table defined in the data model. - """ - effectiveMandateId = self.mandateId - effectiveUserId = self.userId - if effectiveMandateId is None or effectiveUserId is None: - #data available - return - - self.db = DatabaseConnector( - dbHost=APP_CONFIG.get("DB_LUCYDOM_HOST"), - dbDatabase=APP_CONFIG.get("DB_LUCYDOM_DATABASE"), - dbUser=APP_CONFIG.get("DB_LUCYDOM_USER"), - dbPassword=APP_CONFIG.get("DB_LUCYDOM_PASSWORD_SECRET"), - mandateId=self.mandateId, - userId=self.userId, - skipInitialIdLookup=True - ) - - # Initialize standard prompts for different areas - prompts = self.db.getRecordset("prompts") - if not prompts: - logger.info("Creating standard prompts") - - # Define standard prompts - standardPrompts = [ - { - "mandateId": effectiveMandateId, - "userId": effectiveUserId, - "content": "Research the current market trends and developments in [TOPIC]. Collect information about leading companies, innovative products or services, and current challenges. 
Present the results in a structured overview with relevant data and sources.", - "name": "Web Research: Market Research" - }, - { - "mandateId": effectiveMandateId, - "userId": effectiveUserId, - "content": "Analyze the attached dataset on [TOPIC] and identify the most important trends, patterns, and anomalies. Perform statistical calculations to support your findings. Present the results in a clearly structured analysis and draw relevant conclusions.", - "name": "Analysis: Data Analysis" - }, - { - "mandateId": effectiveMandateId, - "userId": effectiveUserId, - "content": "Create a detailed protocol of our meeting on [TOPIC]. Capture all discussed points, decisions made, and agreed measures. Structure the protocol clearly with agenda items, participant list, and clear responsibilities for follow-up actions.", - "name": "Protocol: Meeting Minutes" - }, - { - "mandateId": effectiveMandateId, - "userId": effectiveUserId, - "content": "Develop a UI/UX design concept for [APPLICATION/WEBSITE]. Consider the target audience, main functions, and brand identity. Describe the visual design, navigation, interaction patterns, and information architecture. 
Explain how the design optimizes user-friendliness and user experience.", - "name": "Design: UI/UX Design" - }, - { - "mandateId": effectiveMandateId, - "userId": effectiveUserId, - "content": "Gib mir die ersten 1000 Primzahlen", - "name": "Code: Primzahlen" - } - ] - - # Create prompts - for promptData in standardPrompts: - createdPrompt = self.db.recordCreate("prompts", promptData) - logger.info(f"Prompt '{promptData.get('name', 'Standard')}' was created with ID {createdPrompt['id']}") - - # Language support methods - - def setUserLanguage(self, languageCode: str): - """Set the user's preferred language""" - self.userLanguage = languageCode - logger.info(f"User language set to: {languageCode}") - - async def callAi(self, messages: List[Dict[str, str]], produceUserAnswer: bool = False, temperature: float = None) -> str: - """ - Enhanced AI service call with language support - - Args: - messages: List of message dictionaries - produceUserAnswer: Whether this response is for the end-user - temperature: Optional temperature setting - - Returns: - AI response text - """ - if not self.aiService: - logger.error("AI service not set in LucyDOMInterface") - return "Error: AI service not available" - - # Add language instruction for user-facing responses - if produceUserAnswer and self.userLanguage: - ltext= f"Please respond in '{self.userLanguage}' language." - if messages and messages[0]["role"] == "system": - if "language" not in messages[0]["content"].lower(): - messages[0]["content"] = f"{ltext} {messages[0]['content']}" - else: - # Insert a system message with language instruction - messages.insert(0, { - "role": "system", - "content": ltext - }) - - # Call the AI service - if temperature is not None: - return await self.aiService.callApi(messages, temperature=temperature) - else: - return await self.aiService.callApi(messages) - - # Utilities - - def getInitialId(self, table: str) -> Optional[int]: - """ - Returns the initial ID for a table. 
- - Args: - table: Name of the table - - Returns: - The initial ID or None if not present - """ - return self.db.getInitialId(table) - - def _getCurrentTimestamp(self) -> str: - """Returns the current timestamp in ISO format""" - return datetime.now().isoformat() - - - # Prompt methods - - def getAllPrompts(self) -> List[Dict[str, Any]]: - """Returns all prompts for the current mandate""" - return self.db.getRecordset("prompts") - - def getPrompt(self, promptId: int) -> Optional[Dict[str, Any]]: - """Returns a prompt by its ID""" - prompts = self.db.getRecordset("prompts", recordFilter={"id": promptId}) - if prompts: - return prompts[0] - return None - - def createPrompt(self, content: str, name: str) -> Dict[str, Any]: - """Creates a new prompt""" - promptData = { - "mandateId": self.mandateId, - "userId": self.userId, - "content": content, - "name": name, - "createdAt": self._getCurrentTimestamp() - } - - return self.db.recordCreate("prompts", promptData) - - def updatePrompt(self, promptId: int, content: str = None, name: str = None) -> Dict[str, Any]: - """ - Updates an existing prompt - - Args: - promptId: ID of the prompt to update - content: New content for the prompt - - Returns: - The updated prompt object - """ - # Check if the prompt exists - prompt = self.getPrompt(promptId) - if not prompt: - return None - - # Prepare data for update - promptData = {} - - if content is not None: - promptData["content"] = content - if name is not None: - promptData["name"] = name - - # Update prompt - return self.db.recordModify("prompts", promptId, promptData) - - def deletePrompt(self, promptId: int) -> bool: - """ - Deletes a prompt from the database - - Args: - promptId: ID of the prompt to delete - - Returns: - True if the prompt was successfully deleted, otherwise False - """ - return self.db.recordDelete("prompts", promptId) - - - # File Utilities - - def calculateFileHash(self, fileContent: bytes) -> str: - """Calculates a SHA-256 hash for the file content""" - 
return hashlib.sha256(fileContent).hexdigest() - - def checkForDuplicateFile(self, fileHash: str) -> Optional[Dict[str, Any]]: - """Checks if a file with the same hash already exists""" - files = self.db.getRecordset("files", recordFilter={"fileHash": fileHash}) - if files: - return files[0] - return None - - def getMimeType(self, filename: str) -> str: - """Determines the MIME type based on the file extension""" - import os - ext = os.path.splitext(filename)[1].lower()[1:] - extensionToMime = { - "pdf": "application/pdf", - "docx": "application/vnd.openxmlformats-officedocument.wordprocessingml.document", - "doc": "application/msword", - "xlsx": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet", - "xls": "application/vnd.ms-excel", - "pptx": "application/vnd.openxmlformats-officedocument.presentationml.presentation", - "ppt": "application/vnd.ms-powerpoint", - "csv": "text/csv", - "txt": "text/plain", - "json": "application/json", - "xml": "application/xml", - "html": "text/html", - "htm": "text/html", - "jpg": "image/jpeg", - "jpeg": "image/jpeg", - "png": "image/png", - "gif": "image/gif", - "webp": "image/webp", - "svg": "image/svg+xml", - "py": "text/x-python", - "js": "application/javascript", - "css": "text/css" - } - return extensionToMime.get(ext.lower(), "application/octet-stream") - - - # File methods - metadata-based operations - - def getAllFiles(self) -> List[Dict[str, Any]]: - """ - Returns all files for the current mandate without binary data. - - Returns: - List of FileItem objects without binary data - """ - files = self.db.getRecordset("files") - return files - - def getFile(self, fileId: int) -> Optional[Dict[str, Any]]: - """ - Returns a file by its ID, without binary data. 
- - Args: - fileId: ID of the file - - Returns: - FileItem without binary data or None if not found - """ - files = self.db.getRecordset("files", recordFilter={"id": fileId}) - if files: - return files[0] - return None - - def createFile(self, name: str, mimeType: str, size: int = None, fileHash: str = None) -> Dict[str, Any]: - """ - Creates a new file entry in the database without content. - The actual file content is stored separately in the FileData table. - - Args: - name: Name of the file - mimeType: MIME type of the file - size: Size of the file in bytes - fileHash: Hash value of the file for deduplication - - Returns: - The created FileItem object - """ - fileData = { - "mandateId": self.mandateId, - "userId": self.userId, - "name": name, - "mimeType": mimeType, - "size": size, - "fileHash": fileHash, - "creationDate": self._getCurrentTimestamp() - } - return self.db.recordCreate("files", fileData) - - def updateFile(self, fileId: int, updateData: Dict[str, Any]) -> Dict[str, Any]: - """ - Updates the metadata of an existing file without affecting the binary data. - - Args: - fileId: ID of the file to update - updateData: Dictionary with fields to update - - Returns: - The updated FileItem object - """ - # Check if the file exists - file = self.getFile(fileId) - if not file: - raise FileNotFoundError(f"File with ID {fileId} not found") - - # Update file - return self.db.recordModify("files", fileId, updateData) - - def deleteFile(self, fileId: int) -> bool: - """ - Deletes a file from the database (metadata and content). 
- - Args: - fileId: ID of the file - - Returns: - True on success, False on error - """ - try: - # Find the file in the database - file = self.getFile(fileId) - - if not file: - raise FileNotFoundError(f"File with ID {fileId} not found") - - # Check if the file belongs to the current mandate - if file.get("mandateId") != self.mandateId: - raise FilePermissionError(f"No permission to delete file {fileId}") - - # Check for other references to this file (by hash) - fileHash = file.get("fileHash") - if fileHash: - otherReferences = [f for f in self.db.getRecordset("files", recordFilter={"fileHash": fileHash}) - if f.get("id") != fileId] - - # If other files reference this content, only delete the database entry for FileItem - if otherReferences: - logger.info(f"Other references to the file content found, only FileItem will be deleted: {fileId}") - else: - # Also delete the file content in the FileData table - try: - fileDataEntries = self.db.getRecordset("fileData", recordFilter={"id": fileId}) - if fileDataEntries: - self.db.recordDelete("fileData", fileId) - logger.info(f"FileData for file {fileId} deleted") - except Exception as e: - logger.warning(f"Error deleting FileData for file {fileId}: {str(e)}") - - # Delete the FileItem entry - return self.db.recordDelete("files", fileId) - - except FileNotFoundError as e: - # Pass through FileNotFoundError - raise - except FilePermissionError as e: - # Pass through FilePermissionError - raise - except Exception as e: - logger.error(f"Error deleting file {fileId}: {str(e)}") - raise FileDeletionError(f"Error deleting file: {str(e)}") - - - # FileData methods - data operations - - """ - This contains the modified file handling methods for the LucyDOMInterface class - to implement consistent handling of base64 encoding flags. - """ - - def createFileData(self, fileId: int, data: bytes) -> bool: - """ - Stores the binary data of a file in the database, using base64 encoding for binary files. 
- Always sets the base64Encoded flag appropriately. - - Args: - fileId: ID of the associated file - data: Binary data - - Returns: - True on success, False on error - """ - try: - import base64 - - # Check the file metadata to determine if this should be stored as text or base64 - file = self.getFile(fileId) - if not file: - logger.error(f"File with ID {fileId} not found when storing data") - return False - - # Determine if this is a text-based format that should be stored as text - mimeType = file.get("mimeType", "application/octet-stream") - isTextFormat = isTextMimeType(mimeType) - - base64Encoded = False - fileData = None - - if isTextFormat: - # Try to decode as text - try: - # Convert bytes to text - textContent = data.decode('utf-8') - fileData = textContent - base64Encoded = False - logger.debug(f"Stored file {fileId} as text") - except UnicodeDecodeError: - # Fallback to base64 if text decoding fails - encodedData = base64.b64encode(data).decode('utf-8') - fileData = encodedData - base64Encoded = True - logger.warning(f"Failed to decode text file {fileId}, falling back to base64") - else: - # Binary format - always use base64 - encodedData = base64.b64encode(data).decode('utf-8') - fileData = encodedData - base64Encoded = True - logger.debug(f"Stored file {fileId} as base64") - - # Create the fileData record with data and encoding flag - fileDataObj = { - "id": fileId, - "data": fileData, - "base64Encoded": base64Encoded - } - - self.db.recordCreate("fileData", fileDataObj) - logger.info(f"Successfully stored data for file {fileId} (base64Encoded: {base64Encoded})") - return True - except Exception as e: - logger.error(f"Error storing data for file {fileId}: {str(e)}") - return False - - def getFileData(self, fileId: int) -> Optional[bytes]: - """ - Returns the binary data of a file. - Uses the base64Encoded flag to determine if decoding is necessary. 
- - Args: - fileId: ID of the file - - Returns: - Binary data or None if not found - """ - import base64 - - fileDataEntries = self.db.getRecordset("fileData", recordFilter={"id": fileId}) - if not fileDataEntries: - logger.warning(f"No data found for file ID {fileId}") - return None - - fileDataEntry = fileDataEntries[0] - if "data" not in fileDataEntry: - logger.warning(f"No data field in file data for ID {fileId}") - return None - - data = fileDataEntry["data"] - base64Encoded = fileDataEntry.get("base64Encoded", False) - - try: - if base64Encoded: - # Decode base64 to bytes - return base64.b64decode(data) - else: - # Convert text to bytes - return data.encode('utf-8') - except Exception as e: - logger.error(f"Error processing file data for {fileId}: {str(e)}") - return None - - def updateFileData(self, fileId: int, data: Union[bytes, str]) -> bool: - """ - Updates the binary data of a file in the database. - Handles base64 encoding based on the file type. - - Args: - fileId: ID of the file - data: New binary data or text content - - Returns: - True on success, False on error - """ - try: - import base64 - - # Check file metadata to determine if this should be stored as text or base64 - file = self.getFile(fileId) - if not file: - logger.error(f"File with ID {fileId} not found when updating data") - return False - - # Determine if this is a text-based format that should be stored as text - mimeType = file.get("mimeType", "application/octet-stream") - isTextFormat = ( - mimeType.startswith("text/") or - mimeType in [ - "application/json", - "application/xml", - "application/javascript", - "application/x-python", - "image/svg+xml" - ] - ) - - base64Encoded = False - fileData = None - - # Convert input data to the right format based on its type and the file's format - if isinstance(data, bytes): - if isTextFormat: - try: - # Try to convert bytes to text - fileData = data.decode('utf-8') - base64Encoded = False - except UnicodeDecodeError: - # Fallback to base64 if 
text decoding fails - fileData = base64.b64encode(data).decode('utf-8') - base64Encoded = True - else: - # Binary format - use base64 - fileData = base64.b64encode(data).decode('utf-8') - base64Encoded = True - elif isinstance(data, str): - if isTextFormat: - # Text format - store as text - fileData = data - base64Encoded = False - else: - # Check if it's already base64 encoded - try: - # Try to decode as base64 to validate - base64.b64decode(data) - fileData = data - base64Encoded = True - except: - # Not valid base64, encode the string - fileData = base64.b64encode(data.encode('utf-8')).decode('utf-8') - base64Encoded = True - else: - # Convert to string first - stringData = str(data) - if isTextFormat: - fileData = stringData - base64Encoded = False - else: - fileData = base64.b64encode(stringData.encode('utf-8')).decode('utf-8') - base64Encoded = True - - # Check if a record already exists - fileDataEntries = self.db.getRecordset("fileData", recordFilter={"id": fileId}) - - dataUpdate = { - "data": fileData, - "base64Encoded": base64Encoded - } - - if fileDataEntries: - # Update the existing record - self.db.recordModify("fileData", fileId, dataUpdate) - logger.info(f"Updated file data for file ID {fileId} (base64Encoded: {base64Encoded})") - else: - # Create a new record - dataUpdate["id"] = fileId - self.db.recordCreate("fileData", dataUpdate) - logger.info(f"Created new file data for file ID {fileId} (base64Encoded: {base64Encoded})") - - return True - except Exception as e: - logger.error(f"Error updating data for file {fileId}: {str(e)}") - return False - - def saveUploadedFile(self, fileContent: bytes, fileName: str) -> Dict[str, Any]: - """ - Saves an uploaded file in the database. - Metadata is stored in the 'files' table, - Binary data in the 'fileData' table with the appropriate base64Encoded flag. 
- - Args: - fileContent: Binary data of the file - fileName: Name of the file - - Returns: - Dictionary with metadata of the saved file - """ - try: - # Debug: Log the start of the file upload process - logger.info(f"Starting upload process for file: {fileName}") - - # Debug: Check if fileContent is valid bytes - if not isinstance(fileContent, bytes): - logger.error(f"Invalid fileContent type: {type(fileContent)}") - raise ValueError(f"fileContent must be bytes, got {type(fileContent)}") - - # Calculate file hash for deduplication - fileHash = self.calculateFileHash(fileContent) - logger.debug(f"Calculated file hash: {fileHash}") - - # Check for duplicate - existingFile = self.checkForDuplicateFile(fileHash) - if existingFile: - # Simply return the existing file metadata - logger.info(f"Duplicate found for {fileName}: {existingFile['id']}") - return existingFile - - # Determine MIME type - mimeType = self.getMimeType(fileName) - - # Determine file size - fileSize = len(fileContent) - - # 1. Save metadata in the 'files' table - logger.info(f"Saving file metadata to database for file: {fileName}") - dbFile = self.createFile( - name=fileName, - mimeType=mimeType, - size=fileSize, - fileHash=fileHash - ) - - # 2. 
Save binary data with appropriate base64 encoding based on file type - logger.info(f"Saving file content to database for file: {fileName}") - self.createFileData(dbFile["id"], fileContent) - - # Debug: Export file to static folder - self._exportFileToStatic(fileContent, dbFile["id"], fileName) # DEBUG TODO - - # Debug: Verify database record was created - if not dbFile: - logger.warning(f"Database record for file {fileName} was not created properly") - else: - logger.debug(f"Database record created for file {fileName}") - - logger.info(f"File upload process completed for: {fileName}") - return dbFile - - except Exception as e: - logger.error(f"Error in saveUploadedFile for {fileName}: {str(e)}", exc_info=True) - raise FileStorageError(f"Error saving file: {str(e)}") - - def downloadFile(self, fileId: int) -> Optional[Dict[str, Any]]: - """ - Returns a file for download, including binary data. - Uses the base64Encoded flag to determine how to process the file data. - - Args: - fileId: ID of the file - - Returns: - Dictionary with file data and metadata or None if not found - """ - try: - # 1. Get metadata from the 'files' table - file = self.getFile(fileId) - - if not file: - raise FileNotFoundError(f"File with ID {fileId} not found") - - # 2. 
Get binary data from the 'fileData' table using the new flag-aware method - fileContent = self.getFileData(fileId) - - if fileContent is None: - raise FileNotFoundError(f"Binary data for file with ID {fileId} not found") - - return { - "id": fileId, - "name": file.get("name", f"file_{fileId}"), - "contentType": file.get("mimeType", "application/octet-stream"), - "size": file.get("size", len(fileContent)), - "content": fileContent - } - except FileNotFoundError as e: - # Re-raise FileNotFoundError as is - raise - except Exception as e: - logger.error(f"Error downloading file {fileId}: {str(e)}") - raise FileError(f"Error downloading file: {str(e)}") - - def _exportFileToStatic(self, fileContent: bytes, fileId: int, fileName: str): - debugFilename = f"{fileId}_{fileName}" - with open(f"./static/{debugFilename}", 'wb') as f: - f.write(fileContent) - - # Workflow methods - - def getAllWorkflows(self) -> List[Dict[str, Any]]: - """Returns all workflows for the current mandate""" - return self.db.getRecordset("workflows") - - def getWorkflowsByUser(self, userId: int) -> List[Dict[str, Any]]: - """Returns all workflows for a user""" - return self.db.getRecordset("workflows", recordFilter={"userId": userId}) - - def getWorkflow(self, workflowId: str) -> Optional[Dict[str, Any]]: - """Returns a workflow by its ID""" - workflows = self.db.getRecordset("workflows", recordFilter={"id": workflowId}) - if workflows: - return workflows[0] - return None - - def createWorkflow(self, workflowData: Dict[str, Any]) -> Dict[str, Any]: - """Creates a new workflow in the database""" - # Make sure mandateId and userId are set - if "mandateId" not in workflowData: - workflowData["mandateId"] = self.mandateId - - if "userId" not in workflowData: - workflowData["userId"] = self.userId - - # Set timestamp if not present - currentTime = self._getCurrentTimestamp() - if "startedAt" not in workflowData: - workflowData["startedAt"] = currentTime - - if "lastActivity" not in workflowData: - 
workflowData["lastActivity"] = currentTime - - return self.db.recordCreate("workflows", workflowData) - - def updateWorkflow(self, workflowId: str, workflowData: Dict[str, Any]) -> Dict[str, Any]: - """ - Updates an existing workflow. - - Args: - workflowId: ID of the workflow to update - workflowData: New data for the workflow - - Returns: - The updated workflow object - """ - # Check if the workflow exists - workflow = self.getWorkflow(workflowId) - if not workflow: - return None - - # Set update time - workflowData["lastActivity"] = self._getCurrentTimestamp() - - # Update workflow - return self.db.recordModify("workflows", workflowId, workflowData) - - def deleteWorkflow(self, workflowId: str) -> bool: - """ - Deletes a workflow from the database. - - Args: - workflowId: ID of the workflow to delete - - Returns: - True on success, False if the workflow doesn't exist - """ - # Check if the workflow exists - workflow = self.getWorkflow(workflowId) - if not workflow: - return False - - # Check if the user is the owner or has admin rights - if workflow.get("userId") != self.userId: - # Here could be a check for admin rights - return False - - # Delete workflow - return self.db.recordDelete("workflows", workflowId) - - - # Workflow Messages - - def getWorkflowMessages(self, workflowId: str) -> List[Dict[str, Any]]: - """Returns all messages of a workflow""" - return self.db.getRecordset("workflowMessages", recordFilter={"workflowId": workflowId}) - - def createWorkflowMessage(self, messageData: Dict[str, Any]) -> Dict[str, Any]: - """ - Creates a new message for a workflow. 
- - Args: - messageData: The message data - - Returns: - The created message or None on error - """ - try: - # Check if required fields are present - requiredFields = ["id", "workflowId"] - for field in requiredFields: - if field not in messageData: - logger.error(f"Required field '{field}' missing in messageData") - raise ValueError(f"Required field '{field}' missing in message data") - - # Validate that ID is not None - if messageData["id"] is None: - messageData["id"] = f"msg_{uuid.uuid4()}" - logger.warning(f"Automatically generated ID for workflow message: {messageData['id']}") - - # Ensure required fields are present - if "startedAt" not in messageData and "createdAt" not in messageData: - messageData["startedAt"] = self._getCurrentTimestamp() - - if "createdAt" in messageData and "startedAt" not in messageData: - messageData["startedAt"] = messageData["createdAt"] - del messageData["createdAt"] - - # Set status if not present - if "status" not in messageData: - messageData["status"] = "completed" - - # Set sequence number if not present - if "sequenceNo" not in messageData: - # Get current messages to determine next sequence number - existingMessages = self.getWorkflowMessages(messageData["workflowId"]) - messageData["sequenceNo"] = len(existingMessages) + 1 - - # Ensure role and agentName are present - if "role" not in messageData: - messageData["role"] = "assistant" if messageData.get("agentName") else "user" - - if "agentName" not in messageData: - messageData["agentName"] = "" - - # Debug log for data to create - logger.debug(f"Creating workflow message with data: {messageData}") - - # Create message in database - createdMessage = self.db.recordCreate("workflowMessages", messageData) - - # Update workflow's messageIds if this is a new message - if createdMessage: - workflowId = messageData["workflowId"] - workflow = self.getWorkflow(workflowId) - - if workflow: - # Get current messageIds or initialize empty list - messageIds = workflow.get("messageIds", 
[]) - - # Add the new message ID if not already in the list - if createdMessage["id"] not in messageIds: - messageIds.append(createdMessage["id"]) - self.updateWorkflow(workflowId, {"messageIds": messageIds}) - - return createdMessage - except Exception as e: - logger.error(f"Error creating workflow message: {str(e)}") - # Return None instead of raising to avoid cascading failures - return None - - def updateWorkflowMessage(self, messageId: str, messageData: Dict[str, Any]) -> Dict[str, Any]: - """ - Updates an existing workflow message in the database. - - Args: - messageId: ID of the message - messageData: Data to update - - Returns: - The updated message object or None on error - """ - try: - # Debug info - logger.debug(f"Updating message {messageId} in database") - - # Ensure messageId is provided - if not messageId: - logger.error("No messageId provided for updateWorkflowMessage") - raise ValueError("messageId cannot be empty") - - # Check if message exists in database - messages = self.db.getRecordset("workflowMessages", recordFilter={"id": messageId}) - if not messages: - logger.warning(f"Message with ID {messageId} does not exist in database") - - # If message doesn't exist but we have workflowId, create it - if "workflowId" in messageData: - logger.info(f"Creating new message with ID {messageId} for workflow {messageData.get('workflowId')}") - return self.db.recordCreate("workflowMessages", messageData) - else: - logger.error(f"Workflow ID missing for new message {messageId}") - return None - - # Update existing message - existingMessage = messages[0] - - # Ensure required fields present - for key in ["role", "agentName"]: - if key not in messageData and key not in existingMessage: - messageData[key] = "assistant" if key == "role" else "" - - # Ensure ID is in the dataset - if 'id' not in messageData: - messageData['id'] = messageId - - # Convert createdAt to startedAt if needed - if "createdAt" in messageData and "startedAt" not in messageData: - 
messageData["startedAt"] = messageData["createdAt"] - del messageData["createdAt"] - - # Update the message - updatedMessage = self.db.recordModify("workflowMessages", messageId, messageData) - if updatedMessage: - logger.info(f"Message {messageId} updated successfully") - else: - logger.warning(f"Failed to update message {messageId}") - - return updatedMessage - except Exception as e: - logger.error(f"Error updating message {messageId}: {str(e)}", exc_info=True) - # Re-raise with full information - raise ValueError(f"Error updating message {messageId}: {str(e)}") - - def deleteWorkflowMessage(self, workflowId: str, messageId: str) -> bool: - """ - Deletes a message from a workflow in the database. - - Args: - workflowId: ID of the associated workflow - messageId: ID of the message to delete - - Returns: - True on success, False on error - """ - try: - # Check if the message exists - messages = self.getWorkflowMessages(workflowId) - message = next((m for m in messages if m.get("id") == messageId), None) - - if not message: - logger.warning(f"Message {messageId} for workflow {workflowId} not found") - return False - - # Delete the message from the database - return self.db.recordDelete("workflowMessages", messageId) - except Exception as e: - logger.error(f"Error deleting message {messageId}: {str(e)}") - return False - - def deleteFileFromMessage(self, workflowId: str, messageId: str, fileId: int) -> bool: - """ - Removes a file reference from a message. - The file itself is not deleted, only the reference in the message. - Enhanced version with improved file matching. 
- - Args: - workflowId: ID of the associated workflow - messageId: ID of the message - fileId: ID of the file to remove - - Returns: - True on success, False on error - """ - try: - # Log operation - logger.info(f"Removing file {fileId} from message {messageId} in workflow {workflowId}") - - # Get all workflow messages - allMessages = self.getWorkflowMessages(workflowId) - logger.debug(f"Workflow {workflowId} has {len(allMessages)} messages") - - # Try different approaches to find the message - message = None - - # Exact match - message = next((m for m in allMessages if m.get("id") == messageId), None) - - # Case-insensitive match - if not message and isinstance(messageId, str): - message = next((m for m in allMessages - if isinstance(m.get("id"), str) and m.get("id").lower() == messageId.lower()), None) - - # Partial match (starts with) - if not message and isinstance(messageId, str): - message = next((m for m in allMessages - if isinstance(m.get("id"), str) and m.get("id").startswith(messageId)), None) - - if not message: - logger.warning(f"Message {messageId} not found in workflow {workflowId}") - return False - - # Log the found message - logger.info(f"Found message: {message.get('id')}") - - # Check if message has documents - if "documents" not in message or not message["documents"]: - logger.warning(f"No documents in message {messageId}") - return False - - # Log existing documents - documents = message.get("documents", []) - logger.debug(f"Message has {len(documents)} documents") - for i, doc in enumerate(documents): - docId = doc.get("id", "unknown") - fileIdValue = doc.get("fileId", "unknown") - logger.debug(f"Document {i}: docId={docId}, fileId={fileIdValue}") - - # Create a new list of documents without the one to delete - updatedDocuments = [] - removed = False - - for doc in documents: - docId = doc.get("id") - fileIdValue = doc.get("fileId") - - # Flexible matching approach - shouldRemove = ( - (docId == fileId) or - (fileIdValue == fileId) or - 
(isinstance(docId, str) and str(fileId) in docId) or - (isinstance(fileIdValue, str) and str(fileId) in fileIdValue) - ) - - if shouldRemove: - removed = True - logger.info(f"Found file to remove: docId={docId}, fileId={fileIdValue}") - else: - updatedDocuments.append(doc) - - if not removed: - logger.warning(f"No matching file {fileId} found in message {messageId}") - return False - - # Update message with modified documents array - messageUpdate = { - "documents": updatedDocuments - } - - # Apply the update directly to the database - updated = self.db.recordModify("workflowMessages", message["id"], messageUpdate) - - if updated: - logger.info(f"Successfully removed file {fileId} from message {messageId}") - return True - else: - logger.warning(f"Failed to update message {messageId} in database") - return False - - except Exception as e: - logger.error(f"Error removing file {fileId} from message {messageId}: {str(e)}") - return False - - - # Workflow Logs - - def getWorkflowLogs(self, workflowId: str) -> List[Dict[str, Any]]: - """Returns all log entries for a workflow""" - return self.db.getRecordset("workflowLogs", recordFilter={"workflowId": workflowId}) - - def createWorkflowLog(self, logData: Dict[str, Any]) -> Dict[str, Any]: - """Creates a new log entry for a workflow""" - # Make sure required fields are present - if "timestamp" not in logData: - logData["timestamp"] = self._getCurrentTimestamp() - - # Add status information if not present - if "status" not in logData and "type" in logData: - if logData["type"] == "error": - logData["status"] = "error" - else: - logData["status"] = "running" - - # Add progress information if not present - if "progress" not in logData: - # Default progress values based on log type - if logData.get("type") == "info": - logData["progress"] = 50 # Default middle progress - elif logData.get("type") == "error": - logData["progress"] = -1 # Error state - elif logData.get("type") == "warning": - logData["progress"] = 50 # Default 
middle progress - - return self.db.recordCreate("workflowLogs", logData) - - - # Workflow Management - - def saveWorkflowState(self, workflow: Dict[str, Any], saveMessages: bool = True, saveLogs: bool = True) -> bool: - """ - Saves the state of a workflow to the database. - Workflow data is updated, but messages are stored separately. - - Args: - workflow: The workflow object - saveMessages: Flag to determine if messages should be saved - saveLogs: Flag to determine if logs should be saved - - Returns: - True on success, False on failure - """ - try: - workflowId = workflow.get("id") - if not workflowId: - return False - - # Extract only the database-relevant workflow fields - # IMPORTANT: Don't store messages in the workflow table! - workflowDbData = { - "id": workflowId, - "mandateId": workflow.get("mandateId", self.mandateId), - "userId": workflow.get("userId", self.userId), - "name": workflow.get("name", f"Workflow {workflowId}"), - "status": workflow.get("status", "completed"), - "startedAt": workflow.get("startedAt", self._getCurrentTimestamp()), - "lastActivity": workflow.get("lastActivity", self._getCurrentTimestamp()), - "dataStats": workflow.get("dataStats", {}) - } - - # Check if workflow already exists - existingWorkflow = self.getWorkflow(workflowId) - if existingWorkflow: - self.updateWorkflow(workflowId, workflowDbData) - else: - self.createWorkflow(workflowDbData) - - # Save messages - if saveMessages and "messages" in workflow: - for message in workflow["messages"]: - messageId = message.get("id") - if not messageId: - continue - - # Since each message is already saved with createWorkflowMessage, - # we only need to check if updates are necessary - # First, get existing message from database - existingMessages = self.getWorkflowMessages(workflowId) - existingMessage = next((m for m in existingMessages if m.get("id") == messageId), None) - - if existingMessage: - # Check if updates are needed - hasChanges = False - for key in ["role", "agentName", 
"content", "status", "documents"]: - if key in message and message.get(key) != existingMessage.get(key): - hasChanges = True - break - - if hasChanges: - # Extract only relevant data for the database - messageData = { - "role": message.get("role", existingMessage.get("role", "unknown")), - "content": message.get("content", existingMessage.get("content", "")), - "agentName": message.get("agentName", existingMessage.get("agentName", "")), - "status": message.get("status", existingMessage.get("status", "completed")), - "documents": message.get("documents", existingMessage.get("documents", [])) - } - self.updateWorkflowMessage(messageId, messageData) - else: - # Message doesn't exist in database yet - # It should have been saved via createWorkflowMessage - # If not, log a warning - logger.warning(f"Message {messageId} in workflow {workflowId} not found in database") - - # Save logs - if saveLogs and "logs" in workflow: - # Get existing logs - existingLogs = {log["id"]: log for log in self.getWorkflowLogs(workflowId)} - - for log in workflow["logs"]: - logId = log.get("id") - if not logId: - continue - - # Extract only relevant data for the database - logData = { - "id": logId, - "workflowId": workflowId, - "message": log.get("message", ""), - "type": log.get("type", "info"), - "timestamp": log.get("timestamp", self._getCurrentTimestamp()), - "agentName": log.get("agentName", "(undefined)"), - "status": log.get("status", "running"), - "progress": log.get("progress", 50) - } - - # Create or update log - if logId in existingLogs: - self.db.recordModify("workflowLogs", logId, logData) - else: - self.db.recordCreate("workflowLogs", logData) - - return True - except Exception as e: - logger.error(f"Error saving workflow state: {str(e)}") - return False - - def loadWorkflowState(self, workflowId: str) -> Optional[Dict[str, Any]]: - """ - Loads the complete state of a workflow from the database. - This includes the workflow itself, messages, and logs. 
- - Args: - workflowId: ID of the workflow to load - - Returns: - The complete workflow object or None on error - """ - try: - # Load base workflow - workflow = self.getWorkflow(workflowId) - if not workflow: - return None - - # Log the workflow base retrieval - logger.debug(f"Loaded base workflow {workflowId} from database") - - # Load messages - messages = self.getWorkflowMessages(workflowId) - # Sort by sequence number - messages.sort(key=lambda x: x.get("sequenceNo", 0)) - - # Debug log for messages and document counts - messageCount = len(messages) - logger.debug(f"Loaded {messageCount} messages for workflow {workflowId}") - - # Check if messageIds exists and is valid - messageIds = workflow.get("messageIds", []) - if not messageIds or len(messageIds) != len(messages): - # Rebuild messageIds from messages - messageIds = [msg.get("id") for msg in messages] - # Update in database - self.updateWorkflow(workflowId, {"messageIds": messageIds}) - logger.info(f"Rebuilt messageIds for workflow {workflowId}") - - # Log document counts for each message - for msg in messages: - docCount = len(msg.get("documents", [])) - if docCount > 0: - logger.info(f"Message {msg.get('id')} has {docCount} documents loaded from database") - - # Load logs - logs = self.getWorkflowLogs(workflowId) - # Sort by timestamp - logs.sort(key=lambda x: x.get("timestamp", "")) - - # Assemble complete workflow object - completeWorkflow = workflow.copy() - completeWorkflow["messages"] = messages - completeWorkflow["messageIds"] = messageIds # Ensure messageIds is included - completeWorkflow["logs"] = logs - - return completeWorkflow - except Exception as e: - logger.error(f"Error loading workflow state: {str(e)}") - return None - - -# Singleton factory for LucyDOMInterface instances per context -_lucydomInterfaces = {} - -def getLucydomInterface(mandateId: int = 0, userId: int = 0) -> LucyDOMInterface: - """ - Returns a LucyDOMInterface instance for the specified context. 
- Reuses existing instances. - - Args: - mandateId: ID of the mandate - userId: ID of the user - - Returns: - LucyDOMInterface instance - """ - contextKey = f"{mandateId}_{userId}" - if contextKey not in _lucydomInterfaces: - # Create new interface instance - interface = LucyDOMInterface(mandateId, userId) - # Initialize AI service - aiService = ChatService() - interface.aiService = aiService # Directly set the attribute - _lucydomInterfaces[contextKey] = interface - return _lucydomInterfaces[contextKey] - -# Init -getLucydomInterface() \ No newline at end of file diff --git a/modules/agentEmail.py b/modules/agentEmail.py new file mode 100644 index 00000000..effd0591 --- /dev/null +++ b/modules/agentEmail.py @@ -0,0 +1,656 @@ +""" +Email agent for creating draft emails with Microsoft Graph API. +Creates HTML-formatted email templates with attachments based on input documents. +""" + +import logging +import json +import base64 +import os +import msal +import requests +from typing import Dict, Any, List, Optional +from modules.configuration import APP_CONFIG + +from modules.workflowAgentsRegistry import AgentBase + +logger = logging.getLogger(__name__) + +class AgentEmail(AgentBase): + """AI-driven agent for creating email templates and drafts using Microsoft Graph API""" + + def __init__(self): + """Initialize the email agent""" + super().__init__() + self.name = "email" + self.label = "Email Templates" + self.description = "Creates email templates with HTML-formatted body and attachments from input documents" + self.capabilities = [ + "emailDrafting", + "contentFormatting", + "htmlTemplates", + "documentAttachment", + "msftGraphIntegration" + ] + + # Initialize configuration + self.client_id = None + self.client_secret = None + self.tenant_id = None + self.redirect_uri = None + self.authority = None + self.scopes = ["Mail.ReadWrite", "User.Read"] + + # Token storage directory + self.token_dir = './token_storage' + if not os.path.exists(self.token_dir): + 
os.makedirs(self.token_dir) + logger.info(f"Created token storage directory: {self.token_dir}") + + def setDependencies(self, mydom=None): + """Set external dependencies for the agent.""" + self.mydom = mydom + self._loadConfiguration() + + def _loadConfiguration(self): + """Load Microsoft Graph API configuration from config files""" + try: + self.client_id = APP_CONFIG.get("Agent_Mail_MSFT_CLIENT_ID") + self.client_secret = APP_CONFIG.get("Agent_Mail_MSFT_CLIENT_SECRET") + self.tenant_id = APP_CONFIG.get("Agent_Mail_MSFT_TENANT_ID", "common") + self.redirect_uri = APP_CONFIG.get("Agent_Mail_MSFT_REDIRECT_URI") + + # Set authority URL + self.authority = f"https://login.microsoftonline.com/{self.tenant_id}" + + logger.info(f"Email agent initialized with tenant ID: {self.tenant_id}") + logger.info(f"Redirect URI: {self.redirect_uri}") + + except Exception as e: + logger.error(f"Error loading Microsoft Graph configuration: {str(e)}") + + async def processTask(self, task: Dict[str, Any]) -> Dict[str, Any]: + """ + Process a task by creating an email template based on input documents. + Sends a login request to the frontend if Microsoft authentication is required. 
+ + Args: + task: Task dictionary with prompt, inputDocuments, outputSpecifications + + Returns: + Dictionary with feedback and documents + """ + try: + # Extract task information + prompt = task.get("prompt", "") + inputDocuments = task.get("inputDocuments", []) + + # Check AI service + if not self.mydom: + return { + "feedback": "The Email agent requires an AI service to function.", + "documents": [] + } + + # Check Microsoft authentication status + user_info, access_token = self._getCurrentUserToken() + + # If not authenticated, trigger frontend authentication flow + if not user_info or not access_token: + # Create authentication instruction document + auth_instructions = self._createFrontendAuthTriggerDocument() + + # Return feedback with authentication trigger log for frontend + return { + "feedback": "⚠️ Microsoft authentication required. Please complete the authentication process when prompted.", + "documents": [auth_instructions], + "log": { + "message": "doMsftLogin", + "type": "system", + "details": "Microsoft authentication required to create email drafts" + } + } + + # Extract document data from input + documentContents, attachments = self._processInputDocuments(inputDocuments) + + # Generate email subject and body using AI + emailTemplate = await self._generateEmailTemplate(prompt, documentContents) + + # Create HTML preview of the email + htmlPreview = self._createHtmlPreview(emailTemplate) + + # Attempt to create a draft email using Microsoft Graph API + draft_result, user_email = self._createDraftEmail( + emailTemplate["recipient"], + emailTemplate["subject"], + emailTemplate["htmlBody"], + attachments + ) + + # Prepare output documents + documents = [] + + # Add HTML preview document + previewDoc = self.formatAgentDocumentOutput( + "email_preview.html", + htmlPreview, + "text/html" + ) + documents.append(previewDoc) + + # Add email template as JSON for reference + templateJson = json.dumps(emailTemplate, indent=2) + templateDoc = 
self.formatAgentDocumentOutput( + "email_template.json", + templateJson, + "application/json" + ) + documents.append(templateDoc) + + # Prepare feedback message + if draft_result: + feedback = f"Email draft created successfully for {user_email}. The subject is: '{emailTemplate['subject']}'" + if attachments: + feedback += f" with {len(attachments)} attachment(s)" + feedback += ". You can open and edit it in your Outlook draft folder." + else: + feedback = "Email template created but could not save as draft. HTML preview and template are available as documents." + + return { + "feedback": feedback, + "documents": documents + } + + except Exception as e: + logger.error(f"Error in email creation: {str(e)}", exc_info=True) + return { + "feedback": f"Error creating email template: {str(e)}", + "documents": [] + } + + def _createFrontendAuthTriggerDocument(self) -> Dict[str, Any]: + """ + Create a simple document that explains authentication is required. + This document is minimal as the actual authentication will be handled by frontend. + + Returns: + Document dictionary + """ + html_content = """ + + + + + Microsoft Authentication Required + + + +
+

Microsoft Authentication Required

+ +

To create email templates and drafts, you need to authenticate with your Microsoft account.

+ +

The application will now initiate the Microsoft authentication process. Please follow the instructions in the authentication window.

+ +
+

Note: You only need to authenticate once. Your session will be remembered for future email operations.

+
+
+ + + """ + + return self.formatAgentDocumentOutput( + "microsoft_authentication.html", + html_content, + "text/html" + ) + + def _processInputDocuments(self, documents: List[Dict[str, Any]]) -> tuple: + """ + Process input documents to extract content and prepare attachments. + + Args: + documents: List of input documents + + Returns: + Tuple of (document content text, list of attachments) + """ + documentContents = [] + attachments = [] + + for doc in documents: + docName = doc.get("name", "unnamed") + if doc.get("ext"): + docName = f"{docName}.{doc.get('ext')}" + + # Add document name to contents + documentContents.append(f"\n\n--- {docName} ---\n") + + # Process contents + hasAttachment = False + for content in doc.get("contents", []): + # Add extracted text to document contents + if content.get("dataExtracted"): + documentContents.append(content.get("dataExtracted", "")) + + # Prepare attachment if it has content data + if content.get("data"): + # Check if this content should be an attachment + # Typically files like PDFs, images, etc. + contentType = content.get("contentType", "") + if (not contentType.startswith("text/") or + contentType in ["application/pdf", "application/msword"]): + hasAttachment = True + + # If document has content to attach, add to attachments + if hasAttachment: + attachments.append({ + "name": docName, + "document": doc + }) + + return "\n".join(documentContents), attachments + + async def _generateEmailTemplate(self, prompt: str, documentContents: str) -> Dict[str, Any]: + """ + Generate email template using AI. + + Args: + prompt: The task prompt + documentContents: Extracted document content + + Returns: + Email template dictionary with recipient, subject, body + """ + emailPrompt = f""" + Create an email based on the following request: + + REQUEST: {prompt} + + DOCUMENT CONTENTS: + {documentContents[:2000]}... (truncated if longer) + + Generate an email template with: + 1. 
A relevant recipient (use placeholder or derive from content if possible) + 2. A concise but descriptive subject line + 3. A professional HTML-formatted email body + 4. Appropriate greeting and closing + + Format your response as JSON with these fields: + - recipient: email address + - subject: subject line + - plainBody: plain text version + - htmlBody: HTML formatted version + + Only return valid JSON. No preamble or explanations. + """ + + try: + response = await self.mydom.callAi([ + {"role": "system", "content": "You are an email template specialist. Respond with valid JSON only."}, + {"role": "user", "content": emailPrompt} + ], produceUserAnswer=True) + + # Extract JSON from response + jsonStart = response.find('{') + jsonEnd = response.rfind('}') + 1 + + if jsonStart >= 0 and jsonEnd > jsonStart: + template = json.loads(response[jsonStart:jsonEnd]) + return template + else: + # Fallback if JSON not found + return { + "recipient": "recipient@example.com", + "subject": "Information Regarding Your Request", + "plainBody": f"This email is regarding your request: {prompt}", + "htmlBody": f"

This email is regarding your request: {prompt}

" + } + + except Exception as e: + logger.warning(f"Error generating email template: {str(e)}") + return { + "recipient": "recipient@example.com", + "subject": "Information Regarding Your Request", + "plainBody": f"This email is regarding your request: {prompt}", + "htmlBody": f"

This email is regarding your request: {prompt}

" + } + + def _createHtmlPreview(self, emailTemplate: Dict[str, Any]) -> str: + """ + Create an HTML preview of the email template. + + Args: + emailTemplate: Email template dictionary + + Returns: + HTML string for preview + """ + html = f""" + + + + + Email Preview: {emailTemplate.get('subject', 'Email Template')} + + + +
+ + + +
+ + + """ + return html + + def _getCurrentUserToken(self): + """ + Get the current user's token from the token store. + Does not attempt to initiate authentication flow. + + Returns: + Tuple of (user info, access token) or (None, None) if no valid token + """ + try: + # Check if we have any token files + if not os.path.exists(self.token_dir) or not os.listdir(self.token_dir): + logger.warning("No token files found. User needs to authenticate with Microsoft.") + return None, None + + # Find the most recently modified token file + token_files = [os.path.join(self.token_dir, f) for f in os.listdir(self.token_dir) if f.endswith('.json')] + if not token_files: + return None, None + + most_recent = max(token_files, key=os.path.getmtime) + user_id = os.path.basename(most_recent).split('.')[0] + + # Load the token + token_data = self._loadTokenFromFile(user_id) + if not token_data or not token_data.get("access_token"): + logger.warning(f"No valid token data for user {user_id}") + return None, None + + # Get user info from token + user_info = self._getUserInfoFromToken(token_data["access_token"]) + if not user_info: + # Try to refresh the token + if self._refreshToken(user_id): + # Load the refreshed token + token_data = self._loadTokenFromFile(user_id) + if token_data and token_data.get("access_token"): + user_info = self._getUserInfoFromToken(token_data["access_token"]) + if user_info: + return user_info, token_data["access_token"] + + logger.warning(f"Could not get user info for user {user_id}") + return None, None + + return user_info, token_data["access_token"] + except Exception as e: + logger.error(f"Error getting current user token: {str(e)}") + return None, None + + def _loadTokenFromFile(self, user_id): + """Load token data from a file""" + filename = os.path.join(self.token_dir, f"{user_id}.json") + if os.path.exists(filename): + try: + with open(filename, 'r') as f: + return json.load(f) + except Exception as e: + logger.error(f"Error loading token file: 
{str(e)}") + return None + return None + + def _getUserInfoFromToken(self, access_token): + """Get user information using the access token""" + headers = { + 'Authorization': f'Bearer {access_token}', + 'Content-Type': 'application/json' + } + + try: + response = requests.get('https://graph.microsoft.com/v1.0/me', headers=headers) + if response.status_code == 200: + user_data = response.json() + return { + "name": user_data.get("displayName", ""), + "email": user_data.get("userPrincipalName", ""), + "id": user_data.get("id", "") + } + else: + logger.error(f"Error getting user info: {response.status_code} - {response.text}") + return None + except Exception as e: + logger.error(f"Exception getting user info: {str(e)}") + return None + + def _refreshToken(self, user_id): + """Refresh the access token using the stored refresh token""" + token_data = self._loadTokenFromFile(user_id) + if not token_data or not token_data.get("refresh_token"): + logger.warning("No refresh token available") + return False + + msal_app = msal.ConfidentialClientApplication( + self.client_id, + authority=self.authority, + client_credential=self.client_secret + ) + + result = msal_app.acquire_token_by_refresh_token( + token_data["refresh_token"], + scopes=self.scopes + ) + + if "error" in result: + logger.error(f"Error refreshing token: {result.get('error')}") + return False + + # Update tokens in storage + token_data["access_token"] = result["access_token"] + if "refresh_token" in result: + token_data["refresh_token"] = result["refresh_token"] + + # Save the updated token + filename = os.path.join(self.token_dir, f"{user_id}.json") + try: + with open(filename, 'w') as f: + json.dump(token_data, f) + logger.info(f"Token saved for user: {user_id}") + return True + except Exception as e: + logger.error(f"Error saving token file: {str(e)}") + return False + + def _createDraftEmail(self, recipient, subject, body, attachments=None): + """Create a draft email using Microsoft Graph API""" + try: + # 
Get current user token + user_info, access_token = self._getCurrentUserToken() + + if not user_info or not access_token: + logger.warning("No authenticated user found, cannot create draft email") + return False, None + + # Create draft email using Graph API + email_result = self._createGraphDraftEmail(access_token, recipient, subject, body, attachments) + + if email_result: + return True, user_info.get("email") + else: + return False, user_info.get("email") + + except Exception as e: + logger.error(f"Error in creating draft email: {str(e)}") + return False, None + + def _createGraphDraftEmail(self, access_token, recipient, subject, body, attachments=None): + """ + Create a draft email using Microsoft Graph API with fixed attachment handling. + Directly uses the document's data attribute for attachments. + + Args: + access_token: Microsoft Graph access token + recipient: Email recipient + subject: Email subject + body: HTML body of the email + attachments: List of attachments + + Returns: + Draft result or None if failed + """ + headers = { + 'Authorization': f'Bearer {access_token}', + 'Content-Type': 'application/json' + } + + # Prepare email data + email_data = { + 'subject': subject, + 'body': { + 'contentType': 'HTML', + 'content': body + }, + 'toRecipients': [ + { + 'emailAddress': { + 'address': recipient + } + } + ] + } + + # Add attachments if available + if attachments and len(attachments) > 0: + email_data['attachments'] = [] + + for attachment in attachments: + # Get the document object + doc = attachment.get('document', {}) + file_name = attachment.get('name', 'attachment.file') + + logger.info(f"Processing attachment: {file_name}") + + # Directly access the data attribute from the document + if 'data' in doc: + file_content = doc['data'] + is_base64 = doc.get('base64Encoded', False) + + # Determine content type + content_type = "application/octet-stream" + if 'mimeType' in doc: + content_type = doc['mimeType'] + elif 'contentType' in doc: + 
content_type = doc['contentType'] + + # Check if we need to encode the content + if not is_base64: + logger.info(f"Base64 encoding content for {file_name}") + if isinstance(file_content, str): + try: + # Check if already valid base64 + base64.b64decode(file_content) + logger.info("Content appears to be valid base64 already") + except: + # Not valid base64, encode it + logger.info("Encoding string content to base64") + file_content = base64.b64encode(file_content.encode('utf-8')).decode('utf-8') + elif isinstance(file_content, bytes): + logger.info("Encoding bytes content to base64") + file_content = base64.b64encode(file_content).decode('utf-8') + + # Add attachment to email data + logger.info(f"Adding attachment: {file_name} ({content_type})") + attachment_data = { + '@odata.type': '#microsoft.graph.fileAttachment', + 'name': file_name, + 'contentType': content_type, + 'contentBytes': file_content + } + email_data['attachments'].append(attachment_data) + logger.info(f"Successfully added attachment: {file_name}") + else: + logger.warning(f"Document does not contain 'data' attribute: {file_name}") + # Try to find data in the fileId + if 'fileId' in doc: + logger.info(f"Found fileId: {doc['fileId']} - could implement fileId-based attachment lookup here") + # Future enhancement: implement file lookup by fileId + + # Try to create draft using drafts folder endpoint (Option 1) + try: + logger.info("Attempting to create draft email using drafts folder endpoint") + logger.info(f"Email data structure: subject={subject}, recipient={recipient}, " + + f"has_attachments={bool(email_data.get('attachments'))}, " + + f"attachment_count={len(email_data.get('attachments', []))}") + + response = requests.post( + 'https://graph.microsoft.com/v1.0/me/mailFolders/drafts/messages', + headers=headers, + json=email_data + ) + + if response.status_code >= 200 and response.status_code < 300: + logger.info("Successfully created draft email using drafts folder endpoint") + return 
response.json() + else: + logger.error(f"Drafts folder method failed: {response.status_code} - {response.text}") + + # Try fallback method with messages endpoint (Option 2) + logger.info("Trying fallback with messages endpoint") + response = requests.post( + 'https://graph.microsoft.com/v1.0/me/messages', + headers=headers, + json=email_data + ) + + if response.status_code >= 200 and response.status_code < 300: + logger.info("Successfully created draft email using messages endpoint") + return response.json() + else: + logger.error(f"Messages endpoint method also failed: {response.status_code} - {response.text}") + return None + + except Exception as e: + logger.error(f"Exception creating draft email: {str(e)}", exc_info=True) + return None + +# Factory function for the Email agent +def getAgentEmail(): + """Returns an instance of the Email agent.""" + return AgentEmail() \ No newline at end of file diff --git a/modules/lucydomInterface.py b/modules/lucydomInterface.py index f0f91b2d..b1fc5dcb 100644 --- a/modules/lucydomInterface.py +++ b/modules/lucydomInterface.py @@ -145,7 +145,13 @@ class LucyDOMInterface: "userId": self.userId, "content": "Gib mir die ersten 1000 Primzahlen", "name": "Code: Primzahlen" - } + }, + { + "mandateId": self.mandateId, + "userId": self.userId, + "content": "Bereite mir eine formelle E-Mail an peter.muster@domain.com vor, um meinen Termin von 10 Uhr auf Freitag zu verschieben.", + "name": "Mail: Vorbereitung" + }, ] # Create prompts diff --git a/requirements.txt b/requirements.txt index 8e6dfa11..178e1730 100644 --- a/requirements.txt +++ b/requirements.txt @@ -42,4 +42,7 @@ python-dateutil==2.8.2 python-dotenv==1.0.0 ## Dependencies for trio (used by httpx) -sortedcontainers>=2.4.0 # Required by trio \ No newline at end of file +sortedcontainers>=2.4.0 # Required by trio + +## MSFT Integration +msal==1.24.1 diff --git a/routes/routeMsft.py new file mode 100644 index 00000000..1f81793d --- /dev/null +++ 
b/routes/routeMsft.py @@ -0,0 +1,405 @@ +from fastapi import APIRouter, HTTPException, Depends, Request, Response, status, Cookie +from fastapi.responses import HTMLResponse, RedirectResponse, JSONResponse +import msal +import os +import logging +import sys +import json +from typing import Dict, Any, Optional +from datetime import datetime, timedelta + +from modules.auth import getCurrentActiveUser, getUserContext +from modules.configuration import APP_CONFIG +from modules.lucydomInterface import getLucydomInterface + +# Configure logger +logger = logging.getLogger(__name__) + +# Create router for Microsoft Auth endpoints +router = APIRouter( + prefix="/api/msft", + tags=["Microsoft"], + responses={ + 404: {"description": "Not found"}, + 400: {"description": "Bad request"}, + 401: {"description": "Unauthorized"}, + 403: {"description": "Forbidden"}, + 500: {"description": "Internal server error"} + } +) + +# Azure AD configuration - load from config +CLIENT_ID = APP_CONFIG.get("Agent_Mail_MSFT_CLIENT_ID") +CLIENT_SECRET = APP_CONFIG.get("Agent_Mail_MSFT_CLIENT_SECRET") +TENANT_ID = APP_CONFIG.get("Agent_Mail_MSFT_TENANT_ID", "common") # Use 'common' for multi-tenant +AUTHORITY = f"https://login.microsoftonline.com/{TENANT_ID}" +SCOPES = ["Mail.ReadWrite", "User.Read"] +REDIRECT_URI = APP_CONFIG.get("Agent_Mail_MSFT_REDIRECT_URI") + +# Initialize MSAL application +app_config = { + "client_id": CLIENT_ID, + "client_credential": CLIENT_SECRET, + "authority": AUTHORITY, + "redirect_uri": REDIRECT_URI +} + +# Create a simple file-based token storage +TOKEN_DIR = './token_storage' +if not os.path.exists(TOKEN_DIR): + os.makedirs(TOKEN_DIR) + logger.info(f"Created token storage directory: {TOKEN_DIR}") + +def save_token_to_file(user_id: str, token_data: Dict[str, Any]): + """Save token data to a file""" + filename = os.path.join(TOKEN_DIR, f"{user_id}.json") + with open(filename, 'w') as f: + json.dump(token_data, f) + logger.info(f"Token saved for user: {user_id}") + 
+def load_token_from_file(user_id: str) -> Optional[Dict[str, Any]]: + """Load token data from a file""" + filename = os.path.join(TOKEN_DIR, f"{user_id}.json") + if os.path.exists(filename): + with open(filename, 'r') as f: + return json.load(f) + return None + +def get_user_info_from_token(access_token: str) -> Optional[Dict[str, Any]]: + """Get user information using the access token""" + import requests + headers = { + 'Authorization': f'Bearer {access_token}', + 'Content-Type': 'application/json' + } + + try: + response = requests.get('https://graph.microsoft.com/v1.0/me', headers=headers) + if response.status_code == 200: + user_data = response.json() + return { + "name": user_data.get("displayName", ""), + "email": user_data.get("userPrincipalName", ""), + "id": user_data.get("id", "") + } + else: + logger.error(f"Error getting user info: {response.status_code} - {response.text}") + return None + except Exception as e: + logger.error(f"Exception getting user info: {str(e)}") + return None + +def verify_token(token: str) -> bool: + """Verify the access token is valid""" + import requests + headers = { + 'Authorization': f'Bearer {token}', + 'Content-Type': 'application/json' + } + + try: + logger.info("Verifying token validity...") + response = requests.get('https://graph.microsoft.com/v1.0/me', headers=headers) + + if response.status_code == 200: + logger.info("Token verification successful") + return True + else: + logger.error(f"Token verification failed: {response.status_code} - {response.text}") + return False + except Exception as e: + logger.error(f"Exception verifying token: {str(e)}") + return False + +def refresh_token(user_id: str) -> bool: + """Refresh the access token using the stored refresh token""" + token_data = load_token_from_file(user_id) + if not token_data or not token_data.get("refresh_token"): + logger.warning("No refresh token available") + return False + + msal_app = msal.ConfidentialClientApplication( + app_config["client_id"], + 
authority=app_config["authority"], + client_credential=app_config["client_credential"] + ) + + result = msal_app.acquire_token_by_refresh_token( + token_data["refresh_token"], + scopes=SCOPES + ) + + if "error" in result: + logger.error(f"Error refreshing token: {result.get('error')}") + return False + + # Update tokens in storage + token_data["access_token"] = result["access_token"] + if "refresh_token" in result: + token_data["refresh_token"] = result["refresh_token"] + + save_token_to_file(user_id, token_data) + logger.info("Access token refreshed successfully") + return True + +def silent_login(user_id: str) -> bool: + """Try to silently log in a user using their refresh token""" + token_data = load_token_from_file(user_id) + if not token_data or not token_data.get("refresh_token"): + logger.info(f"No refresh token found for user: {user_id}") + return False + + # Try to refresh the token + msal_app = msal.ConfidentialClientApplication( + app_config["client_id"], + authority=app_config["authority"], + client_credential=app_config["client_credential"] + ) + + result = msal_app.acquire_token_by_refresh_token( + token_data["refresh_token"], + scopes=SCOPES + ) + + if "error" in result: + logger.error(f"Error refreshing token: {result.get('error')}") + return False + + # Update tokens in storage + token_data["access_token"] = result["access_token"] + if "refresh_token" in result: + token_data["refresh_token"] = result["refresh_token"] + + save_token_to_file(user_id, token_data) + + return True + +@router.get("/login") +async def login(): + # Modified implementation without requiring current user + try: + # Create a confidential client application + msal_app = msal.ConfidentialClientApplication( + app_config["client_id"], + authority=app_config["authority"], + client_credential=app_config["client_credential"] + ) + + # Build the auth URL + auth_url = msal_app.get_authorization_request_url( + SCOPES, + state="anonymous-user", # Use a general state since we don't have 
user context + redirect_uri=app_config["redirect_uri"] + ) + + logger.info(f"Redirecting to Microsoft login: {auth_url[:60]}...") + return RedirectResponse(auth_url) + + except Exception as e: + logger.error(f"Error initiating Microsoft login: {str(e)}") + raise HTTPException( + status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, + detail=f"Error initiating Microsoft login: {str(e)}" + ) + +@router.get("/auth/callback") +async def auth_callback(request: Request, code: str = None, state: str = None): + """Handle callback from Microsoft login""" + try: + # Log callback for debugging + logger.info("Received callback from Microsoft login") + + if not code: + logger.error("No authorization code received in callback") + return JSONResponse( + status_code=status.HTTP_400_BAD_REQUEST, + content={"message": "No authorization code received"} + ) + + # Extract user and mandate info from state if available + user_id = None + mandate_id = None + + if state and state != "anonymous-user": + try: + mandate_id, user_id = state.split(":") + logger.info(f"State contains mandate_id: {mandate_id}, user_id: {user_id}") + except ValueError: + logger.warning(f"Invalid state format: {state}") + # Generate a generic user ID if state is invalid + user_id = f"user_{datetime.now().strftime('%Y%m%d%H%M%S')}" + else: + # For anonymous authentication, create a generic user ID + logger.info("Anonymous authentication (no user context)") + user_id = f"user_{datetime.now().strftime('%Y%m%d%H%M%S')}" + + # Create a confidential client application + msal_app = msal.ConfidentialClientApplication( + app_config["client_id"], + authority=app_config["authority"], + client_credential=app_config["client_credential"] + ) + + # Get tokens using the authorization code + result = msal_app.acquire_token_by_authorization_code( + code, + scopes=SCOPES, + redirect_uri=app_config["redirect_uri"] + ) + + if "error" in result: + logger.error(f"Error acquiring token: {result.get('error')}") + return JSONResponse( + 
status_code=status.HTTP_400_BAD_REQUEST, + content={"message": f"Error acquiring token: {result.get('error_description', result.get('error'))}"} + ) + + # Store user information + user_info = {} + if "id_token_claims" in result: + user_info = { + "name": result["id_token_claims"].get("name", ""), + "email": result["id_token_claims"].get("preferred_username", ""), + } + + # If we have user info from the token, use that for user_id + token_user_id = result["id_token_claims"].get("oid") or result["id_token_claims"].get("sub") + if token_user_id: + user_id = token_user_id + elif not user_id and user_info.get("email"): + # Fall back to email-based ID if no other ID is available + user_id = user_info.get("email", "user").replace("@", "_").replace(".", "_") + + # Save tokens to file + token_data = { + "access_token": result["access_token"], + "refresh_token": result.get("refresh_token", ""), + "user_info": user_info, + "timestamp": datetime.now().isoformat() + } + + # Ensure token directory exists + if not os.path.exists(TOKEN_DIR): + os.makedirs(TOKEN_DIR) + + # Save token to file + token_file = os.path.join(TOKEN_DIR, f"{user_id}.json") + with open(token_file, 'w') as f: + json.dump(token_data, f) + + logger.info(f"User authenticated: {user_info.get('email', 'unknown')}") + + # Create a success page + html_content = """ + + + + + Authentication Successful + + + +
+

Authentication Successful

+
+

You have successfully authenticated with Microsoft.

+

You can now close this tab and return to the application.

+

Your email templates will now be able to create drafts in your mailbox.

+ Close Window +
+ + + + """ + + return HTMLResponse(content=html_content) + + else: + logger.warning("No id_token_claims found in result") + return JSONResponse( + status_code=status.HTTP_400_BAD_REQUEST, + content={"message": "Failed to retrieve user information"} + ) + + except Exception as e: + logger.error(f"Error in auth callback: {str(e)}", exc_info=True) + return JSONResponse( + status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, + content={"message": f"Error in auth callback: {str(e)}"} + ) + +@router.get("/status") +async def auth_status( + msft_user_id: Optional[str] = Cookie(None), + currentUser: Dict[str, Any] = Depends(getCurrentActiveUser) +): + """Check Microsoft authentication status""" + try: + # Get user ID + if not msft_user_id: + mandateId, userId = await getUserContext(currentUser) + user_id = str(userId) + else: + user_id = msft_user_id + + # Check if user has a token + token_data = load_token_from_file(user_id) + if not token_data: + return JSONResponse( + content={"authenticated": False, "message": "Not authenticated with Microsoft"} + ) + + # Check if token is valid + if not verify_token(token_data.get("access_token", "")): + # Try to refresh token + if refresh_token(user_id): + token_data = load_token_from_file(user_id) + user_info = token_data.get("user_info", {}) + return JSONResponse( + content={ + "authenticated": True, + "message": "Token refreshed successfully", + "user": user_info + } + ) + else: + return JSONResponse( + content={ + "authenticated": False, + "message": "Token expired and couldn't be refreshed" + } + ) + + # Token is valid, return user info + user_info = token_data.get("user_info", {}) + return JSONResponse( + content={ + "authenticated": True, + "message": "Authenticated with Microsoft", + "user": user_info + } + ) + + except Exception as e: + logger.error(f"Error checking auth status: {str(e)}") + return JSONResponse( + status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, + content={"message": f"Error checking auth status: {str(e)}"} 
+ ) diff --git a/static/10_email_preview.html b/static/10_email_preview.html new file mode 100644 index 00000000..c900e097 --- /dev/null +++ b/static/10_email_preview.html @@ -0,0 +1,42 @@ + + + + + + Email Preview: Verschiebung des Meetings auf Freitag + + + +
+ + + +
+ + + \ No newline at end of file diff --git a/static/11_email_template.json b/static/11_email_template.json new file mode 100644 index 00000000..bf14e27b --- /dev/null +++ b/static/11_email_template.json @@ -0,0 +1,6 @@ +{ + "recipient": "peter.muster@domain.com", + "subject": "Verschiebung des Meetings auf Freitag", + "plainBody": "Sehr geehrter Herr Muster,\n\nich hoffe, es geht Ihnen gut. Ich schreibe Ihnen, um unser geplantes Meeting von 10 Uhr auf Freitag zu verschieben. Bitte lassen Sie mich wissen, ob dieser neue Termin f\u00fcr Sie passt.\n\nVielen Dank f\u00fcr Ihr Verst\u00e4ndnis.\n\nMit freundlichen Gr\u00fc\u00dfen,\n\n[Ihr Name]", + "htmlBody": "

Sehr geehrter Herr Muster,

ich hoffe, es geht Ihnen gut. Ich schreibe Ihnen, um unser geplantes Meeting von 10 Uhr auf Freitag zu verschieben. Bitte lassen Sie mich wissen, ob dieser neue Termin f\u00fcr Sie passt.

Vielen Dank f\u00fcr Ihr Verst\u00e4ndnis.

Mit freundlichen Gr\u00fc\u00dfen,
[Ihr Name]

" +} \ No newline at end of file diff --git a/static/12_email_preview.html b/static/12_email_preview.html new file mode 100644 index 00000000..87962b01 --- /dev/null +++ b/static/12_email_preview.html @@ -0,0 +1,42 @@ + + + + + + Email Preview: Anfrage zur Terminverschiebung + + + +
+ + + +
+ + + \ No newline at end of file diff --git a/static/13_email_template.json b/static/13_email_template.json new file mode 100644 index 00000000..ab8a946c --- /dev/null +++ b/static/13_email_template.json @@ -0,0 +1,6 @@ +{ + "recipient": "peter.muster@domain.com", + "subject": "Anfrage zur Terminverschiebung", + "plainBody": "Sehr geehrter Herr Muster,\n\nich hoffe, diese Nachricht trifft Sie wohl. Ich schreibe Ihnen, um eine Verschiebung unseres Termins von 10 Uhr auf Freitag zu erbitten. Bitte lassen Sie mich wissen, ob dies f\u00fcr Sie m\u00f6glich ist.\n\nVielen Dank im Voraus f\u00fcr Ihre Flexibilit\u00e4t.\n\nMit freundlichen Gr\u00fc\u00dfen,\n\n[Ihr Name]", + "htmlBody": "

Sehr geehrter Herr Muster,

ich hoffe, diese Nachricht trifft Sie wohl. Ich schreibe Ihnen, um eine Verschiebung unseres Termins von 10 Uhr auf Freitag zu erbitten. Bitte lassen Sie mich wissen, ob dies f\u00fcr Sie m\u00f6glich ist.

Vielen Dank im Voraus f\u00fcr Ihre Flexibilit\u00e4t.

Mit freundlichen Gr\u00fc\u00dfen,
[Ihr Name]

" +} \ No newline at end of file diff --git a/static/14_microsoft_authentication.html b/static/14_microsoft_authentication.html new file mode 100644 index 00000000..b8a50d7f --- /dev/null +++ b/static/14_microsoft_authentication.html @@ -0,0 +1,47 @@ + + + + + + Microsoft Authentication Required + + + +
+

Microsoft Authentication Required

+ +

To create email templates and drafts, you need to authenticate with your Microsoft account. Follow these steps:

+ +
+ 1 + Click the authentication link below +
+ + Authenticate with Microsoft + +
+ 2 + Sign in with your Microsoft account and grant the required permissions +
+ +
+ 3 + Return to this application and run the email agent again after completing authentication +
+ +
+

Note: You only need to authenticate once. Your session will be remembered for future email operations.

+
+
+ + + \ No newline at end of file diff --git a/static/15_microsoft_authentication.html b/static/15_microsoft_authentication.html new file mode 100644 index 00000000..521bae1c --- /dev/null +++ b/static/15_microsoft_authentication.html @@ -0,0 +1,28 @@ + + + + + + Microsoft Authentication Required + + + +
+

Microsoft Authentication Required

+ +

To create email templates and drafts, you need to authenticate with your Microsoft account.

+ +

The application will now initiate the Microsoft authentication process. Please follow the instructions in the authentication window.

+ +
+

Note: You only need to authenticate once. Your session will be remembered for future email operations.

+
+
+ + + \ No newline at end of file diff --git a/static/16_email_preview.html b/static/16_email_preview.html new file mode 100644 index 00000000..95096bad --- /dev/null +++ b/static/16_email_preview.html @@ -0,0 +1,42 @@ + + + + + + Email Preview: Verschiebung des Meetings auf Freitag + + + +
+ + + +
+ + + \ No newline at end of file diff --git a/static/17_email_template.json b/static/17_email_template.json new file mode 100644 index 00000000..90e8f9f3 --- /dev/null +++ b/static/17_email_template.json @@ -0,0 +1,6 @@ +{ + "recipient": "peter.muster@domain.com", + "subject": "Verschiebung des Meetings auf Freitag", + "plainBody": "Sehr geehrter Herr Muster,\n\nich hoffe, es geht Ihnen gut. Ich schreibe Ihnen, um unser geplantes Meeting um 10 Uhr auf Freitag zu verschieben. Bitte lassen Sie mich wissen, ob dieser Termin f\u00fcr Sie passt.\n\nVielen Dank f\u00fcr Ihr Verst\u00e4ndnis.\n\nMit freundlichen Gr\u00fc\u00dfen,\n\n[Ihr Name]", + "htmlBody": "

Sehr geehrter Herr Muster,

ich hoffe, es geht Ihnen gut. Ich schreibe Ihnen, um unser geplantes Meeting um 10 Uhr auf Freitag zu verschieben. Bitte lassen Sie mich wissen, ob dieser Termin f\u00fcr Sie passt.

Vielen Dank f\u00fcr Ihr Verst\u00e4ndnis.

Mit freundlichen Gr\u00fc\u00dfen,

[Ihr Name]

" +} \ No newline at end of file diff --git a/static/18_generated_code.py b/static/18_generated_code.py new file mode 100644 index 00000000..b53f58c4 --- /dev/null +++ b/static/18_generated_code.py @@ -0,0 +1,48 @@ +inputFiles = [] # DO NOT CHANGE THIS LINE + +# REQUIREMENTS: + +import json +import csv +from io import StringIO + +def is_prime(n): + if n <= 1: + return False + if n <= 3: + return True + if n % 2 == 0 or n % 3 == 0: + return False + i = 5 + while i * i <= n: + if n % i == 0 or n % (i + 2) == 0: + return False + i += 6 + return True + +def generate_primes(limit): + primes = [] + num = 2 + while len(primes) < limit: + if is_prime(num): + primes.append(num) + num += 1 + return primes + +primes = generate_primes(1000) + +output = StringIO() +csv_writer = csv.writer(output) +for prime in primes: + csv_writer.writerow([prime]) + +result = { + "prime_numbers.csv": { + "content": output.getvalue(), + "base64Encoded": False, + "contentType": "text/csv" + } +} + +import json +print(json.dumps(result)) \ No newline at end of file diff --git a/static/19_execution_history.json b/static/19_execution_history.json new file mode 100644 index 00000000..8b61dc57 --- /dev/null +++ b/static/19_execution_history.json @@ -0,0 +1,19 @@ +[ + { + "attempt": 1, + "code": "inputFiles = [] # DO NOT CHANGE THIS LINE\n\n# REQUIREMENTS: \n\nimport json\nimport csv\nfrom io import StringIO\n\ndef is_prime(n):\n if n <= 1:\n return False\n if n <= 3:\n return True\n if n % 2 == 0 or n % 3 == 0:\n return False\n i = 5\n while i * i <= n:\n if n % i == 0 or n % (i + 2) == 0:\n return False\n i += 6\n return True\n\ndef generate_primes(limit):\n primes = []\n num = 2\n while len(primes) < limit:\n if is_prime(num):\n primes.append(num)\n num += 1\n return primes\n\nprimes = generate_primes(1000)\n\noutput = StringIO()\ncsv_writer = csv.writer(output)\nfor prime in primes:\n csv_writer.writerow([prime])\n\nresult = {\n \"prime_numbers.csv\": {\n \"content\": output.getvalue(),\n 
\"base64Encoded\": False,\n \"contentType\": \"text/csv\"\n }\n}\n\nimport json\nprint(json.dumps(result))", + "result": { + "success": true, + "output": "{\"prime_numbers.csv\": {\"content\": \"2\\r\\n3\\r\\n5\\r\\n7\\r\\n11\\r\\n13\\r\\n17\\r\\n19\\r\\n23\\r\\n29\\r\\n31\\r\\n37\\r\\n41\\r\\n43\\r\\n47\\r\\n53\\r\\n59\\r\\n61\\r\\n67\\r\\n71\\r\\n73\\r\\n79\\r\\n83\\r\\n89\\r\\n97\\r\\n101\\r\\n103\\r\\n107\\r\\n109\\r\\n113\\r\\n127\\r\\n131\\r\\n137\\r\\n139\\r\\n149\\r\\n151\\r\\n157\\r\\n163\\r\\n167\\r\\n173\\r\\n179\\r\\n181\\r\\n191\\r\\n193\\r\\n197\\r\\n199\\r\\n211\\r\\n223\\r\\n227\\r\\n229\\r\\n233\\r\\n239\\r\\n241\\r\\n251\\r\\n257\\r\\n263\\r\\n269\\r\\n271\\r\\n277\\r\\n281\\r\\n283\\r\\n293\\r\\n307\\r\\n311\\r\\n313\\r\\n317\\r\\n331\\r\\n337\\r\\n347\\r\\n349\\r\\n353\\r\\n359\\r\\n367\\r\\n373\\r\\n379\\r\\n383\\r\\n389\\r\\n397\\r\\n401\\r\\n409\\r\\n419\\r\\n421\\r\\n431\\r\\n433\\r\\n439\\r\\n443\\r\\n449\\r\\n457\\r\\n461\\r\\n463\\r\\n467\\r\\n479\\r\\n487\\r\\n491\\r\\n499\\r\\n503\\r\\n509\\r\\n521\\r\\n523\\r\\n541\\r\\n547\\r\\n557\\r\\n563\\r\\n569\\r\\n571\\r\\n577\\r\\n587\\r\\n593\\r\\n599\\r\\n601\\r\\n607\\r\\n613\\r\\n617\\r\\n619\\r\\n631\\r\\n641\\r\\n643\\r\\n647\\r\\n653\\r\\n659\\r\\n661\\r\\n673\\r\\n677\\r\\n683\\r\\n691\\r\\n701\\r\\n709\\r\\n719\\r\\n727\\r\\n733\\r\\n739\\r\\n743\\r\\n751\\r\\n757\\r\\n761\\r\\n769\\r\\n773\\r\\n787\\r\\n797\\r\\n809\\r\\n811\\r\\n821\\r\\n823\\r\\n827\\r\\n829\\r\\n839\\r\\n853\\r\\n857\\r\\n859\\r\\n863\\r\\n877\\r\\n881\\r\\n883\\r\\n887\\r\\n907\\r\\n911\\r\\n919\\r\\n929\\r\\n937\\r\\n941\\r\\n947\\r\\n953\\r\\n967\\r\\n971\\r\\n977\\r\\n983\\r\\n991\\r\\n997\\r\\n1009\\r\\n1013\\r\\n1019\\r\\n1021\\r\\n1031\\r\\n1033\\r\\n1039\\r\\n1049\\r\\n1051\\r\\n1061\\r\\n1063\\r\\n1069\\r\\n1087\\r\\n1091\\r\\n1093\\r\\n1097\\r\\n1103\\r\\n1109\\r\\n1117\\r\\n1123\\r\\n1129\\r\\n1151\\r\\n1153\\r\\n1163\\r\\n1171\\r\\n1181\\r\\n1187\\r\\n1193\\r\\n1201\\r\\n1213\\r\\n1217\\r\\n1223\\r\\n12
29\\r\\n1231\\r\\n1237\\r\\n1249\\r\\n1259\\r\\n1277\\r\\n1279\\r\\n1283\\r\\n1289\\r\\n1291\\r\\n1297\\r\\n1301\\r\\n1303\\r\\n1307\\r\\n1319\\r\\n1321\\r\\n1327\\r\\n1361\\r\\n1367\\r\\n1373\\r\\n1381\\r\\n1399\\r\\n1409\\r\\n1423\\r\\n1427\\r\\n1429\\r\\n1433\\r\\n1439\\r\\n1447\\r\\n1451\\r\\n1453\\r\\n1459\\r\\n1471\\r\\n1481\\r\\n1483\\r\\n1487\\r\\n1489\\r\\n1493\\r\\n1499\\r\\n1511\\r\\n1523\\r\\n1531\\r\\n1543\\r\\n1549\\r\\n1553\\r\\n1559\\r\\n1567\\r\\n1571\\r\\n1579\\r\\n1583\\r\\n1597\\r\\n1601\\r\\n1607\\r\\n1609\\r\\n1613\\r\\n1619\\r\\n1621\\r\\n1627\\r\\n1637\\r\\n1657\\r\\n1663\\r\\n1667\\r\\n1669\\r\\n1693\\r\\n1697\\r\\n1699\\r\\n1709\\r\\n1721\\r\\n1723\\r\\n1733\\r\\n1741\\r\\n1747\\r\\n1753\\r\\n1759\\r\\n1777\\r\\n1783\\r\\n1787\\r\\n1789\\r\\n1801\\r\\n1811\\r\\n1823\\r\\n1831\\r\\n1847\\r\\n1861\\r\\n1867\\r\\n1871\\r\\n1873\\r\\n1877\\r\\n1879\\r\\n1889\\r\\n1901\\r\\n1907\\r\\n1913\\r\\n1931\\r\\n1933\\r\\n1949\\r\\n1951\\r\\n1973\\r\\n1979\\r\\n1987\\r\\n1993\\r\\n1997\\r\\n1999\\r\\n2003\\r\\n2011\\r\\n2017\\r\\n2027\\r\\n2029\\r\\n2039\\r\\n2053\\r\\n2063\\r\\n2069\\r\\n2081\\r\\n2083\\r\\n2087\\r\\n2089\\r\\n2099\\r\\n2111\\r\\n2113\\r\\n2129\\r\\n2131\\r\\n2137\\r\\n2141\\r\\n2143\\r\\n2153\\r\\n2161\\r\\n2179\\r\\n2203\\r\\n2207\\r\\n2213\\r\\n2221\\r\\n2237\\r\\n2239\\r\\n2243\\r\\n2251\\r\\n2267\\r\\n2269\\r\\n2273\\r\\n2281\\r\\n2287\\r\\n2293\\r\\n2297\\r\\n2309\\r\\n2311\\r\\n2333\\r\\n2339\\r\\n2341\\r\\n2347\\r\\n2351\\r\\n2357\\r\\n2371\\r\\n2377\\r\\n2381\\r\\n2383\\r\\n2389\\r\\n2393\\r\\n2399\\r\\n2411\\r\\n2417\\r\\n2423\\r\\n2437\\r\\n2441\\r\\n2447\\r\\n2459\\r\\n2467\\r\\n2473\\r\\n2477\\r\\n2503\\r\\n2521\\r\\n2531\\r\\n2539\\r\\n2543\\r\\n2549\\r\\n2551\\r\\n2557\\r\\n2579\\r\\n2591\\r\\n2593\\r\\n2609\\r\\n2617\\r\\n2621\\r\\n2633\\r\\n2647\\r\\n2657\\r\\n2659\\r\\n2663\\r\\n2671\\r\\n2677\\r\\n2683\\r\\n2687\\r\\n2689\\r\\n2693\\r\\n2699\\r\\n2707\\r\\n2711\\r\\n2713\\r\\n2719\\r\\n2729\\r\\n2731\\r\\n2741\\r\\n27
49\\r\\n2753\\r\\n2767\\r\\n2777\\r\\n2789\\r\\n2791\\r\\n2797\\r\\n2801\\r\\n2803\\r\\n2819\\r\\n2833\\r\\n2837\\r\\n2843\\r\\n2851\\r\\n2857\\r\\n2861\\r\\n2879\\r\\n2887\\r\\n2897\\r\\n2903\\r\\n2909\\r\\n2917\\r\\n2927\\r\\n2939\\r\\n2953\\r\\n2957\\r\\n2963\\r\\n2969\\r\\n2971\\r\\n2999\\r\\n3001\\r\\n3011\\r\\n3019\\r\\n3023\\r\\n3037\\r\\n3041\\r\\n3049\\r\\n3061\\r\\n3067\\r\\n3079\\r\\n3083\\r\\n3089\\r\\n3109\\r\\n3119\\r\\n3121\\r\\n3137\\r\\n3163\\r\\n3167\\r\\n3169\\r\\n3181\\r\\n3187\\r\\n3191\\r\\n3203\\r\\n3209\\r\\n3217\\r\\n3221\\r\\n3229\\r\\n3251\\r\\n3253\\r\\n3257\\r\\n3259\\r\\n3271\\r\\n3299\\r\\n3301\\r\\n3307\\r\\n3313\\r\\n3319\\r\\n3323\\r\\n3329\\r\\n3331\\r\\n3343\\r\\n3347\\r\\n3359\\r\\n3361\\r\\n3371\\r\\n3373\\r\\n3389\\r\\n3391\\r\\n3407\\r\\n3413\\r\\n3433\\r\\n3449\\r\\n3457\\r\\n3461\\r\\n3463\\r\\n3467\\r\\n3469\\r\\n3491\\r\\n3499\\r\\n3511\\r\\n3517\\r\\n3527\\r\\n3529\\r\\n3533\\r\\n3539\\r\\n3541\\r\\n3547\\r\\n3557\\r\\n3559\\r\\n3571\\r\\n3581\\r\\n3583\\r\\n3593\\r\\n3607\\r\\n3613\\r\\n3617\\r\\n3623\\r\\n3631\\r\\n3637\\r\\n3643\\r\\n3659\\r\\n3671\\r\\n3673\\r\\n3677\\r\\n3691\\r\\n3697\\r\\n3701\\r\\n3709\\r\\n3719\\r\\n3727\\r\\n3733\\r\\n3739\\r\\n3761\\r\\n3767\\r\\n3769\\r\\n3779\\r\\n3793\\r\\n3797\\r\\n3803\\r\\n3821\\r\\n3823\\r\\n3833\\r\\n3847\\r\\n3851\\r\\n3853\\r\\n3863\\r\\n3877\\r\\n3881\\r\\n3889\\r\\n3907\\r\\n3911\\r\\n3917\\r\\n3919\\r\\n3923\\r\\n3929\\r\\n3931\\r\\n3943\\r\\n3947\\r\\n3967\\r\\n3989\\r\\n4001\\r\\n4003\\r\\n4007\\r\\n4013\\r\\n4019\\r\\n4021\\r\\n4027\\r\\n4049\\r\\n4051\\r\\n4057\\r\\n4073\\r\\n4079\\r\\n4091\\r\\n4093\\r\\n4099\\r\\n4111\\r\\n4127\\r\\n4129\\r\\n4133\\r\\n4139\\r\\n4153\\r\\n4157\\r\\n4159\\r\\n4177\\r\\n4201\\r\\n4211\\r\\n4217\\r\\n4219\\r\\n4229\\r\\n4231\\r\\n4241\\r\\n4243\\r\\n4253\\r\\n4259\\r\\n4261\\r\\n4271\\r\\n4273\\r\\n4283\\r\\n4289\\r\\n4297\\r\\n4327\\r\\n4337\\r\\n4339\\r\\n4349\\r\\n4357\\r\\n4363\\r\\n4373\\r\\n4391\\r\\n4397\\r\\n4409\\r\\n44
21\\r\\n4423\\r\\n4441\\r\\n4447\\r\\n4451\\r\\n4457\\r\\n4463\\r\\n4481\\r\\n4483\\r\\n4493\\r\\n4507\\r\\n4513\\r\\n4517\\r\\n4519\\r\\n4523\\r\\n4547\\r\\n4549\\r\\n4561\\r\\n4567\\r\\n4583\\r\\n4591\\r\\n4597\\r\\n4603\\r\\n4621\\r\\n4637\\r\\n4639\\r\\n4643\\r\\n4649\\r\\n4651\\r\\n4657\\r\\n4663\\r\\n4673\\r\\n4679\\r\\n4691\\r\\n4703\\r\\n4721\\r\\n4723\\r\\n4729\\r\\n4733\\r\\n4751\\r\\n4759\\r\\n4783\\r\\n4787\\r\\n4789\\r\\n4793\\r\\n4799\\r\\n4801\\r\\n4813\\r\\n4817\\r\\n4831\\r\\n4861\\r\\n4871\\r\\n4877\\r\\n4889\\r\\n4903\\r\\n4909\\r\\n4919\\r\\n4931\\r\\n4933\\r\\n4937\\r\\n4943\\r\\n4951\\r\\n4957\\r\\n4967\\r\\n4969\\r\\n4973\\r\\n4987\\r\\n4993\\r\\n4999\\r\\n5003\\r\\n5009\\r\\n5011\\r\\n5021\\r\\n5023\\r\\n5039\\r\\n5051\\r\\n5059\\r\\n5077\\r\\n5081\\r\\n5087\\r\\n5099\\r\\n5101\\r\\n5107\\r\\n5113\\r\\n5119\\r\\n5147\\r\\n5153\\r\\n5167\\r\\n5171\\r\\n5179\\r\\n5189\\r\\n5197\\r\\n5209\\r\\n5227\\r\\n5231\\r\\n5233\\r\\n5237\\r\\n5261\\r\\n5273\\r\\n5279\\r\\n5281\\r\\n5297\\r\\n5303\\r\\n5309\\r\\n5323\\r\\n5333\\r\\n5347\\r\\n5351\\r\\n5381\\r\\n5387\\r\\n5393\\r\\n5399\\r\\n5407\\r\\n5413\\r\\n5417\\r\\n5419\\r\\n5431\\r\\n5437\\r\\n5441\\r\\n5443\\r\\n5449\\r\\n5471\\r\\n5477\\r\\n5479\\r\\n5483\\r\\n5501\\r\\n5503\\r\\n5507\\r\\n5519\\r\\n5521\\r\\n5527\\r\\n5531\\r\\n5557\\r\\n5563\\r\\n5569\\r\\n5573\\r\\n5581\\r\\n5591\\r\\n5623\\r\\n5639\\r\\n5641\\r\\n5647\\r\\n5651\\r\\n5653\\r\\n5657\\r\\n5659\\r\\n5669\\r\\n5683\\r\\n5689\\r\\n5693\\r\\n5701\\r\\n5711\\r\\n5717\\r\\n5737\\r\\n5741\\r\\n5743\\r\\n5749\\r\\n5779\\r\\n5783\\r\\n5791\\r\\n5801\\r\\n5807\\r\\n5813\\r\\n5821\\r\\n5827\\r\\n5839\\r\\n5843\\r\\n5849\\r\\n5851\\r\\n5857\\r\\n5861\\r\\n5867\\r\\n5869\\r\\n5879\\r\\n5881\\r\\n5897\\r\\n5903\\r\\n5923\\r\\n5927\\r\\n5939\\r\\n5953\\r\\n5981\\r\\n5987\\r\\n6007\\r\\n6011\\r\\n6029\\r\\n6037\\r\\n6043\\r\\n6047\\r\\n6053\\r\\n6067\\r\\n6073\\r\\n6079\\r\\n6089\\r\\n6091\\r\\n6101\\r\\n6113\\r\\n6121\\r\\n6131\\r\\n6133\\r\\n61
43\\r\\n6151\\r\\n6163\\r\\n6173\\r\\n6197\\r\\n6199\\r\\n6203\\r\\n6211\\r\\n6217\\r\\n6221\\r\\n6229\\r\\n6247\\r\\n6257\\r\\n6263\\r\\n6269\\r\\n6271\\r\\n6277\\r\\n6287\\r\\n6299\\r\\n6301\\r\\n6311\\r\\n6317\\r\\n6323\\r\\n6329\\r\\n6337\\r\\n6343\\r\\n6353\\r\\n6359\\r\\n6361\\r\\n6367\\r\\n6373\\r\\n6379\\r\\n6389\\r\\n6397\\r\\n6421\\r\\n6427\\r\\n6449\\r\\n6451\\r\\n6469\\r\\n6473\\r\\n6481\\r\\n6491\\r\\n6521\\r\\n6529\\r\\n6547\\r\\n6551\\r\\n6553\\r\\n6563\\r\\n6569\\r\\n6571\\r\\n6577\\r\\n6581\\r\\n6599\\r\\n6607\\r\\n6619\\r\\n6637\\r\\n6653\\r\\n6659\\r\\n6661\\r\\n6673\\r\\n6679\\r\\n6689\\r\\n6691\\r\\n6701\\r\\n6703\\r\\n6709\\r\\n6719\\r\\n6733\\r\\n6737\\r\\n6761\\r\\n6763\\r\\n6779\\r\\n6781\\r\\n6791\\r\\n6793\\r\\n6803\\r\\n6823\\r\\n6827\\r\\n6829\\r\\n6833\\r\\n6841\\r\\n6857\\r\\n6863\\r\\n6869\\r\\n6871\\r\\n6883\\r\\n6899\\r\\n6907\\r\\n6911\\r\\n6917\\r\\n6947\\r\\n6949\\r\\n6959\\r\\n6961\\r\\n6967\\r\\n6971\\r\\n6977\\r\\n6983\\r\\n6991\\r\\n6997\\r\\n7001\\r\\n7013\\r\\n7019\\r\\n7027\\r\\n7039\\r\\n7043\\r\\n7057\\r\\n7069\\r\\n7079\\r\\n7103\\r\\n7109\\r\\n7121\\r\\n7127\\r\\n7129\\r\\n7151\\r\\n7159\\r\\n7177\\r\\n7187\\r\\n7193\\r\\n7207\\r\\n7211\\r\\n7213\\r\\n7219\\r\\n7229\\r\\n7237\\r\\n7243\\r\\n7247\\r\\n7253\\r\\n7283\\r\\n7297\\r\\n7307\\r\\n7309\\r\\n7321\\r\\n7331\\r\\n7333\\r\\n7349\\r\\n7351\\r\\n7369\\r\\n7393\\r\\n7411\\r\\n7417\\r\\n7433\\r\\n7451\\r\\n7457\\r\\n7459\\r\\n7477\\r\\n7481\\r\\n7487\\r\\n7489\\r\\n7499\\r\\n7507\\r\\n7517\\r\\n7523\\r\\n7529\\r\\n7537\\r\\n7541\\r\\n7547\\r\\n7549\\r\\n7559\\r\\n7561\\r\\n7573\\r\\n7577\\r\\n7583\\r\\n7589\\r\\n7591\\r\\n7603\\r\\n7607\\r\\n7621\\r\\n7639\\r\\n7643\\r\\n7649\\r\\n7669\\r\\n7673\\r\\n7681\\r\\n7687\\r\\n7691\\r\\n7699\\r\\n7703\\r\\n7717\\r\\n7723\\r\\n7727\\r\\n7741\\r\\n7753\\r\\n7757\\r\\n7759\\r\\n7789\\r\\n7793\\r\\n7817\\r\\n7823\\r\\n7829\\r\\n7841\\r\\n7853\\r\\n7867\\r\\n7873\\r\\n7877\\r\\n7879\\r\\n7883\\r\\n7901\\r\\n7907\\r\\n7919\\r\\n\"
, \"base64Encoded\": false, \"contentType\": \"text/csv\"}}\n", + "error": "", + "result": { + "prime_numbers.csv": { + "content": "2\r\n3\r\n5\r\n7\r\n11\r\n13\r\n17\r\n19\r\n23\r\n29\r\n31\r\n37\r\n41\r\n43\r\n47\r\n53\r\n59\r\n61\r\n67\r\n71\r\n73\r\n79\r\n83\r\n89\r\n97\r\n101\r\n103\r\n107\r\n109\r\n113\r\n127\r\n131\r\n137\r\n139\r\n149\r\n151\r\n157\r\n163\r\n167\r\n173\r\n179\r\n181\r\n191\r\n193\r\n197\r\n199\r\n211\r\n223\r\n227\r\n229\r\n233\r\n239\r\n241\r\n251\r\n257\r\n263\r\n269\r\n271\r\n277\r\n281\r\n283\r\n293\r\n307\r\n311\r\n313\r\n317\r\n331\r\n337\r\n347\r\n349\r\n353\r\n359\r\n367\r\n373\r\n379\r\n383\r\n389\r\n397\r\n401\r\n409\r\n419\r\n421\r\n431\r\n433\r\n439\r\n443\r\n449\r\n457\r\n461\r\n463\r\n467\r\n479\r\n487\r\n491\r\n499\r\n503\r\n509\r\n521\r\n523\r\n541\r\n547\r\n557\r\n563\r\n569\r\n571\r\n577\r\n587\r\n593\r\n599\r\n601\r\n607\r\n613\r\n617\r\n619\r\n631\r\n641\r\n643\r\n647\r\n653\r\n659\r\n661\r\n673\r\n677\r\n683\r\n691\r\n701\r\n709\r\n719\r\n727\r\n733\r\n739\r\n743\r\n751\r\n757\r\n761\r\n769\r\n773\r\n787\r\n797\r\n809\r\n811\r\n821\r\n823\r\n827\r\n829\r\n839\r\n853\r\n857\r\n859\r\n863\r\n877\r\n881\r\n883\r\n887\r\n907\r\n911\r\n919\r\n929\r\n937\r\n941\r\n947\r\n953\r\n967\r\n971\r\n977\r\n983\r\n991\r\n997\r\n1009\r\n1013\r\n1019\r\n1021\r\n1031\r\n1033\r\n1039\r\n1049\r\n1051\r\n1061\r\n1063\r\n1069\r\n1087\r\n1091\r\n1093\r\n1097\r\n1103\r\n1109\r\n1117\r\n1123\r\n1129\r\n1151\r\n1153\r\n1163\r\n1171\r\n1181\r\n1187\r\n1193\r\n1201\r\n1213\r\n1217\r\n1223\r\n1229\r\n1231\r\n1237\r\n1249\r\n1259\r\n1277\r\n1279\r\n1283\r\n1289\r\n1291\r\n1297\r\n1301\r\n1303\r\n1307\r\n1319\r\n1321\r\n1327\r\n1361\r\n1367\r\n1373\r\n1381\r\n1399\r\n1409\r\n1423\r\n1427\r\n1429\r\n1433\r\n1439\r\n1447\r\n1451\r\n1453\r\n1459\r\n1471\r\n1481\r\n1483\r\n1487\r\n1489\r\n1493\r\n1499\r\n1511\r\n1523\r\n1531\r\n1543\r\n1549\r\n1553\r\n1559\r\n1567\r\n1571\r\n1579\r\n1583\r\n1597\r\n1601\r\n1607\r\n1609\r\n1613\r\n1619\r\n1621\r\n1627\r\n1
637\r\n1657\r\n1663\r\n1667\r\n1669\r\n1693\r\n1697\r\n1699\r\n1709\r\n1721\r\n1723\r\n1733\r\n1741\r\n1747\r\n1753\r\n1759\r\n1777\r\n1783\r\n1787\r\n1789\r\n1801\r\n1811\r\n1823\r\n1831\r\n1847\r\n1861\r\n1867\r\n1871\r\n1873\r\n1877\r\n1879\r\n1889\r\n1901\r\n1907\r\n1913\r\n1931\r\n1933\r\n1949\r\n1951\r\n1973\r\n1979\r\n1987\r\n1993\r\n1997\r\n1999\r\n2003\r\n2011\r\n2017\r\n2027\r\n2029\r\n2039\r\n2053\r\n2063\r\n2069\r\n2081\r\n2083\r\n2087\r\n2089\r\n2099\r\n2111\r\n2113\r\n2129\r\n2131\r\n2137\r\n2141\r\n2143\r\n2153\r\n2161\r\n2179\r\n2203\r\n2207\r\n2213\r\n2221\r\n2237\r\n2239\r\n2243\r\n2251\r\n2267\r\n2269\r\n2273\r\n2281\r\n2287\r\n2293\r\n2297\r\n2309\r\n2311\r\n2333\r\n2339\r\n2341\r\n2347\r\n2351\r\n2357\r\n2371\r\n2377\r\n2381\r\n2383\r\n2389\r\n2393\r\n2399\r\n2411\r\n2417\r\n2423\r\n2437\r\n2441\r\n2447\r\n2459\r\n2467\r\n2473\r\n2477\r\n2503\r\n2521\r\n2531\r\n2539\r\n2543\r\n2549\r\n2551\r\n2557\r\n2579\r\n2591\r\n2593\r\n2609\r\n2617\r\n2621\r\n2633\r\n2647\r\n2657\r\n2659\r\n2663\r\n2671\r\n2677\r\n2683\r\n2687\r\n2689\r\n2693\r\n2699\r\n2707\r\n2711\r\n2713\r\n2719\r\n2729\r\n2731\r\n2741\r\n2749\r\n2753\r\n2767\r\n2777\r\n2789\r\n2791\r\n2797\r\n2801\r\n2803\r\n2819\r\n2833\r\n2837\r\n2843\r\n2851\r\n2857\r\n2861\r\n2879\r\n2887\r\n2897\r\n2903\r\n2909\r\n2917\r\n2927\r\n2939\r\n2953\r\n2957\r\n2963\r\n2969\r\n2971\r\n2999\r\n3001\r\n3011\r\n3019\r\n3023\r\n3037\r\n3041\r\n3049\r\n3061\r\n3067\r\n3079\r\n3083\r\n3089\r\n3109\r\n3119\r\n3121\r\n3137\r\n3163\r\n3167\r\n3169\r\n3181\r\n3187\r\n3191\r\n3203\r\n3209\r\n3217\r\n3221\r\n3229\r\n3251\r\n3253\r\n3257\r\n3259\r\n3271\r\n3299\r\n3301\r\n3307\r\n3313\r\n3319\r\n3323\r\n3329\r\n3331\r\n3343\r\n3347\r\n3359\r\n3361\r\n3371\r\n3373\r\n3389\r\n3391\r\n3407\r\n3413\r\n3433\r\n3449\r\n3457\r\n3461\r\n3463\r\n3467\r\n3469\r\n3491\r\n3499\r\n3511\r\n3517\r\n3527\r\n3529\r\n3533\r\n3539\r\n3541\r\n3547\r\n3557\r\n3559\r\n3571\r\n3581\r\n3583\r\n3593\r\n3607\r\n3613\r\n3617\r\n3623\r\n3631\r\n3
637\r\n3643\r\n3659\r\n3671\r\n3673\r\n3677\r\n3691\r\n3697\r\n3701\r\n3709\r\n3719\r\n3727\r\n3733\r\n3739\r\n3761\r\n3767\r\n3769\r\n3779\r\n3793\r\n3797\r\n3803\r\n3821\r\n3823\r\n3833\r\n3847\r\n3851\r\n3853\r\n3863\r\n3877\r\n3881\r\n3889\r\n3907\r\n3911\r\n3917\r\n3919\r\n3923\r\n3929\r\n3931\r\n3943\r\n3947\r\n3967\r\n3989\r\n4001\r\n4003\r\n4007\r\n4013\r\n4019\r\n4021\r\n4027\r\n4049\r\n4051\r\n4057\r\n4073\r\n4079\r\n4091\r\n4093\r\n4099\r\n4111\r\n4127\r\n4129\r\n4133\r\n4139\r\n4153\r\n4157\r\n4159\r\n4177\r\n4201\r\n4211\r\n4217\r\n4219\r\n4229\r\n4231\r\n4241\r\n4243\r\n4253\r\n4259\r\n4261\r\n4271\r\n4273\r\n4283\r\n4289\r\n4297\r\n4327\r\n4337\r\n4339\r\n4349\r\n4357\r\n4363\r\n4373\r\n4391\r\n4397\r\n4409\r\n4421\r\n4423\r\n4441\r\n4447\r\n4451\r\n4457\r\n4463\r\n4481\r\n4483\r\n4493\r\n4507\r\n4513\r\n4517\r\n4519\r\n4523\r\n4547\r\n4549\r\n4561\r\n4567\r\n4583\r\n4591\r\n4597\r\n4603\r\n4621\r\n4637\r\n4639\r\n4643\r\n4649\r\n4651\r\n4657\r\n4663\r\n4673\r\n4679\r\n4691\r\n4703\r\n4721\r\n4723\r\n4729\r\n4733\r\n4751\r\n4759\r\n4783\r\n4787\r\n4789\r\n4793\r\n4799\r\n4801\r\n4813\r\n4817\r\n4831\r\n4861\r\n4871\r\n4877\r\n4889\r\n4903\r\n4909\r\n4919\r\n4931\r\n4933\r\n4937\r\n4943\r\n4951\r\n4957\r\n4967\r\n4969\r\n4973\r\n4987\r\n4993\r\n4999\r\n5003\r\n5009\r\n5011\r\n5021\r\n5023\r\n5039\r\n5051\r\n5059\r\n5077\r\n5081\r\n5087\r\n5099\r\n5101\r\n5107\r\n5113\r\n5119\r\n5147\r\n5153\r\n5167\r\n5171\r\n5179\r\n5189\r\n5197\r\n5209\r\n5227\r\n5231\r\n5233\r\n5237\r\n5261\r\n5273\r\n5279\r\n5281\r\n5297\r\n5303\r\n5309\r\n5323\r\n5333\r\n5347\r\n5351\r\n5381\r\n5387\r\n5393\r\n5399\r\n5407\r\n5413\r\n5417\r\n5419\r\n5431\r\n5437\r\n5441\r\n5443\r\n5449\r\n5471\r\n5477\r\n5479\r\n5483\r\n5501\r\n5503\r\n5507\r\n5519\r\n5521\r\n5527\r\n5531\r\n5557\r\n5563\r\n5569\r\n5573\r\n5581\r\n5591\r\n5623\r\n5639\r\n5641\r\n5647\r\n5651\r\n5653\r\n5657\r\n5659\r\n5669\r\n5683\r\n5689\r\n5693\r\n5701\r\n5711\r\n5717\r\n5737\r\n5741\r\n5743\r\n5749\r\n5779\r\n5
783\r\n5791\r\n5801\r\n5807\r\n5813\r\n5821\r\n5827\r\n5839\r\n5843\r\n5849\r\n5851\r\n5857\r\n5861\r\n5867\r\n5869\r\n5879\r\n5881\r\n5897\r\n5903\r\n5923\r\n5927\r\n5939\r\n5953\r\n5981\r\n5987\r\n6007\r\n6011\r\n6029\r\n6037\r\n6043\r\n6047\r\n6053\r\n6067\r\n6073\r\n6079\r\n6089\r\n6091\r\n6101\r\n6113\r\n6121\r\n6131\r\n6133\r\n6143\r\n6151\r\n6163\r\n6173\r\n6197\r\n6199\r\n6203\r\n6211\r\n6217\r\n6221\r\n6229\r\n6247\r\n6257\r\n6263\r\n6269\r\n6271\r\n6277\r\n6287\r\n6299\r\n6301\r\n6311\r\n6317\r\n6323\r\n6329\r\n6337\r\n6343\r\n6353\r\n6359\r\n6361\r\n6367\r\n6373\r\n6379\r\n6389\r\n6397\r\n6421\r\n6427\r\n6449\r\n6451\r\n6469\r\n6473\r\n6481\r\n6491\r\n6521\r\n6529\r\n6547\r\n6551\r\n6553\r\n6563\r\n6569\r\n6571\r\n6577\r\n6581\r\n6599\r\n6607\r\n6619\r\n6637\r\n6653\r\n6659\r\n6661\r\n6673\r\n6679\r\n6689\r\n6691\r\n6701\r\n6703\r\n6709\r\n6719\r\n6733\r\n6737\r\n6761\r\n6763\r\n6779\r\n6781\r\n6791\r\n6793\r\n6803\r\n6823\r\n6827\r\n6829\r\n6833\r\n6841\r\n6857\r\n6863\r\n6869\r\n6871\r\n6883\r\n6899\r\n6907\r\n6911\r\n6917\r\n6947\r\n6949\r\n6959\r\n6961\r\n6967\r\n6971\r\n6977\r\n6983\r\n6991\r\n6997\r\n7001\r\n7013\r\n7019\r\n7027\r\n7039\r\n7043\r\n7057\r\n7069\r\n7079\r\n7103\r\n7109\r\n7121\r\n7127\r\n7129\r\n7151\r\n7159\r\n7177\r\n7187\r\n7193\r\n7207\r\n7211\r\n7213\r\n7219\r\n7229\r\n7237\r\n7243\r\n7247\r\n7253\r\n7283\r\n7297\r\n7307\r\n7309\r\n7321\r\n7331\r\n7333\r\n7349\r\n7351\r\n7369\r\n7393\r\n7411\r\n7417\r\n7433\r\n7451\r\n7457\r\n7459\r\n7477\r\n7481\r\n7487\r\n7489\r\n7499\r\n7507\r\n7517\r\n7523\r\n7529\r\n7537\r\n7541\r\n7547\r\n7549\r\n7559\r\n7561\r\n7573\r\n7577\r\n7583\r\n7589\r\n7591\r\n7603\r\n7607\r\n7621\r\n7639\r\n7643\r\n7649\r\n7669\r\n7673\r\n7681\r\n7687\r\n7691\r\n7699\r\n7703\r\n7717\r\n7723\r\n7727\r\n7741\r\n7753\r\n7757\r\n7759\r\n7789\r\n7793\r\n7817\r\n7823\r\n7829\r\n7841\r\n7853\r\n7867\r\n7873\r\n7877\r\n7879\r\n7883\r\n7901\r\n7907\r\n7919\r\n", + "base64Encoded": false, + "contentType": "text/csv" + } + 
}, + "exitCode": 0 + } + } +] \ No newline at end of file diff --git a/static/20_prime_numbers.csv b/static/20_prime_numbers.csv new file mode 100644 index 00000000..d5c2a856 --- /dev/null +++ b/static/20_prime_numbers.csv @@ -0,0 +1,1000 @@ +2 +3 +5 +7 +11 +13 +17 +19 +23 +29 +31 +37 +41 +43 +47 +53 +59 +61 +67 +71 +73 +79 +83 +89 +97 +101 +103 +107 +109 +113 +127 +131 +137 +139 +149 +151 +157 +163 +167 +173 +179 +181 +191 +193 +197 +199 +211 +223 +227 +229 +233 +239 +241 +251 +257 +263 +269 +271 +277 +281 +283 +293 +307 +311 +313 +317 +331 +337 +347 +349 +353 +359 +367 +373 +379 +383 +389 +397 +401 +409 +419 +421 +431 +433 +439 +443 +449 +457 +461 +463 +467 +479 +487 +491 +499 +503 +509 +521 +523 +541 +547 +557 +563 +569 +571 +577 +587 +593 +599 +601 +607 +613 +617 +619 +631 +641 +643 +647 +653 +659 +661 +673 +677 +683 +691 +701 +709 +719 +727 +733 +739 +743 +751 +757 +761 +769 +773 +787 +797 +809 +811 +821 +823 +827 +829 +839 +853 +857 +859 +863 +877 +881 +883 +887 +907 +911 +919 +929 +937 +941 +947 +953 +967 +971 +977 +983 +991 +997 +1009 +1013 +1019 +1021 +1031 +1033 +1039 +1049 +1051 +1061 +1063 +1069 +1087 +1091 +1093 +1097 +1103 +1109 +1117 +1123 +1129 +1151 +1153 +1163 +1171 +1181 +1187 +1193 +1201 +1213 +1217 +1223 +1229 +1231 +1237 +1249 +1259 +1277 +1279 +1283 +1289 +1291 +1297 +1301 +1303 +1307 +1319 +1321 +1327 +1361 +1367 +1373 +1381 +1399 +1409 +1423 +1427 +1429 +1433 +1439 +1447 +1451 +1453 +1459 +1471 +1481 +1483 +1487 +1489 +1493 +1499 +1511 +1523 +1531 +1543 +1549 +1553 +1559 +1567 +1571 +1579 +1583 +1597 +1601 +1607 +1609 +1613 +1619 +1621 +1627 +1637 +1657 +1663 +1667 +1669 +1693 +1697 +1699 +1709 +1721 +1723 +1733 +1741 +1747 +1753 +1759 +1777 +1783 +1787 +1789 +1801 +1811 +1823 +1831 +1847 +1861 +1867 +1871 +1873 +1877 +1879 +1889 +1901 +1907 +1913 +1931 +1933 +1949 +1951 +1973 +1979 +1987 +1993 +1997 +1999 +2003 +2011 +2017 +2027 +2029 +2039 +2053 +2063 +2069 +2081 +2083 +2087 +2089 +2099 +2111 +2113 +2129 +2131 +2137 +2141 +2143 +2153 
+2161 +2179 +2203 +2207 +2213 +2221 +2237 +2239 +2243 +2251 +2267 +2269 +2273 +2281 +2287 +2293 +2297 +2309 +2311 +2333 +2339 +2341 +2347 +2351 +2357 +2371 +2377 +2381 +2383 +2389 +2393 +2399 +2411 +2417 +2423 +2437 +2441 +2447 +2459 +2467 +2473 +2477 +2503 +2521 +2531 +2539 +2543 +2549 +2551 +2557 +2579 +2591 +2593 +2609 +2617 +2621 +2633 +2647 +2657 +2659 +2663 +2671 +2677 +2683 +2687 +2689 +2693 +2699 +2707 +2711 +2713 +2719 +2729 +2731 +2741 +2749 +2753 +2767 +2777 +2789 +2791 +2797 +2801 +2803 +2819 +2833 +2837 +2843 +2851 +2857 +2861 +2879 +2887 +2897 +2903 +2909 +2917 +2927 +2939 +2953 +2957 +2963 +2969 +2971 +2999 +3001 +3011 +3019 +3023 +3037 +3041 +3049 +3061 +3067 +3079 +3083 +3089 +3109 +3119 +3121 +3137 +3163 +3167 +3169 +3181 +3187 +3191 +3203 +3209 +3217 +3221 +3229 +3251 +3253 +3257 +3259 +3271 +3299 +3301 +3307 +3313 +3319 +3323 +3329 +3331 +3343 +3347 +3359 +3361 +3371 +3373 +3389 +3391 +3407 +3413 +3433 +3449 +3457 +3461 +3463 +3467 +3469 +3491 +3499 +3511 +3517 +3527 +3529 +3533 +3539 +3541 +3547 +3557 +3559 +3571 +3581 +3583 +3593 +3607 +3613 +3617 +3623 +3631 +3637 +3643 +3659 +3671 +3673 +3677 +3691 +3697 +3701 +3709 +3719 +3727 +3733 +3739 +3761 +3767 +3769 +3779 +3793 +3797 +3803 +3821 +3823 +3833 +3847 +3851 +3853 +3863 +3877 +3881 +3889 +3907 +3911 +3917 +3919 +3923 +3929 +3931 +3943 +3947 +3967 +3989 +4001 +4003 +4007 +4013 +4019 +4021 +4027 +4049 +4051 +4057 +4073 +4079 +4091 +4093 +4099 +4111 +4127 +4129 +4133 +4139 +4153 +4157 +4159 +4177 +4201 +4211 +4217 +4219 +4229 +4231 +4241 +4243 +4253 +4259 +4261 +4271 +4273 +4283 +4289 +4297 +4327 +4337 +4339 +4349 +4357 +4363 +4373 +4391 +4397 +4409 +4421 +4423 +4441 +4447 +4451 +4457 +4463 +4481 +4483 +4493 +4507 +4513 +4517 +4519 +4523 +4547 +4549 +4561 +4567 +4583 +4591 +4597 +4603 +4621 +4637 +4639 +4643 +4649 +4651 +4657 +4663 +4673 +4679 +4691 +4703 +4721 +4723 +4729 +4733 +4751 +4759 +4783 +4787 +4789 +4793 +4799 +4801 +4813 +4817 +4831 +4861 +4871 +4877 +4889 +4903 +4909 +4919 +4931 
+4933 +4937 +4943 +4951 +4957 +4967 +4969 +4973 +4987 +4993 +4999 +5003 +5009 +5011 +5021 +5023 +5039 +5051 +5059 +5077 +5081 +5087 +5099 +5101 +5107 +5113 +5119 +5147 +5153 +5167 +5171 +5179 +5189 +5197 +5209 +5227 +5231 +5233 +5237 +5261 +5273 +5279 +5281 +5297 +5303 +5309 +5323 +5333 +5347 +5351 +5381 +5387 +5393 +5399 +5407 +5413 +5417 +5419 +5431 +5437 +5441 +5443 +5449 +5471 +5477 +5479 +5483 +5501 +5503 +5507 +5519 +5521 +5527 +5531 +5557 +5563 +5569 +5573 +5581 +5591 +5623 +5639 +5641 +5647 +5651 +5653 +5657 +5659 +5669 +5683 +5689 +5693 +5701 +5711 +5717 +5737 +5741 +5743 +5749 +5779 +5783 +5791 +5801 +5807 +5813 +5821 +5827 +5839 +5843 +5849 +5851 +5857 +5861 +5867 +5869 +5879 +5881 +5897 +5903 +5923 +5927 +5939 +5953 +5981 +5987 +6007 +6011 +6029 +6037 +6043 +6047 +6053 +6067 +6073 +6079 +6089 +6091 +6101 +6113 +6121 +6131 +6133 +6143 +6151 +6163 +6173 +6197 +6199 +6203 +6211 +6217 +6221 +6229 +6247 +6257 +6263 +6269 +6271 +6277 +6287 +6299 +6301 +6311 +6317 +6323 +6329 +6337 +6343 +6353 +6359 +6361 +6367 +6373 +6379 +6389 +6397 +6421 +6427 +6449 +6451 +6469 +6473 +6481 +6491 +6521 +6529 +6547 +6551 +6553 +6563 +6569 +6571 +6577 +6581 +6599 +6607 +6619 +6637 +6653 +6659 +6661 +6673 +6679 +6689 +6691 +6701 +6703 +6709 +6719 +6733 +6737 +6761 +6763 +6779 +6781 +6791 +6793 +6803 +6823 +6827 +6829 +6833 +6841 +6857 +6863 +6869 +6871 +6883 +6899 +6907 +6911 +6917 +6947 +6949 +6959 +6961 +6967 +6971 +6977 +6983 +6991 +6997 +7001 +7013 +7019 +7027 +7039 +7043 +7057 +7069 +7079 +7103 +7109 +7121 +7127 +7129 +7151 +7159 +7177 +7187 +7193 +7207 +7211 +7213 +7219 +7229 +7237 +7243 +7247 +7253 +7283 +7297 +7307 +7309 +7321 +7331 +7333 +7349 +7351 +7369 +7393 +7411 +7417 +7433 +7451 +7457 +7459 +7477 +7481 +7487 +7489 +7499 +7507 +7517 +7523 +7529 +7537 +7541 +7547 +7549 +7559 +7561 +7573 +7577 +7583 +7589 +7591 +7603 +7607 +7621 +7639 +7643 +7649 +7669 +7673 +7681 +7687 +7691 +7699 +7703 +7717 +7723 +7727 +7741 +7753 +7757 +7759 +7789 +7793 +7817 +7823 +7829 +7841 
+7853 +7867 +7873 +7877 +7879 +7883 +7901 +7907 +7919 diff --git a/static/21_email_preview.html b/static/21_email_preview.html new file mode 100644 index 00000000..b02167ca --- /dev/null +++ b/static/21_email_preview.html @@ -0,0 +1,42 @@ + + + + + + Email Preview: Prime Numbers CSV + + + +
+ + + +
+ + + \ No newline at end of file diff --git a/static/22_email_template.json b/static/22_email_template.json new file mode 100644 index 00000000..ea8be43b --- /dev/null +++ b/static/22_email_template.json @@ -0,0 +1,6 @@ +{ + "recipient": "recipient@example.com", + "subject": "Prime Numbers CSV", + "plainBody": "Sehr geehrte Damen und Herren,\n\nanbei finden Sie die Datei 'prime_numbers.csv', die die Liste der Primzahlen enth\u00e4lt.\n\nMit freundlichen Gr\u00fc\u00dfen,\nIhr Team", + "htmlBody": "

Sehr geehrte Damen und Herren,

anbei finden Sie die Datei 'prime_numbers.csv', die die Liste der Primzahlen enth\u00e4lt.

Mit freundlichen Gr\u00fc\u00dfen,
Ihr Team

" +} \ No newline at end of file diff --git a/static/23_documentProcessor.py b/static/23_documentProcessor.py new file mode 100644 index 00000000..d3b637e1 --- /dev/null +++ b/static/23_documentProcessor.py @@ -0,0 +1,933 @@ +""" +Module for extracting content from various file formats. +Provides specialized functions for processing text, PDF, Office documents, images, etc. +""" + +import logging +import os +import io +from typing import Dict, Any, List, Optional, Union, Tuple +import base64 + +# Configure logger +logger = logging.getLogger(__name__) + +# Optional imports - only loaded when needed +pdfExtractorLoaded = False +officeExtractorLoaded = False +imageProcessorLoaded = False + +def getDocumentContents(fileMetadata: Dict[str, Any], fileContent: bytes) -> List[Dict[str, Any]]: + """ + Main function for extracting content from a file based on its MIME type. + Delegates to specialized extraction functions. + + Args: + fileMetadata: File metadata (Name, MIME type, etc.) + fileContent: Binary data of the file + + Returns: + List of Document-Content objects with metadata and base64Encoded flag + """ + try: + mimeType = fileMetadata.get("mimeType", "application/octet-stream") + fileName = fileMetadata.get("name", "unknown") + + logger.info(f"Extracting content from file '{fileName}' (MIME type: {mimeType})") + + # Extract content based on MIME type + contents = [] + + # Text-based formats (excluding CSV which has its own handler) + if mimeType == "text/csv": + contents.extend(extractCsvContent(fileName, fileContent)) + + # Then handle other text-based formats + elif mimeType.startswith("text/") or mimeType in [ + "application/json", + "application/xml", + "application/javascript", + "application/x-python" + ]: + contents.extend(extractTextContent(fileName, fileContent, mimeType)) + + # SVG Files + elif mimeType == "image/svg+xml": + contents.extend(extractSvgContent(fileName, fileContent)) + + # Images + elif mimeType.startswith("image/"): + 
contents.extend(extractImageContent(fileName, fileContent, mimeType)) + + # PDF Documents + elif mimeType == "application/pdf": + contents.extend(extractPdfContent(fileName, fileContent)) + + # Word Documents + elif mimeType in [ + "application/vnd.openxmlformats-officedocument.wordprocessingml.document", + "application/msword" + ]: + contents.extend(extractWordContent(fileName, fileContent, mimeType)) + + # Excel Documents + elif mimeType in [ + "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet", + "application/vnd.ms-excel" + ]: + contents.extend(extractExcelContent(fileName, fileContent, mimeType)) + + # PowerPoint Documents + elif mimeType in [ + "application/vnd.openxmlformats-officedocument.presentationml.presentation", + "application/vnd.ms-powerpoint" + ]: + contents.extend(extractPowerpointContent(fileName, fileContent, mimeType)) + + # Binary data as fallback for unknown formats + else: + contents.extend(extractBinaryContent(fileName, fileContent, mimeType)) + + # Fallback when no content could be extracted + if not contents: + logger.warning(f"No content extracted from file '{fileName}', using binary fallback") + + # Convert binary content to base64 + encoded_data = base64.b64encode(fileContent).decode('utf-8') + + contents.append({ + "sequenceNr": 1, + "name": '1_undefined', + "ext": os.path.splitext(fileName)[1][1:] if os.path.splitext(fileName)[1] else "bin", + "contentType": mimeType, + "data": encoded_data, + "base64Encoded": True, + "metadata": { + "isText": False + } + }) + + # Add generic attributes for all documents + for content in contents: + # Make sure all content items have the base64Encoded flag + if "base64Encoded" not in content: + if isinstance(content.get("data"), bytes): + # Convert bytes to base64 + content["data"] = base64.b64encode(content["data"]).decode('utf-8') + content["base64Encoded"] = True + else: + # Assume text content if not explicitly marked + content["base64Encoded"] = False + + # Maintain backward 
compatibility with old "base64Encoded" flag in metadata + if "metadata" not in content: + content["metadata"] = {} + + # Set base64Encoded in metadata for backward compatibility + content["metadata"]["base64Encoded"] = content["base64Encoded"] + + logger.info(f"Successfully extracted {len(contents)} content items from file '{fileName}'") + return contents + + except Exception as e: + logger.error(f"Error during content extraction: {str(e)}") + # Fallback on error - return original data + return [{ + "sequenceNr": 1, + "name": fileMetadata.get("name", "unknown"), + "ext": os.path.splitext(fileMetadata.get("name", ""))[1][1:] if os.path.splitext(fileMetadata.get("name", ""))[1] else "bin", + "contentType": fileMetadata.get("mimeType", "application/octet-stream"), + "data": base64.b64encode(fileContent).decode('utf-8'), + "base64Encoded": True, + "metadata": { + "isText": False, + "base64Encoded": True # For backward compatibility + } + }] + + +def _loadPdfExtractor(): + """Loads PDF extraction libraries when needed""" + global pdfExtractorLoaded + if not pdfExtractorLoaded: + try: + global PyPDF2, fitz + import PyPDF2 + import fitz # PyMuPDF for more extensive PDF processing + pdfExtractorLoaded = True + logger.info("PDF extraction libraries successfully loaded") + except ImportError as e: + logger.warning(f"PDF extraction libraries could not be loaded: {e}") + +def _loadOfficeExtractor(): + """Loads Office document extraction libraries when needed""" + global officeExtractorLoaded + if not officeExtractorLoaded: + try: + global docx, openpyxl + import docx # python-docx for Word documents + import openpyxl # for Excel files + officeExtractorLoaded = True + logger.info("Office extraction libraries successfully loaded") + except ImportError as e: + logger.warning(f"Office extraction libraries could not be loaded: {e}") + +def _loadImageProcessor(): + """Loads image processing libraries when needed""" + global imageProcessorLoaded + if not imageProcessorLoaded: + try: 
+ global PIL, Image + from PIL import Image + imageProcessorLoaded = True + logger.info("Image processing libraries successfully loaded") + except ImportError as e: + logger.warning(f"Image processing libraries could not be loaded: {e}") + +def extractTextContent(fileName: str, fileContent: bytes, mimeType: str) -> List[Dict[str, Any]]: + """ + Extracts text from text files. + + Args: + fileName: Name of the file + fileContent: Binary data of the file + mimeType: MIME type of the file + + Returns: + List of Text-Content objects with base64Encoded = False + """ + try: + # Keep original file extension + fileExtension = os.path.splitext(fileName)[1][1:] if os.path.splitext(fileName)[1] else "txt" + + # Extract text content + textContent = fileContent.decode('utf-8') + return [{ + "sequenceNr": 1, + "name": "1_text", # Simplified naming + "ext": fileExtension, + "contentType": "text/plain", + "data": textContent, + "base64Encoded": False, + "metadata": { + "isText": True + } + }] + except UnicodeDecodeError: + logger.warning(f"Could not decode text from file '{fileName}' as UTF-8, trying alternative encodings") + try: + # Try alternative encodings + for encoding in ['latin-1', 'cp1252', 'iso-8859-1']: + try: + textContent = fileContent.decode(encoding) + logger.info(f"Text successfully decoded with encoding {encoding}") + return [{ + "sequenceNr": 1, + "name": "1_text", # Simplified naming + "ext": fileExtension, + "contentType": "text/plain", + "data": textContent, + "base64Encoded": False, + "metadata": { + "isText": True, + "encoding": encoding + } + }] + except UnicodeDecodeError: + continue + + # Fallback to binary data if no encoding works + logger.warning(f"Could not decode text, using binary data") + return [{ + "sequenceNr": 1, + "name": "1_binary", # Simplified naming + "ext": fileExtension, + "contentType": mimeType, + "data": base64.b64encode(fileContent).decode('utf-8'), + "base64Encoded": True, + "metadata": { + "isText": False + } + }] + except Exception 
as e: + logger.error(f"Error in alternative text decoding: {str(e)}") + # Return binary data as fallback + return [{ + "sequenceNr": 1, + "name": "1_binary", # Simplified naming + "ext": fileExtension, + "contentType": mimeType, + "data": base64.b64encode(fileContent).decode('utf-8'), + "base64Encoded": True, + "metadata": { + "isText": False + } + }] + +def extractCsvContent(fileName: str, fileContent: bytes) -> List[Dict[str, Any]]: + """ + Extracts content from CSV files. + + Args: + fileName: Name of the file + fileContent: Binary data of the file + + Returns: + List of CSV-Content objects with base64Encoded = False + """ + try: + # Extract text content + csvContent = fileContent.decode('utf-8') + return [{ + "sequenceNr": 1, + "name": "1_csv", # Simplified naming + "ext": "csv", + "contentType": "text/csv", + "data": csvContent, + "base64Encoded": False, + "metadata": { + "isText": True, + "format": "csv" + } + }] + except UnicodeDecodeError: + logger.warning(f"Could not decode CSV from file '{fileName}' as UTF-8, trying alternative encodings") + try: + # Try alternative encodings for CSV + for encoding in ['latin-1', 'cp1252', 'iso-8859-1']: + try: + csvContent = fileContent.decode(encoding) + logger.info(f"CSV successfully decoded with encoding {encoding}") + return [{ + "sequenceNr": 1, + "name": "1_csv", # Simplified naming + "ext": "csv", + "contentType": "text/csv", + "data": csvContent, + "base64Encoded": False, + "metadata": { + "isText": True, + "encoding": encoding, + "format": "csv" + } + }] + except UnicodeDecodeError: + continue + + # Fallback to binary data + return [{ + "sequenceNr": 1, + "name": "1_binary", # Simplified naming + "ext": "csv", + "contentType": "text/csv", + "data": base64.b64encode(fileContent).decode('utf-8'), + "base64Encoded": True, + "metadata": { + "isText": False + } + }] + except Exception as e: + logger.error(f"Error in alternative CSV decoding: {str(e)}") + return [{ + "sequenceNr": 1, + "name": "1_binary", # Simplified 
naming + "ext": "csv", + "contentType": "text/csv", + "data": base64.b64encode(fileContent).decode('utf-8'), + "base64Encoded": True, + "metadata": { + "isText": False + } + }] + +def extractSvgContent(fileName: str, fileContent: bytes) -> List[Dict[str, Any]]: + """ + Extracts content from SVG files. + + Args: + fileName: Name of the file + fileContent: Binary data of the file + + Returns: + List of SVG-Content objects with dual text/image metadata + """ + contents = [] + + try: + # Extract SVG as text content (XML) + svgText = fileContent.decode('utf-8') + + # Check if it's actually SVG by looking for the SVG tag + if " List[Dict[str, Any]]: + """ + Extracts content from image files and optionally generates metadata descriptions. + + Args: + fileName: Name of the file + fileContent: Binary data of the file + mimeType: MIME type of the file + + Returns: + List of Image-Content objects with base64Encoded = True + """ + + # Extract file extension from MIME type or filename + fileExtension = mimeType.split('/')[-1] + if fileExtension == "jpeg": + fileExtension = "jpg" + + # If possible, analyze image and extract metadata + imageMetadata = { + "isText": False, + "format": "image" + } + imageDescription = None + + try: + _loadImageProcessor() + if imageProcessorLoaded and fileContent and len(fileContent) > 0: + with io.BytesIO(fileContent) as imgStream: + try: + img = Image.open(imgStream) + # Check if the image was actually loaded + img.verify() + # To safely continue working, reload + imgStream.seek(0) + img = Image.open(imgStream) + imageMetadata.update({ + "format": img.format, + "mode": img.mode, + "width": img.width, + "height": img.height + }) + # Extract EXIF data if available + if hasattr(img, '_getexif') and callable(img._getexif): + exif = img._getexif() + if exif: + exifData = {} + for tagId, value in exif.items(): + exifData[f"tag_{tagId}"] = str(value) + imageMetadata["exif"] = exifData + + # Generate image description + imageDescription = f"Image 
({img.width}x{img.height}, {img.format}, {img.mode})" + except Exception as innerE: + logger.warning(f"Error processing image: {str(innerE)}") + imageMetadata["error"] = str(innerE) + imageDescription = f"Image (unable to process: {str(innerE)})" + except Exception as e: + logger.warning(f"Could not extract image metadata: {str(e)}") + imageMetadata["error"] = str(e) + + # Convert binary image to base64 + encoded_data = base64.b64encode(fileContent).decode('utf-8') + + # Return image content + contents = [{ + "sequenceNr": 1, + "name": "1_image", # Simplified naming + "ext": fileExtension, + "contentType": mimeType, + "data": encoded_data, + "base64Encoded": True, + "metadata": imageMetadata + }] + + # If image description available, add as additional text content + if imageDescription: + contents.append({ + "sequenceNr": 2, + "name": "2_text_image_info", # Simplified naming with label + "ext": "txt", + "contentType": "text/plain", + "data": imageDescription, + "base64Encoded": False, + "metadata": { + "isText": True, + "imageDescription": True + } + }) + + return contents + +def extractPdfContent(fileName: str, fileContent: bytes) -> List[Dict[str, Any]]: + """ + Extracts text and images from PDF files. 
+ + Args: + fileName: Name of the file + fileContent: Binary data of the file + + Returns: + List of PDF-Content objects (text and images) with appropriate base64Encoded flags + """ + contents = [] + extractedContentFound = False + + try: + # Load PDF extraction libraries + _loadPdfExtractor() + if not pdfExtractorLoaded: + logger.warning("PDF extraction not possible: Libraries not available") + # Add original file as binary content + contents.append({ + "sequenceNr": 1, + "name": "1_pdf", # Simplified naming + "ext": "pdf", + "contentType": "application/pdf", + "data": base64.b64encode(fileContent).decode('utf-8'), + "base64Encoded": True, + "metadata": { + "isText": False, + "format": "pdf" + } + }) + return contents + + # Extract text with PyPDF2 + extractedText = "" + pdfMetadata = {} + with io.BytesIO(fileContent) as pdfStream: + pdfReader = PyPDF2.PdfReader(pdfStream) + + # Extract metadata + pdfInfo = pdfReader.metadata or {} + for key, value in pdfInfo.items(): + if key.startswith('/'): + pdfMetadata[key[1:]] = value + else: + pdfMetadata[key] = value + + # Extract text from all pages + for pageNum in range(len(pdfReader.pages)): + page = pdfReader.pages[pageNum] + pageText = page.extract_text() + if pageText: + extractedText += f"--- Page {pageNum + 1} ---\n{pageText}\n\n" + + # If text was found, add as separate content + if extractedText.strip(): + extractedContentFound = True + contents.append({ + "sequenceNr": len(contents) + 1, + "name": f"{len(contents) + 1}_text", # Simplified naming + "ext": "txt", + "contentType": "text/plain", + "data": extractedText, + "base64Encoded": False, + "metadata": { + "isText": True, + "source": "pdf", + "pages": len(pdfReader.pages), + "pdfMetadata": pdfMetadata + } + }) + + # Extract images with PyMuPDF (fitz) + try: + with io.BytesIO(fileContent) as pdfStream: + doc = fitz.open(stream=pdfStream, filetype="pdf") + imageCount = 0 + + for pageNum in range(len(doc)): + page = doc[pageNum] + imageList = 
page.get_images(full=True) + + for imgIndex, imgInfo in enumerate(imageList): + try: + imageCount += 1 + xref = imgInfo[0] + baseImage = doc.extract_image(xref) + imageBytes = baseImage["image"] + imageExt = baseImage["ext"] + + # Add image as content - encode as base64 + extractedContentFound = True + contents.append({ + "sequenceNr": len(contents) + 1, + "name": f"{len(contents) + 1}_image_page{pageNum+1}_{imgIndex+1}", # Simplified naming with label + "ext": imageExt, + "contentType": f"image/{imageExt}", + "data": base64.b64encode(imageBytes).decode('utf-8'), + "base64Encoded": True, + "metadata": { + "isText": False, + "source": "pdf", + "page": pageNum + 1, + "index": imgIndex + } + }) + except Exception as imgE: + logger.warning(f"Error extracting image {imgIndex} on page {pageNum + 1}: {str(imgE)}") + + # Close document + doc.close() + + except Exception as imgExtractE: + logger.warning(f"Error extracting images from PDF: {str(imgExtractE)}") + + except Exception as e: + logger.error(f"Error in PDF extraction: {str(e)}") + + # If no content was extracted, add the original PDF + if not extractedContentFound: + contents.append({ + "sequenceNr": 1, + "name": "1_pdf", # Simplified naming + "ext": "pdf", + "contentType": "application/pdf", + "data": base64.b64encode(fileContent).decode('utf-8'), + "base64Encoded": True, + "metadata": { + "isText": False, + "format": "pdf" + } + }) + + return contents + +def extractWordContent(fileName: str, fileContent: bytes, mimeType: str) -> List[Dict[str, Any]]: + """ + Extracts text and images from Word documents. 
+ + Args: + fileName: Name of the file + fileContent: Binary data of the file + mimeType: MIME type of the file + + Returns: + List of Word-Content objects (text and possibly images) with appropriate base64Encoded flags + """ + contents = [] + extractedContentFound = False + + # Determine file extension + fileExtension = "docx" if mimeType == "application/vnd.openxmlformats-officedocument.wordprocessingml.document" else "doc" + + try: + # Load Office extraction libraries + _loadOfficeExtractor() + if not officeExtractorLoaded: + logger.warning("Word extraction not possible: Libraries not available") + # Add original file as binary content + contents.append({ + "sequenceNr": 1, + "name": "1_word", # Simplified naming + "ext": fileExtension, + "contentType": mimeType, + "data": base64.b64encode(fileContent).decode('utf-8'), + "base64Encoded": True, + "metadata": { + "isText": False, + "format": "word" + } + }) + return contents + + # Only supports DOCX (newer format) + if mimeType == "application/vnd.openxmlformats-officedocument.wordprocessingml.document": + with io.BytesIO(fileContent) as docxStream: + doc = docx.Document(docxStream) + + # Extract text + fullText = [] + for para in doc.paragraphs: + fullText.append(para.text) + + # Extract tables + for table in doc.tables: + for row in table.rows: + rowText = [] + for cell in row.cells: + rowText.append(cell.text) + fullText.append(" | ".join(rowText)) + + extractedText = "\n\n".join(fullText) + + # Add extracted text as content + if extractedText.strip(): + extractedContentFound = True + contents.append({ + "sequenceNr": 1, + "name": "1_text", # Simplified naming + "ext": "txt", + "contentType": "text/plain", + "data": extractedText, + "base64Encoded": False, + "metadata": { + "isText": True, + "source": "docx", + "paragraphCount": len(doc.paragraphs), + "tableCount": len(doc.tables) + } + }) + else: + logger.warning(f"Extraction from old Word format (DOC) not supported") + + except Exception as e: + 
logger.error(f"Error in Word extraction: {str(e)}") + + # If no content was extracted, add the original document + if not extractedContentFound: + contents.append({ + "sequenceNr": 1, + "name": "1_word", # Simplified naming + "ext": fileExtension, + "contentType": mimeType, + "data": base64.b64encode(fileContent).decode('utf-8'), + "base64Encoded": True, + "metadata": { + "isText": False, + "format": "word" + } + }) + + return contents + +def extractExcelContent(fileName: str, fileContent: bytes, mimeType: str) -> List[Dict[str, Any]]: + """ + Extracts table data from Excel files. + + Args: + fileName: Name of the file + fileContent: Binary data of the file + mimeType: MIME type of the file + + Returns: + List of Excel-Content objects with appropriate base64Encoded flags + """ + contents = [] + extractedContentFound = False + + # Determine file extension + fileExtension = "xlsx" if mimeType == "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet" else "xls" + + try: + # Load Office extraction libraries + _loadOfficeExtractor() + if not officeExtractorLoaded: + logger.warning("Excel extraction not possible: Libraries not available") + # Add original file as binary content + contents.append({ + "sequenceNr": 1, + "name": "1_excel", # Simplified naming + "ext": fileExtension, + "contentType": mimeType, + "data": base64.b64encode(fileContent).decode('utf-8'), + "base64Encoded": True, + "metadata": { + "isText": False, + "format": "excel" + } + }) + return contents + + # Only supports XLSX (newer format) + if mimeType == "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet": + with io.BytesIO(fileContent) as xlsxStream: + workbook = openpyxl.load_workbook(xlsxStream, data_only=True) + + # Extract each worksheet as separate CSV content + for sheetIndex, sheetName in enumerate(workbook.sheetnames): + sheet = workbook[sheetName] + + # Format data as CSV + csvRows = [] + for row in sheet.iter_rows(): + csvRow = [] + for cell in row: + value = 
cell.value + if value is None: + csvRow.append("") + else: + csvRow.append(str(value).replace('"', '""')) + csvRows.append(','.join(f'"{cell}"' for cell in csvRow)) + + csvContent = "\n".join(csvRows) + + # Add as CSV content + if csvContent.strip(): + extractedContentFound = True + sheetSafeName = sheetName.replace(" ", "_").replace("/", "_").replace("\\", "_") + contents.append({ + "sequenceNr": len(contents) + 1, + "name": f"{len(contents) + 1}_csv_{sheetSafeName}", # Simplified naming with sheet label + "ext": "csv", + "contentType": "text/csv", + "data": csvContent, + "base64Encoded": False, + "metadata": { + "isText": True, + "source": "xlsx", + "sheet": sheetName, + "format": "csv" + } + }) + else: + logger.warning(f"Extraction from old Excel format (XLS) not supported") + + except Exception as e: + logger.error(f"Error in Excel extraction: {str(e)}") + + # If no content was extracted, add the original document + if not extractedContentFound: + contents.append({ + "sequenceNr": 1, + "name": "1_excel", # Simplified naming + "ext": fileExtension, + "contentType": mimeType, + "data": base64.b64encode(fileContent).decode('utf-8'), + "base64Encoded": True, + "metadata": { + "isText": False, + "format": "excel" + } + }) + + return contents + +def extractPowerpointContent(fileName: str, fileContent: bytes, mimeType: str) -> List[Dict[str, Any]]: + """ + Extracts content from PowerPoint presentations. 
+ + Args: + fileName: Name of the file + fileContent: Binary data of the file + mimeType: MIME type of the file + + Returns: + List of PowerPoint-Content objects with base64Encoded = True + """ + # For PowerPoint, we currently only return the original binary file + # A complete extraction would require more specialized libraries + fileExtension = "pptx" if mimeType == "application/vnd.openxmlformats-officedocument.presentationml.presentation" else "ppt" + return [{ + "sequenceNr": 1, + "name": "1_powerpoint", # Simplified naming + "ext": fileExtension, + "contentType": mimeType, + "data": base64.b64encode(fileContent).decode('utf-8'), + "base64Encoded": True, + "metadata": { + "isText": False, + "format": "powerpoint" + } + }] + +def extractBinaryContent(fileName: str, fileContent: bytes, mimeType: str) -> List[Dict[str, Any]]: + """ + Fallback for binary files where no specific extraction is possible. + + Args: + fileName: Name of the file + fileContent: Binary data of the file + mimeType: MIME type of the file + + Returns: + List with a binary Content object with base64Encoded = True + """ + fileExtension = os.path.splitext(fileName)[1][1:] if os.path.splitext(fileName)[1] else "bin" + return [{ + "sequenceNr": 1, + "name": "1_binary", # Simplified naming + "ext": fileExtension, + "contentType": mimeType, + "data": base64.b64encode(fileContent).decode('utf-8'), + "base64Encoded": True, + "metadata": { + "isText": False, + "format": "binary" + } + }] \ No newline at end of file diff --git a/static/24_defAttributes.py b/static/24_defAttributes.py new file mode 100644 index 00000000..731ecfd9 --- /dev/null +++ b/static/24_defAttributes.py @@ -0,0 +1,123 @@ +from pydantic import BaseModel, Field +from typing import List, Dict, Any, Optional + +# Define the model for attribute definitions +class AttributeDefinition(BaseModel): + name: str + label: str + type: str + required: bool = False + placeholder: Optional[str] = None + defaultValue: Optional[Any] = None + 
options: Optional[List[Dict[str, Any]]] = None + editable: bool = True + visible: bool = True + order: int = 0 + validation: Optional[Dict[str, Any]] = None + helpText: Optional[str] = None + +# Helper classes for type mapping +typeMappings = { + "int": "number", + "str": "string", + "float": "number", + "bool": "boolean", + "List[int]": "array", + "List[str]": "array", + "Dict[str, Any]": "object", + "Optional[str]": "string", + "Optional[int]": "number", + "Optional[Dict[str, Any]]": "object" +} + +# Special field types based on naming conventions +specialFieldTypes = { + "content": "textarea", + "description": "textarea", + "instructions": "textarea", + "password": "password", + "email": "email", + "workspaceId": "select", + "agentId": "select", + "type": "select" +} + +# Function to convert a Pydantic model into attribute definitions +def getModelAttributes(modelClass, userLanguage="de"): + """ + Converts a Pydantic model into a list of AttributeDefinition objects + """ + attributes = [] + + # Go through all fields in the model + for i, (fieldName, field) in enumerate(modelClass.__fields__.items()): + # Skip internal fields + if fieldName.startswith('_') or fieldName in ["label", "fieldLabels"]: + continue + + # Determine the field type + fieldType = typeMappings.get(str(field.type_), "string") + + # Check for special field types + if fieldName in specialFieldTypes: + fieldType = specialFieldTypes[fieldName] + + # Get the label (if available) + fieldLabel = fieldName.replace('_', ' ').capitalize() + if hasattr(modelClass, 'fieldLabels') and fieldName in modelClass.fieldLabels: + labelObj = modelClass.fieldLabels[fieldName] + fieldLabel = labelObj.getLabel(userLanguage) + + # Determine default values and required status + required = field.required + defaultValue = field.default if not field.required else None + + # Check for validation rules + validation = None + if field.validators: + validation = {"hasValidators": True} + + # Placeholder text + placeholder = 
f"Please enter {fieldLabel}" + + # Special options for Select fields + options = None + if fieldType == "select": + if fieldName == "type" and modelClass.__name__ == "Agent": + options = [ + {"value": "Analysis", "label": "Analysis"}, + {"value": "Transformation", "label": "Transformation"}, + {"value": "Generation", "label": "Generation"}, + {"value": "Classification", "label": "Classification"}, + {"value": "Custom", "label": "Custom"} + ] + + # Extract description from Field object + description = None + # Try to get description from various possible sources + if hasattr(field, 'field_info') and hasattr(field.field_info, 'description'): + description = field.field_info.description + elif hasattr(field, 'description'): + description = field.description + elif hasattr(field, 'schema') and hasattr(field.schema, 'description'): + description = field.schema.description + + # Create attribute definition + attrDef = AttributeDefinition( + name=fieldName, + label=fieldLabel, + type=fieldType, + required=required, + placeholder=placeholder, + defaultValue=defaultValue, + options=options, + editable=fieldName not in ["id", "mandateId", "userId", "createdAt", "uploadDate"], + visible=fieldName not in ["hashedPassword", "mandateId", "userId"], + order=i, + validation=validation, + helpText=description or "" # Set empty string as default value if no description found + ) + + attributes.append(attrDef) + + return attributes \ No newline at end of file diff --git a/static/25_email_preview.html b/static/25_email_preview.html new file mode 100644 index 00000000..b9a1d176 --- /dev/null +++ b/static/25_email_preview.html @@ -0,0 +1,42 @@ + + + + + + Email Preview: Attached: documentProcessor.py and defAttributes.py + + + + + + + \ No newline at end of file diff --git a/static/26_email_template.json b/static/26_email_template.json new file mode 100644 index 00000000..bbc2aa46 --- /dev/null +++ b/static/26_email_template.json @@ -0,0 +1,6 @@ +{ + "recipient": 
"recipient@example.com", + "subject": "Attached: documentProcessor.py and defAttributes.py", + "plainBody": "Sehr geehrte Damen und Herren,\n\nanbei finden Sie die angeforderten Dokumente 'documentProcessor.py' und 'defAttributes.py'. Bitte z\u00f6gern Sie nicht, sich bei Fragen oder weiteren Anliegen an uns zu wenden.\n\nMit freundlichen Gr\u00fc\u00dfen,\n\nIhr Team", + "htmlBody": "

Sehr geehrte Damen und Herren,

anbei finden Sie die angeforderten Dokumente documentProcessor.py und defAttributes.py. Bitte z\u00f6gern Sie nicht, sich bei Fragen oder weiteren Anliegen an uns zu wenden.

Mit freundlichen Gr\u00fc\u00dfen,
Ihr Team

" +} \ No newline at end of file diff --git a/static/27_email_preview.html b/static/27_email_preview.html new file mode 100644 index 00000000..b250da5e --- /dev/null +++ b/static/27_email_preview.html @@ -0,0 +1,42 @@ + + + + + + Email Preview: Angehängt: documentProcessor.py und defAttributes.py + + + + + + + \ No newline at end of file diff --git a/static/28_email_template.json b/static/28_email_template.json new file mode 100644 index 00000000..e0355eea --- /dev/null +++ b/static/28_email_template.json @@ -0,0 +1,6 @@ +{ + "recipient": "team@example.com", + "subject": "Angeh\u00e4ngt: documentProcessor.py und defAttributes.py", + "plainBody": "Liebe Teammitglieder,\n\nim Anhang finden Sie die Dateien documentProcessor.py und defAttributes.py. Bitte \u00fcberpr\u00fcfen Sie diese und geben Sie mir Ihr Feedback.\n\nMit freundlichen Gr\u00fc\u00dfen,\n[Ihr Name]", + "htmlBody": "
E-Mail-Vorschau

Liebe Teammitglieder,

im Anhang finden Sie die Dateien documentProcessor.py und defAttributes.py. Bitte \u00fcberpr\u00fcfen Sie diese und geben Sie mir Ihr Feedback.

Mit freundlichen Gr\u00fc\u00dfen,
[Ihr Name]

Dies ist eine Vorschau der E-Mail und kann in verschiedenen E-Mail-Clients unterschiedlich angezeigt werden.
" +} \ No newline at end of file diff --git a/static/6_email_preview.html b/static/6_email_preview.html new file mode 100644 index 00000000..2f609af2 --- /dev/null +++ b/static/6_email_preview.html @@ -0,0 +1,42 @@ + + + + + + Email Preview: Verspätete Ankunft morgen + + + + + + + \ No newline at end of file diff --git a/static/7_email_template.json b/static/7_email_template.json new file mode 100644 index 00000000..9d50aa0d --- /dev/null +++ b/static/7_email_template.json @@ -0,0 +1,6 @@ +{ + "recipient": "i.dittrich@valueon.ch", + "subject": "Versp\u00e4tete Ankunft morgen", + "plainBody": "Hallo Ida,\n\nich wollte dich nur kurz informieren, dass ich morgen etwas sp\u00e4ter ankommen werde. Ich hoffe, das ist in Ordnung.\n\nBis dann!\n\nViele Gr\u00fc\u00dfe", + "htmlBody": "

Hallo Ida,

ich wollte dich nur kurz informieren, dass ich morgen etwas sp\u00e4ter ankommen werde. Ich hoffe, das ist in Ordnung.

Bis dann!

Viele Gr\u00fc\u00dfe

" +} \ No newline at end of file diff --git a/static/8_email_preview.html b/static/8_email_preview.html new file mode 100644 index 00000000..dd5c9ff8 --- /dev/null +++ b/static/8_email_preview.html @@ -0,0 +1,74 @@ + + + + + + Email Preview: Verspätete Ankunft morgen + + + + + + + \ No newline at end of file diff --git a/static/9_email_template.json b/static/9_email_template.json new file mode 100644 index 00000000..704bb247 --- /dev/null +++ b/static/9_email_template.json @@ -0,0 +1,6 @@ +{ + "recipient": "i.dittrich@valueon.ch", + "subject": "Versp\u00e4tete Ankunft morgen", + "plainBody": "Hallo Ida,\n\nich wollte dich nur kurz informieren, dass ich morgen etwas sp\u00e4ter ankommen werde. Ich hoffe, das ist in Ordnung.\n\nBis dann!\n\nViele Gr\u00fc\u00dfe", + "htmlBody": "\n\n\nEmail Preview: Versp\u00e4tete Ankunft morgen\n\n\n\n
\n
\n

Email Template Preview

\n
\n
\n

To: i.dittrich@valueon.ch

\n

Subject: Versp\u00e4tete Ankunft morgen

\n
\n

Hallo Ida,

\n

ich wollte dich nur kurz informieren, dass ich morgen etwas sp\u00e4ter ankommen werde. Ich hoffe, das ist in Ordnung.

\n

Bis dann!

\n

Viele Gr\u00fc\u00dfe

\n
\n
\n
\n

Dies ist eine Vorschau des E-Mail-Templates.

\n
\n
\n\n" +} \ No newline at end of file diff --git a/token_storage/7d08aab9-a170-4975-8898-bc7e0a95488e.json b/token_storage/7d08aab9-a170-4975-8898-bc7e0a95488e.json new file mode 100644 index 00000000..01b47ee0 --- /dev/null +++ b/token_storage/7d08aab9-a170-4975-8898-bc7e0a95488e.json @@ -0,0 +1 @@ +{"access_token": "eyJ0eXAiOiJKV1QiLCJub25jZSI6ImMyMkF0TDR3NzdGSEluczFRVlFKSFFfMWEzN1I1WmtZZFJ5NmhLNElaY00iLCJhbGciOiJSUzI1NiIsIng1dCI6IkNOdjBPSTNSd3FsSEZFVm5hb01Bc2hDSDJYRSIsImtpZCI6IkNOdjBPSTNSd3FsSEZFVm5hb01Bc2hDSDJYRSJ9.eyJhdWQiOiIwMDAwMDAwMy0wMDAwLTAwMDAtYzAwMC0wMDAwMDAwMDAwMDAiLCJpc3MiOiJodHRwczovL3N0cy53aW5kb3dzLm5ldC82YTUxYWFlYi0yNDY3LTQxODYtOTUwNC0yYTA1YWVkYzU5MWYvIiwiaWF0IjoxNzQ2NTc0ODAwLCJuYmYiOjE3NDY1NzQ4MDAsImV4cCI6MTc0NjU4MDE0MSwiYWNjdCI6MCwiYWNyIjoiMSIsImFjcnMiOlsicDEiXSwiYWlvIjoiQWFRQVcvOFpBQUFBM3ZjTDcyMktKd0lmbk4yQnFxMW5lQWVlSjMwMlA2VEg3QVFPTzJKRG85UU5UdlFqaFRZTEd6QURWb0VOSkN4OStZSHZNUjBGUWxPUlN0QWZLUVFBVzJacFJaME02YlhidS9rb0d2cFIyKy92OGhQOTZySnl3b3ZzZlFTZzh3c2MrSndNa0NZQ2djU3VnWkRmcUxNMS9ZYlpyVnFPQUNzeUpQdTJpTFlwbWpOZStKNXhQSUc4eE1KdlZlbStRbFkrS0RrNW5DcSs5R2t2eU1zVEk4d2duQT09IiwiYW1yIjpbInB3ZCIsInJzYSIsIm1mYSJdLCJhcHBfZGlzcGxheW5hbWUiOiJQTSBUZXN0IC0gRW1haWwgRHJhZnQiLCJhcHBpZCI6ImM3ZTcxMTJkLTYxZGMtNGYzYS04Y2QzLTA4Y2M0Y2Q3NTA0YyIsImFwcGlkYWNyIjoiMSIsImRldmljZWlkIjoiOWE0YTM2OWEtNjBhOS00NjdlLWFjNTktODdkZGQyMDUxZGU5IiwiZmFtaWx5X25hbWUiOiJNb3RzY2giLCJnaXZlbl9uYW1lIjoiUGF0cmljayIsImlkdHlwIjoidXNlciIsImlwYWRkciI6IjE3OC4xOTcuMjIyLjE0OCIsIm5hbWUiOiJQYXRyaWNrIE1vdHNjaCIsIm9pZCI6IjdkMDhhYWI5LWExNzAtNDk3NS04ODk4LWJjN2UwYTk1NDg4ZSIsInBsYXRmIjoiMyIsInB1aWQiOiIxMDAzN0ZGRThDREQ2QTgyIiwicmgiOiIxLkFRc0E2NnBSYW1ja2hrR1ZCQ29GcnR4Wkh3TUFBQUFBQUFBQXdBQUFBQUFBQUFDRUFEQUxBQS4iLCJzY3AiOiJNYWlsLlJlYWRXcml0ZSBvcGVuaWQgcHJvZmlsZSBVc2VyLlJlYWQgZW1haWwiLCJzaWQiOiIyOTI0ZTgxMS0xMTM1LTQ0ZTItOGUxYi1kMmU2YmVhZmI3ZTUiLCJzaWduaW5fc3RhdGUiOlsia21zaSJdLCJzdWIiOiJJZzBpcDN4YWRiTGl1S3piRmd3VmhOSU1fRHpHMHdweGlFRmIySll1Y240IiwidGVuYW50X3JlZ2lvbl9zY29wZSI6IkVVIiwidGlkIjoiNmE1MWFhZWItMjQ2Ny00MTg2LTk1
MDQtMmEwNWFlZGM1OTFmIiwidW5pcXVlX25hbWUiOiJwLm1vdHNjaEB2YWx1ZW9uLmNoIiwidXBuIjoicC5tb3RzY2hAdmFsdWVvbi5jaCIsInV0aSI6IndScENlYUtKNUVtWXdDN0xUbTJIQUEiLCJ2ZXIiOiIxLjAiLCJ3aWRzIjpbIjE1OGMwNDdhLWM5MDctNDU1Ni1iN2VmLTQ0NjU1MWE2YjVmNyIsIjliODk1ZDkyLTJjZDMtNDRjNy05ZDAyLWE2YWMyZDVlYTVjMyIsImNmMWMzOGU1LTM2MjEtNDAwNC1hN2NiLTg3OTYyNGRjZWQ3YyIsIjlmMDYyMDRkLTczYzEtNGQ0Yy04ODBhLTZlZGI5MDYwNmZkOCIsIjg5MmM1ODQyLWE5YTYtNDYzYS04MDQxLTcyYWEwOGNhM2NmNiIsImI3OWZiZjRkLTNlZjktNDY4OS04MTQzLTc2YjE5NGU4NTUwOSJdLCJ4bXNfZnRkIjoiSUhCXzdUNG9NLUpoejdrTnNjOGhNVnpPazIyZnV3cmdCYkRqZnlKb2xhY0JjM2RsWkdWdVl5MWtjMjF6IiwieG1zX2lkcmVsIjoiMjggMSIsInhtc19zdCI6eyJzdWIiOiJSMnZEMEcxbW1hWVJDN0pZV2NJU1pXMktEUGdOQmpCTEZsNmVMQUJfUFVNIn0sInhtc190Y2R0IjoxNDE4MjE0NTAxLCJ4bXNfdGRiciI6IkVVIn0.ESBS6AJiQaHa7xd59iZDBPvg66EJYEvrxLibqv8WM5edqNN0BMk3G7OFeDdivgf5BWCoRnDVsUII5S1Rb7eo-5nmWGC3xDq4uLzZ-ilOv4K2xErUUjU5x_tcpN67UtskBkb0LdwrTMzcmlc43iwkLGFlhKAELg07LuyGjxdjTN0izVc02eQL2-Z0mQVNI9ipUKeU40whmvDlI66nwnZM8lCRb7CU1g8_tgaQGqTmDrGbILz3zSNdyxPkzVag7g4c6nfs15bGZr3Vu_ouaz9zB3cdIFQ5vhI8Gb0IqOHZQvtmQ5zSDZ62Z0c2GBDEy7Zq0GZCxuBvmJjHPd3EoT9yXw", "refresh_token": 
"1.AQsA66pRamckhkGVBCoFrtxZHy0R58fcYTpPjNMIzEzXUEyEADALAA.AgABAwEAAABVrSpeuWamRam2jAF1XRQEAwDs_wUA9P8nMmZ5--kLI0YXEAYqwORMNt9GxWnIx_Y4RwBXPDOzc1RtmnjOmrpqlhoL1QYzpyfe2OKizkyx7z5q-NSA3gweH-kgKALAh0iVpZi6e3IqqW8igDjAtIwVMHS5HuV7-7WMQVILYJ9LfIap-CXvAIyDd0-3DkbjMc7T1V9xPKFIyDQN6TPNuojeW_oKZNd_EsBr2vyZjWjgmTVzb1sZwmNQFao0ZOcDpW5H6H2HXxL_pB6wk0K8ppBYNLj1bC_g1Si3htREBWC7W7UfngqENdaAkfBhH5UKoS73aarJZ9eZv-wGgAD-aDLubx2C5wRaLXWPpENMWK8st3Or9tWVfBDrFgU2JZ_UCnfd8gPKcpi-_qepFcb3FncdAsMpCQQLvXs7O4qSlBlD8QE6m2JrbrjtC5U14ZPoPekUwnd9V-m5wWCIpxBw1sMy04BxSx2xg9EziZ-_VjoIKEvB6m3A8uWXUDIYeki9QeNKl47es6wIEdUV1hmf8MSjtWfUK_azwvkIMWYSNiJ42hN74jBxxYfa7bDxajpTiu8iV75zuQv9kqR9mS2-lTvtH1Y9_qYtYQDQ4ldB0GaRx62-SasO9IUl_ugpyljA4lue7cblaGf-yrU3wjdbylGUr7t6XN3v4_yXs2_yQ5knfWsnUulxfvUfWMFzFwtYONI8eAiOmra1nYjnDAxydUyPJrquwvPge7T4Jlo8o2BvQJnwSy9NZTuD_1RZMu54LAw67FCe5HKKNDaZHtCrB_B1Qq0oEgLY9CiXF5RQj-78UUfBPLrhpFBsROU5Q9Q77tRTz36zI3WK6sZ4nTukQsPSILIBrG_6W7BTKl3l4vykNAT4u159QWIxONKU_1-E2XEZ3r9xc2ik1TFq-bX0-FhFtKtJvzqD-w239KUwn6Pc3vjk7OHZ7WRK3pABC_AubvNep3h8hbnaefNaTaNhGy2aossM8jYo4oy_tS6p1yhuKzM4et4ZBQJaWZBSZ-TYVFK9r7f6jMF3gaeD2RKBq5ZGGCaLOJLnCT1bSn0ThFRS5xlKMvLxH8qddwU4", "user_info": {"name": "Patrick Motsch", "email": "p.motsch@valueon.ch"}, "timestamp": "2025-05-07T01:45:00.286453"} \ No newline at end of file