diff --git a/app.py b/app.py index 9ce30d13..bf34d3a4 100644 --- a/app.py +++ b/app.py @@ -114,7 +114,14 @@ app.add_middleware( baseDir = pathlib.Path(__file__).parent staticFolder = baseDir / "static" os.makedirs(staticFolder, exist_ok=True) -app.mount("/static", StaticFiles(directory=str(staticFolder)), name="static") + +# Mount static files with proper configuration +app.mount("/static", StaticFiles(directory=str(staticFolder), html=True), name="static") + +# Add favicon route +@app.get("/favicon.ico") +async def favicon(): + return FileResponse(str(staticFolder / "favicon.ico"), media_type="image/x-icon") # General Elements @app.get("/", tags=["General"]) diff --git a/connectors/BACKUP-connectorDbJson.py b/connectors/BACKUP-connectorDbJson.py deleted file mode 100644 index f4bdea80..00000000 --- a/connectors/BACKUP-connectorDbJson.py +++ /dev/null @@ -1,569 +0,0 @@ -import json -import os -from typing import List, Dict, Any, Optional, Union -import logging - -logger = logging.getLogger(__name__) - -class DatabaseConnector: - """ - A connector for JSON-based data storage. - Provides generic database operations with tenant and user context support. - """ - def __init__(self, dbHost: str, dbDatabase: str, dbUser: str = None, dbPassword: str = None, - mandateId: int = None, userId: int = None, skipInitialIdLookup: bool = False): - """ - Initializes the JSON database connector. 
- - Args: - dbHost: Directory for the JSON files - dbDatabase: Database name - dbUser: Username for authentication (optional) - dbPassword: API key for authentication (optional) - mandateId: Context parameter for the tenant - userId: Context parameter for the user - skipInitialIdLookup: When True, skips looking up initial IDs for mandateId and userId - """ - # Store the input parameters - self.dbHost = dbHost - self.dbDatabase = dbDatabase - self.dbUser = dbUser - self.dbPassword = dbPassword - self.skipInitialIdLookup = skipInitialIdLookup - - # Check if context parameters are set - if mandateId is None or userId is None: - raise ValueError("mandateId and userId must be set") - - # Ensure the database directory exists - self.dbFolder = os.path.join(self.dbHost, self.dbDatabase) - os.makedirs(self.dbFolder, exist_ok=True) - - # Cache for loaded data - self._tablesCache = {} - - # Initialize system table - self._systemTableName = "_system" - self._initializeSystemTable() - - # Temporarily store mandateId and userId - self._mandateId = mandateId - self._userId = userId - - # If mandateId or userId are 0 and we're not skipping ID lookup, try to use the initial IDs - if not skipInitialIdLookup: - if mandateId == 0: - initialMandateId = self.getInitialId("mandates") - if initialMandateId is not None: - self._mandateId = initialMandateId - logger.info(f"Using initial mandateId: {initialMandateId} instead of 0") - - if userId == 0: - initialUserId = self.getInitialId("users") - if initialUserId is not None: - self._userId = initialUserId - logger.info(f"Using initial userId: {initialUserId} instead of 0") - - # Set the effective IDs as properties - self.mandateId = self._mandateId - self.userId = self._userId - - logger.info(f"DatabaseConnector initialized for directory: {self.dbFolder}") - logger.debug(f"Context: mandateId={self.mandateId}, userId={self.userId}") - - def _initializeSystemTable(self): - """Initializes the system table if it doesn't exist yet.""" - 
systemTablePath = self._getTablePath(self._systemTableName) - if not os.path.exists(systemTablePath): - emptySystemTable = {} - self._saveSystemTable(emptySystemTable) - logger.info(f"System table initialized in {systemTablePath}") - else: - # Load existing system table to ensure it's available - self._loadSystemTable() - logger.debug(f"Existing system table loaded from {systemTablePath}") - - def _loadSystemTable(self) -> Dict[str, int]: - """Loads the system table with the initial IDs.""" - # Check if system table is in cache - if f"_{self._systemTableName}" in self._tablesCache: - return self._tablesCache[f"_{self._systemTableName}"] - - systemTablePath = self._getTablePath(self._systemTableName) - try: - if os.path.exists(systemTablePath): - with open(systemTablePath, 'r', encoding='utf-8') as f: - data = json.load(f) - # Store in cache with special prefix to avoid collision with regular tables - self._tablesCache[f"_{self._systemTableName}"] = data - return data - else: - self._tablesCache[f"_{self._systemTableName}"] = {} - return {} - except Exception as e: - logger.error(f"Error loading the system table: {e}") - self._tablesCache[f"_{self._systemTableName}"] = {} - return {} - - def _saveSystemTable(self, data: Dict[str, int]) -> bool: - """Saves the system table with the initial IDs.""" - systemTablePath = self._getTablePath(self._systemTableName) - try: - with open(systemTablePath, 'w', encoding='utf-8') as f: - json.dump(data, f, indent=2, ensure_ascii=False) - # Update cache - self._tablesCache[f"_{self._systemTableName}"] = data - return True - except Exception as e: - logger.error(f"Error saving the system table: {e}") - return False - - def _getTablePath(self, table: str) -> str: - """Returns the full path to a table file""" - return os.path.join(self.dbFolder, f"{table}.json") - - def _loadTable(self, table: str) -> List[Dict[str, Any]]: - """Loads a table from the corresponding JSON file""" - path = self._getTablePath(table) - - # If the table is 
the system table, load it directly - if table == self._systemTableName: - return [] # The system table is not treated like normal tables - - # If the table is already in the cache, use the cache - if table in self._tablesCache: - return self._tablesCache[table] - - # Otherwise load the file - try: - if os.path.exists(path): - with open(path, 'r', encoding='utf-8') as f: - data = json.load(f) - self._tablesCache[table] = data - - # If data was loaded and no initial ID is registered yet, - # register the ID of the first record (if available) - if data and not self.hasInitialId(table): - if "id" in data[0]: - self._registerInitialId(table, data[0]["id"]) - logger.info(f"Initial ID {data[0]['id']} for table {table} retroactively registered") - - return data - else: - # If the file doesn't exist, create an empty table - logger.info(f"New table {table}") - self._tablesCache[table] = [] - self._saveTable(table, []) - return [] - except Exception as e: - logger.error(f"Error loading table {table}: {e}") - return [] - - def _saveTable(self, table: str, data: List[Dict[str, Any]]) -> bool: - """Saves a table to the corresponding JSON file""" - # The system table is handled specially - if table == self._systemTableName: - return False - - path = self._getTablePath(table) - try: - with open(path, 'w', encoding='utf-8') as f: - json.dump(data, f, indent=2, ensure_ascii=False) - - # Update the cache - self._tablesCache[table] = data - return True - except Exception as e: - logger.error(f"Error saving table {table}: {e}") - return False - - def _filterByContext(self, records: List[Dict[str, Any]]) -> List[Dict[str, Any]]: - """ - Filters records by tenant and user context, - if these fields exist in the record. 
- """ - filteredRecords = [] - - for record in records: - # Check if mandateId exists in the record and is not null - hasMandate = "mandateId" in record and record["mandateId"] is not None and record["mandateId"] != "" - - # Check if userId exists in the record and is not null - hasUser = "userId" in record and record["userId"] is not None and record["userId"] != "" - - # If both exist, filter accordingly - if hasMandate and hasUser: - if record["mandateId"] == self.mandateId: - filteredRecords.append(record) - # If only mandateId exists - elif hasMandate and not hasUser: - if record["mandateId"] == self.mandateId: - filteredRecords.append(record) - # If neither mandateId nor userId exist, add the record - elif not hasMandate and not hasUser: - filteredRecords.append(record) - - return filteredRecords - - - def _applyRecordFilter(self, records: List[Dict[str, Any]], recordFilter: Dict[str, Any] = None) -> List[Dict[str, Any]]: - """Applies a record filter to the records""" - if not recordFilter: - return records - - filteredRecords = [] - - for record in records: - match = True - - for field, value in recordFilter.items(): - # Check if the field exists - if field not in record: - match = False - break - - # Handle type conversion for integer comparisons both ways - if isinstance(value, int) and isinstance(record[field], str) and record[field].isdigit(): - # Filter value is int, record value is string - if value != int(record[field]): - match = False - break - elif isinstance(value, str) and value.isdigit() and isinstance(record[field], int): - # Filter value is string, record value is int - if record[field] != int(value): - match = False - break - # Otherwise direct comparison - elif record[field] != value: - match = False - break - - if match: - filteredRecords.append(record) - - return filteredRecords - - def _registerInitialId(self, table: str, initialId: int) -> bool: - """ - Registers the initial ID for a table. 
- - Args: - table: Name of the table - initialId: The initial ID - - Returns: - True on success, False on error - """ - try: - # Load the current system table - systemData = self._loadSystemTable() - - # Only register if not already present - if table not in systemData: - systemData[table] = initialId - success = self._saveSystemTable(systemData) - if success: - logger.info(f"Initial ID {initialId} for table {table} registered") - return success - return True # If already present, this is not an error - except Exception as e: - logger.error(f"Error registering the initial ID for table {table}: {e}") - return False - - def _removeInitialId(self, table: str) -> bool: - """ - Removes the initial ID for a table from the system table. - - Args: - table: Name of the table - - Returns: - True on success, False on error - """ - try: - # Load the current system table - systemData = self._loadSystemTable() - - # Remove the entry if it exists - if table in systemData: - del systemData[table] - success = self._saveSystemTable(systemData) - if success: - logger.info(f"Initial ID for table {table} removed from system table") - return success - return True # If not present, this is not an error - except Exception as e: - logger.error(f"Error removing initial ID for table {table}: {e}") - return False - - # Public API - - def getTables(self) -> List[str]: - """ - Returns a list of all available tables. - - Returns: - List of table names - """ - tables = [] - - try: - for filename in os.listdir(self.dbFolder): - if filename.endswith('.json') and not filename.startswith('_'): - tableName = filename[:-5] # Remove the .json extension - tables.append(tableName) - except Exception as e: - logger.error(f"Error reading the database directory: {e}") - - return tables - - def getFields(self, table: str) -> List[str]: - """ - Returns a list of all fields in a table. 
- - Args: - table: Name of the table - - Returns: - List of field names - """ - # Load the table data - data = self._loadTable(table) - - if not data: - return [] - - # Take the first record as a reference for the fields - fields = list(data[0].keys()) if data else [] - - return fields - - def getSchema(self, table: str, language: str = None) -> Dict[str, Dict[str, Any]]: - """ - Returns a schema object for a table with data types and labels. - - Args: - table: Name of the table - language: Language for the labels (optional) - - Returns: - Schema object with fields, data types and labels - """ - # Load the table data - data = self._loadTable(table) - - schema = {} - - if not data: - return schema - - # Take the first record as a reference for the fields and data types - firstRecord = data[0] - - for field, value in firstRecord.items(): - # Determine the data type - dataType = type(value).__name__ - - # Create label (default is the field name) - label = field - - schema[field] = { - "type": dataType, - "label": label - } - - return schema - - def getRecordset(self, table: str, fieldFilter: List[str] = None, recordFilter: Dict[str, Any] = None) -> List[Dict[str, Any]]: - """ - Returns a list of records from a table, filtered by criteria. 
- - Args: - table: Name of the table - fieldFilter: Filter for fields (which fields should be returned) - recordFilter: Filter for records (which records should be returned) - - Returns: - List of filtered records - """ - # Load the table data - data = self._loadTable(table) - logger.debug(f"getRecordset: data volume of {len(data)} bytes") - - # Filter by tenant and user context - filteredData = self._filterByContext(data) - - # Apply recordFilter if available - if recordFilter: - filteredData = self._applyRecordFilter(filteredData, recordFilter) - - # If fieldFilter is available, reduce the fields - if fieldFilter and isinstance(fieldFilter, list): - result = [] - for record in filteredData: - filteredRecord = {} - for field in fieldFilter: - if field in record: - filteredRecord[field] = record[field] - result.append(filteredRecord) - return result - - return filteredData - - def recordCreate(self, table: str, recordData: Dict[str, Any]) -> Dict[str, Any]: - """ - Creates a new record in the table. 
- - Args: - table: Name of the table - recordData: Data for the new record - - Returns: - The created record - """ - # Load the table data - data = self._loadTable(table) - - # Add mandateId and userId if not present or 0 - if "mandateId" not in recordData or recordData["mandateId"] == 0: - recordData["mandateId"] = self.mandateId - - if "userId" not in recordData or recordData["userId"] == 0: - recordData["userId"] = self.userId - - # Determine the next ID if not present - if "id" not in recordData: - nextId = 1 - if data: - nextId = max(record["id"] for record in data if "id" in record) + 1 - recordData["id"] = nextId - - # If the table is empty and a system ID should be registered - if not data: - self._registerInitialId(table, recordData["id"]) - logger.info(f"Initial ID {recordData['id']} for table {table} has been registered") - - # Add the new record - data.append(recordData) - - # Save the updated table - if self._saveTable(table, data): - return recordData - else: - raise ValueError(f"Error creating the record in table {table}") - - def recordDelete(self, table: str, recordId: Union[str, int]) -> bool: - """ - Deletes a record from the table. 
- - Args: - table: Name of the table - recordId: ID of the record to delete - - Returns: - True on success, False on error - """ - # Load table data - data = self._loadTable(table) - - # Search for the record - for i, record in enumerate(data): - if "id" in record and record["id"] == recordId: - # Check if the record belongs to the current mandate - if "mandateId" in record and record["mandateId"] != self.mandateId: - raise ValueError("Not your mandate") - - # Check if it's an initial record - initialId = self.getInitialId(table) - if initialId is not None and initialId == recordId: - # Remove this entry from the system table - self._removeInitialId(table) - logger.info(f"Initial ID {recordId} for table {table} has been removed from the system table") - - # Delete the record - del data[i] - - # Save the updated table - return self._saveTable(table, data) - - # Record not found - return False - - def recordModify(self, table: str, recordId: Union[str, int], recordData: Dict[str, Any]) -> Dict[str, Any]: - """ - Modifies a record in the table. 
- - Args: - table: Name of the table - recordId: ID of the record to modify - recordData: New data for the record - - Returns: - The updated record - """ - # Load table data - data = self._loadTable(table) - - # Search for the record - for i, record in enumerate(data): - if "id" in record and record["id"] == recordId: - # Check if the record belongs to the current mandate - if "mandateId" in record and record["mandateId"] != self.mandateId: - raise ValueError("Not your mandate") - - # Prevent changing the ID - if "id" in recordData and recordData["id"] != recordId: - raise ValueError(f"The ID of a record in table {table} cannot be changed") - - # Update the record - for key, value in recordData.items(): - data[i][key] = value - - # Save the updated table - if self._saveTable(table, data): - return data[i] - else: - raise ValueError(f"Error updating record in table {table}") - - # Record not found - raise ValueError(f"Record with ID {recordId} not found in table {table}") - - def hasInitialId(self, table: str) -> bool: - """ - Checks if an initial ID is registered for a table. - - Args: - table: Name of the table - - Returns: - True if an initial ID is registered, otherwise False - """ - systemData = self._loadSystemTable() - return table in systemData - - def getInitialId(self, table: str) -> Optional[int]: - """ - Returns the initial ID for a table. - - Args: - table: Name of the table - - Returns: - The initial ID or None if not present - """ - systemData = self._loadSystemTable() - initialId = systemData.get(table) - logger.debug(f"Database '{self.dbDatabase}': Initial ID for table '{table}' is {initialId}") - if initialId is None: - logger.debug(f"No initial ID found for table {table}") - return initialId - - def getAllInitialIds(self) -> Dict[str, int]: - """ - Returns all registered initial IDs. 
- - Returns: - Dictionary with table names as keys and initial IDs as values - """ - systemData = self._loadSystemTable() - return systemData.copy() # Return a copy to protect the original \ No newline at end of file diff --git a/env_dev.env b/env_dev.env index 71cd2dff..ad9fcbaa 100644 --- a/env_dev.env +++ b/env_dev.env @@ -3,7 +3,7 @@ # System Configuration APP_ENV_TYPE = dev APP_ENV_LABEL = Development Instance Patrick -APP_API_URL = http://localhost:8080 +APP_API_URL = http://localhost:8000 # Database Configuration System DB_SYSTEM_HOST=D:/Temp/_powerondb diff --git a/modules/agentAnalyst.py b/modules/agentAnalyst.py index 5e68c91e..93107371 100644 --- a/modules/agentAnalyst.py +++ b/modules/agentAnalyst.py @@ -38,11 +38,10 @@ class AgentAnalyst(AgentBase): def setDependencies(self, mydom=None): """Set external dependencies for the agent.""" - self.mydom = mydom - + async def processTask(self, task: Dict[str, Any]) -> Dict[str, Any]: """ - Process a task by focusing on required outputs and using AI to generate them. + Process a task by focusing on required outputs and using AI to guide the analysis process. 
Args: task: Task dictionary with prompt, inputDocuments, outputSpecifications @@ -53,62 +52,49 @@ class AgentAnalyst(AgentBase): try: # Extract task information prompt = task.get("prompt", "") - inputDocuments = task.get("inputDocuments", []) outputSpecs = task.get("outputSpecifications", []) + workflow = task.get("context", {}).get("workflow", {}) # Check AI service if not self.mydom: return { - "feedback": "The Analyst agent requires an AI service to function.", + "feedback": "The Analyst agent requires an AI service to function effectively.", "documents": [] } - # Extract data from documents - focusing only on dataExtracted - datasets, documentContext = self._extractData(inputDocuments) + # Create analysis plan + if workflow: + self.workflowManager.logAdd(workflow, "Extracting data from documents...", level="info", progress=35) + analysisPlan = await self._createAnalysisPlan(prompt) - # Generate task analysis to understand what's needed - analysisPlan = await self._analyzeTask(prompt, documentContext, datasets, outputSpecs) + # Check if this is truly an analysis task + if not analysisPlan.get("requiresAnalysis", True): + return { + "feedback": "This task doesn't appear to require analysis. 
Please try a different agent.", + "documents": [] + } - # Generate all required output documents - documents = [] + # Analyze data + if workflow: + self.workflowManager.logAdd(workflow, "Analyzing task requirements...", level="info", progress=45) + analysisResults = await self._analyzeData(task, analysisPlan) - # If no output specs provided, create default analysis outputs - if not outputSpecs: - outputSpecs = [] + # Format results into requested output documents + totalSpecs = len(outputSpecs) + for i, spec in enumerate(outputSpecs): + progress = 50 + int((i / totalSpecs) * 40) # Progress from 50% to 90% + if self.workflowManager: + self.workflowManager.logAdd(workflow, f"Creating output {i+1}/{totalSpecs}...", level="info", progress=progress) - # Process each output specification - for spec in outputSpecs: - outputLabel = spec.get("label", "") - outputDescription = spec.get("description", "") - - # Determine type based on file extension - outputType = outputLabel.split('.')[-1].lower() if '.' in outputLabel else "txt" - - # Generate appropriate content based on output type - if outputType in ['png', 'jpg', 'jpeg', 'svg']: - # Create visualization - document = await self._createVisualization( - datasets, prompt, outputLabel, analysisPlan, outputDescription - ) - documents.append(document) - elif outputType in ['csv', 'json', 'xlsx']: - # Create data document - document = await self._createDataDocument( - datasets, prompt, outputLabel, analysisPlan, outputDescription - ) - documents.append(document) - else: - # Create text document (report, analysis, etc.) 
- document = await self._createTextDocument( - datasets, documentContext, prompt, outputLabel, - outputType, analysisPlan, outputDescription - ) - documents.append(document) + documents = await self._createOutputDocuments( + prompt, + analysisResults, + outputSpecs, + analysisPlan + ) # Generate feedback - feedback = f"{analysisPlan.get('analysisApproach')}" - if analysisPlan.get("keyInsights"): - feedback += f"\n\n{analysisPlan.get('keyInsights')}" + feedback = analysisPlan.get("feedback", f"I analyzed '{prompt[:50]}...' and generated {len(documents)} output documents.") return { "feedback": feedback, @@ -116,7 +102,7 @@ class AgentAnalyst(AgentBase): } except Exception as e: - logger.error(f"Error in analysis: {str(e)}", exc_info=True) + logger.error(f"Error during analysis: {str(e)}", exc_info=True) return { "feedback": f"Error during analysis: {str(e)}", "documents": [] @@ -196,69 +182,74 @@ class AgentAnalyst(AgentBase): return datasets, documentContext - async def _analyzeTask(self, prompt: str, context: str, datasets: Dict, outputSpecs: List) -> Dict: + async def _analyzeTask(self, prompt: str, documentContext: str, datasets: Dict[str, Any], outputSpecs: List[Dict[str, Any]]) -> Dict[str, Any]: """ - Use AI to analyze the task and create a plan for analysis. + Analyze the task requirements using AI. 
Args: prompt: The task prompt - context: Document context text - datasets: Dictionary of extracted datasets + documentContext: Context from input documents + datasets: Available datasets outputSpecs: Output specifications Returns: Analysis plan dictionary """ - # Prepare dataset information - datasetInfo = {} - for name, df in datasets.items(): - try: - datasetInfo[name] = { - "shape": df.shape, - "columns": df.columns.tolist(), - "dtypes": {col: str(df[col].dtype) for col in df.columns}, - "sample": df.head(3).to_dict(orient='records') - } - except: - datasetInfo[name] = {"error": "Could not process dataset"} - + # Create analysis prompt analysisPrompt = f""" - Analyze this data analysis task and create a plan. + Analyze this data analysis task and create a detailed plan: TASK: {prompt} - AVAILABLE DATA: - {json.dumps(datasetInfo, indent=2)} - DOCUMENT CONTEXT: - {context[:1000]}... (truncated) + {documentContext} - OUTPUT REQUIREMENTS: + AVAILABLE DATASETS: + {json.dumps(datasets, indent=2)} + + REQUIRED OUTPUTS: {json.dumps(outputSpecs, indent=2)} - Create a detailed analysis plan in JSON format with the following structure: + Create a detailed analysis plan in JSON format with: {{ - "analysisType": "statistical|trend|comparative|predictive|cluster|general", - "keyQuestions": ["question1", "question2"], - "recommendedVisualizations": [{{ - "type": "chart_type", - "dataSource": "dataset_name", - "variables": ["col1", "col2"], - "purpose": "explanation" - }}], - "keyInsights": "brief summary of initial insights", - "analysisApproach": "brief description of recommended approach" + "analysisSteps": [ + {{ + "step": "step description", + "purpose": "why this step is needed", + "datasets": ["dataset1", "dataset2"], + "techniques": ["technique1", "technique2"], + "outputs": ["output1", "output2"] + }} + ], + "visualizations": [ + {{ + "type": "visualization type", + "purpose": "what it shows", + "datasets": ["dataset1"], + "settings": {{"key": "value"}} + }} + ], + 
"insights": [ + {{ + "type": "insight type", + "description": "what to look for", + "datasets": ["dataset1"] + }} + ], + "feedback": "explanation of the analysis approach" }} - Only return valid JSON. No preamble or explanations. + Respond with ONLY the JSON object, no additional text or explanations. """ + try: + # Get analysis plan from AI response = await self.mydom.callAi([ - {"role": "system", "content": "You are a data analysis expert. Respond with valid JSON only."}, + {"role": "system", "content": "You are a data analysis expert. Create detailed analysis plans. Respond with valid JSON only."}, {"role": "user", "content": analysisPrompt} - ], produceUserAnswer = True) + ], produceUserAnswer=True) - # Extract JSON from response + # Extract JSON jsonStart = response.find('{') jsonEnd = response.rfind('}') + 1 @@ -266,154 +257,367 @@ class AgentAnalyst(AgentBase): plan = json.loads(response[jsonStart:jsonEnd]) return plan else: - # Fallback if JSON not found + # Fallback plan + logger.warning(f"Not able creating analysis plan, generating fallback plan") return { - "analysisType": "general", - "keyQuestions": ["What insights can be extracted from this data?"], - "recommendedVisualizations": [], - "keyInsights": "Analysis plan could not be created", - "analysisApproach": "General exploratory analysis" + "analysisSteps": [ + { + "step": "Basic data analysis", + "purpose": "Understand the data structure and content", + "datasets": list(datasets.keys()), + "techniques": ["summary statistics", "data visualization"], + "outputs": ["summary report", "basic visualizations"] + } + ], + "visualizations": [ + { + "type": "basic charts", + "purpose": "Show data distribution and relationships", + "datasets": list(datasets.keys()), + "settings": {} + } + ], + "insights": [ + { + "type": "basic insights", + "description": "Key findings from the data", + "datasets": list(datasets.keys()) + } + ], + "feedback": f"I'll analyze the data and provide insights about {prompt}" } 
except Exception as e: logger.warning(f"Error creating analysis plan: {str(e)}") + # Simple fallback plan return { - "analysisType": "general", - "keyQuestions": ["What insights can be extracted from this data?"], - "recommendedVisualizations": [], - "keyInsights": "Analysis plan could not be created", - "analysisApproach": "General exploratory analysis" + "analysisSteps": [ + { + "step": "Basic data analysis", + "purpose": "Understand the data structure and content", + "datasets": list(datasets.keys()), + "techniques": ["summary statistics", "data visualization"], + "outputs": ["summary report", "basic visualizations"] + } + ], + "visualizations": [ + { + "type": "basic charts", + "purpose": "Show data distribution and relationships", + "datasets": list(datasets.keys()), + "settings": {} + } + ], + "insights": [ + { + "type": "basic insights", + "description": "Key findings from the data", + "datasets": list(datasets.keys()) + } + ], + "feedback": f"I'll analyze the data and provide insights about {prompt}" + } + + async def _createAnalysisPlan(self, prompt: str) -> Dict[str, Any]: + """ + Create an analysis plan based on the task prompt. 
+ + Args: + prompt: The task prompt + + Returns: + Analysis plan dictionary + """ + try: + # Create analysis prompt + analysisPrompt = f""" + Analyze this data analysis task and create a detailed plan: + + TASK: {prompt} + + Create a detailed analysis plan in JSON format with: + {{ + "requiresAnalysis": true/false, + "analysisSteps": [ + {{ + "step": "step description", + "purpose": "why this step is needed", + "techniques": ["technique1", "technique2"], + "outputs": ["output1", "output2"] + }} + ], + "visualizations": [ + {{ + "type": "visualization type", + "purpose": "what it shows", + "settings": {{"key": "value"}} + }} + ], + "insights": [ + {{ + "type": "insight type", + "description": "what to look for" + }} + ], + "feedback": "explanation of the analysis approach" + }} + + Respond with ONLY the JSON object, no additional text or explanations. + """ + + # Get analysis plan from AI + response = await self.mydom.callAi([ + {"role": "system", "content": "You are a data analysis expert. Create detailed analysis plans. 
Respond with valid JSON only."}, + {"role": "user", "content": analysisPrompt} + ], produceUserAnswer=True) + + # Extract JSON + jsonStart = response.find('{') + jsonEnd = response.rfind('}') + 1 + + if jsonStart >= 0 and jsonEnd > jsonStart: + plan = json.loads(response[jsonStart:jsonEnd]) + return plan + else: + # Fallback plan + logger.warning(f"Not able creating analysis plan, generating fallback plan") + return { + "requiresAnalysis": True, + "analysisSteps": [ + { + "step": "Basic data analysis", + "purpose": "Understand the data structure and content", + "techniques": ["summary statistics", "data visualization"], + "outputs": ["summary report", "basic visualizations"] + } + ], + "visualizations": [ + { + "type": "basic charts", + "purpose": "Show data distribution and relationships", + "settings": {} + } + ], + "insights": [ + { + "type": "basic insights", + "description": "Key findings from the data" + } + ], + "feedback": f"I'll analyze the data and provide insights about {prompt}" + } + + except Exception as e: + logger.warning(f"Error creating analysis plan: {str(e)}") + # Simple fallback plan + return { + "requiresAnalysis": True, + "analysisSteps": [ + { + "step": "Basic data analysis", + "purpose": "Understand the data structure and content", + "techniques": ["summary statistics", "data visualization"], + "outputs": ["summary report", "basic visualizations"] + } + ], + "visualizations": [ + { + "type": "basic charts", + "purpose": "Show data distribution and relationships", + "settings": {} + } + ], + "insights": [ + { + "type": "basic insights", + "description": "Key findings from the data" + } + ], + "feedback": f"I'll analyze the data and provide insights about {prompt}" } async def _createVisualization(self, datasets: Dict, prompt: str, outputLabel: str, analysisPlan: Dict, description: str) -> Dict: """ - Create visualization document using AI guidance. + Create a visualization based on the analysis plan. 
Args: datasets: Dictionary of datasets prompt: Original task prompt - outputLabel: Output filename - analysisPlan: Analysis plan from AI + outputLabel: Output file label + analysisPlan: Analysis plan description: Output description Returns: - Visualization document + Document dictionary with visualization """ - # Determine format from filename - formatType = outputLabel.split('.')[-1].lower() - if formatType not in ['png', 'jpg', 'jpeg', 'svg']: - formatType = 'png' - - # If no datasets available, create error message image - if not datasets: - plt.figure(figsize=(10, 6)) - plt.text(0.5, 0.5, "No data available for visualization", - ha='center', va='center', fontsize=14) - plt.tight_layout() - imgData = self._getImageBase64(formatType) - plt.close() - - return { - "label": outputLabel, - "content": imgData, - "metadata": { - "contentType": f"image/{formatType}" - } - } - - # Get recommended visualization from plan - recommendedViz = analysisPlan.get("recommendedVisualizations", []) - - # Prepare dataset info for the first dataset if none specified - if not recommendedViz and datasets: - name, df = next(iter(datasets.items())) - recommendedViz = [{ - "type": "auto", - "dataSource": name, - "variables": df.columns.tolist()[:5], - "purpose": "general analysis" - }] - - # Create visualization code prompt - vizPrompt = f""" - Generate Python matplotlib/seaborn code to create a visualization for: - - TASK: {prompt} - - VISUALIZATION REQUIREMENTS: - - Output format: {formatType} - - Filename: {outputLabel} - - Description: {description} - - RECOMMENDED VISUALIZATION: - {json.dumps(recommendedViz, indent=2)} - - AVAILABLE DATASETS: - """ - - # Add dataset info for recommended sources - for viz in recommendedViz: - dataSource = viz.get("dataSource") - if dataSource in datasets: - df = datasets[dataSource] - vizPrompt += f"\nDataset '{dataSource}':\n" - vizPrompt += f"- Shape: {df.shape}\n" - vizPrompt += f"- Columns: {df.columns.tolist()}\n" - vizPrompt += f"- Sample data: 
{df.head(3).to_dict(orient='records')}\n" - - vizPrompt += """ - Generate ONLY Python code that: - 1. Uses matplotlib and/or seaborn to create a clear visualization - 2. Sets figure size to (10, 6) - 3. Includes appropriate titles, labels, and legend - 4. Uses professional color schemes - 5. Handles any missing data gracefully - - Return ONLY executable Python code, no explanations or markdown. - """ - try: - # Get visualization code from AI - vizCode = await self.mydom.callAi([ - {"role": "system", "content": "You are a data visualization expert. Provide only executable Python code."}, - {"role": "user", "content": vizPrompt} - ], produceUserAnswer = True) + # Get visualization recommendations + vizRecommendations = analysisPlan.get("visualizations", []) - # Clean code - vizCode = vizCode.replace("```python", "").replace("```", "").strip() - - # Execute visualization code - plt.figure(figsize=(10, 6)) - - # Make local variables available to the code - localVars = { - "plt": plt, - "sns": sns, - "pd": pd, - "np": __import__('numpy') - } - - # Add datasets to local variables - for name, df in datasets.items(): - # Create a sanitized variable name - varName = ''.join(c if c.isalnum() else '_' for c in name) - localVars[varName] = df + if not vizRecommendations: + # Generate visualization recommendations if none provided + self.mydom.logAdd(analysisPlan.get("workflowId"), "Generating visualization recommendations...", level="info", progress=50) + vizPrompt = f""" + Based on this data and task, recommend appropriate visualizations. 
- # Also add with standard names for simpler code - if "df" not in localVars: - localVars["df"] = df - elif "df2" not in localVars: - localVars["df2"] = df + TASK: {prompt} + DESCRIPTION: {description} + + DATASETS: + {json.dumps({name: {"shape": df.shape, "columns": df.columns.tolist()} + for name, df in datasets.items()}, indent=2)} + + Recommend visualizations in JSON format: + {{ + "visualizations": [ + {{ + "type": "chart_type", + "dataSource": "dataset_name", + "variables": ["col1", "col2"], + "purpose": "explanation" + }} + ] + }} + """ + + response = await self.mydom.callAi([ + {"role": "system", "content": "You are a data visualization expert. Recommend appropriate visualizations based on the data and task."}, + {"role": "user", "content": vizPrompt} + ]) + + # Extract JSON + jsonStart = response.find('{') + jsonEnd = response.rfind('}') + 1 + + if jsonStart >= 0 and jsonEnd > jsonStart: + vizData = json.loads(response[jsonStart:jsonEnd]) + vizRecommendations = vizData.get("visualizations", []) - # Execute the visualization code - exec(vizCode, globals(), localVars) + # Determine format from filename + formatType = outputLabel.split('.')[-1].lower() + if formatType not in ['png', 'jpg', 'jpeg', 'svg']: + formatType = 'png' - # Capture the image - imgData = self._getImageBase64(formatType) - plt.close() + # If no datasets available, create error message image + if not datasets: + plt.figure(figsize=(10, 6)) + plt.text(0.5, 0.5, "No data available for visualization", + ha='center', va='center', fontsize=14) + plt.tight_layout() + imgData = self._getImageBase64(formatType) + plt.close() + + return { + "label": outputLabel, + "content": imgData, + "metadata": { + "contentType": f"image/{formatType}" + } + } - return self.formatAgentDocumentOutput(outputLabel, imgData, f"image/{formatType}") + # Prepare dataset info for the first dataset if none specified + if not vizRecommendations and datasets: + name, df = next(iter(datasets.items())) + vizRecommendations = 
[{ + "type": "auto", + "dataSource": name, + "variables": df.columns.tolist()[:5], + "purpose": "general analysis" + }] + + # Create visualization code prompt + vizPrompt = f""" + Generate Python matplotlib/seaborn code to create a visualization for: + + TASK: {prompt} + + VISUALIZATION REQUIREMENTS: + - Output format: {formatType} + - Filename: {outputLabel} + - Description: {description} + + RECOMMENDED VISUALIZATION: + {json.dumps(vizRecommendations, indent=2)} + + AVAILABLE DATASETS: + """ + + # Add dataset info for recommended sources + for viz in vizRecommendations: + dataSource = viz.get("dataSource") + if dataSource in datasets: + df = datasets[dataSource] + vizPrompt += f"\nDataset '{dataSource}':\n" + vizPrompt += f"- Shape: {df.shape}\n" + vizPrompt += f"- Columns: {df.columns.tolist()}\n" + vizPrompt += f"- Sample data: {df.head(3).to_dict(orient='records')}\n" + + vizPrompt += """ + Generate ONLY Python code that: + 1. Uses matplotlib and/or seaborn to create a clear visualization + 2. Sets figure size to (10, 6) + 3. Includes appropriate titles, labels, and legend + 4. Uses professional color schemes + 5. Handles any missing data gracefully + + Return ONLY executable Python code, no explanations or markdown. + """ + + try: + # Get visualization code from AI + vizCode = await self.mydom.callAi([ + {"role": "system", "content": "You are a data visualization expert. 
Provide only executable Python code."}, + {"role": "user", "content": vizPrompt} + ], produceUserAnswer = True) + + # Clean code + vizCode = vizCode.replace("```python", "").replace("```", "").strip() + + # Execute visualization code + plt.figure(figsize=(10, 6)) + + # Make local variables available to the code + localVars = { + "plt": plt, + "sns": sns, + "pd": pd, + "np": __import__('numpy') + } + + # Add datasets to local variables + for name, df in datasets.items(): + # Create a sanitized variable name + varName = ''.join(c if c.isalnum() else '_' for c in name) + localVars[varName] = df + + # Also add with standard names for simpler code + if "df" not in localVars: + localVars["df"] = df + elif "df2" not in localVars: + localVars["df2"] = df + + # Execute the visualization code + exec(vizCode, globals(), localVars) + + # Capture the image + imgData = self._getImageBase64(formatType) + plt.close() + + return self.formatAgentDocumentOutput(outputLabel, imgData, f"image/{formatType}") + + except Exception as e: + logger.error(f"Error creating visualization: {str(e)}", exc_info=True) + + # Create error message image + plt.figure(figsize=(10, 6)) + plt.text(0.5, 0.5, f"Visualization error: {str(e)}", + ha='center', va='center', fontsize=12) + plt.tight_layout() + imgData = self._getImageBase64(formatType) + plt.close() + + return self.formatAgentDocumentOutput(outputLabel, imgData, f"image/{formatType}") except Exception as e: logger.error(f"Error creating visualization: {str(e)}", exc_info=True) @@ -664,6 +868,102 @@ class AgentAnalyst(AgentBase): # Convert to base64 return base64.b64encode(imageData).decode('utf-8') + async def _analyzeData(self, task: Dict[str, Any], analysisPlan: Dict[str, Any]) -> Dict[str, Any]: + """ + Analyze data based on the analysis plan. 
+ + Args: + task: Task dictionary with input documents and specifications + analysisPlan: Analysis plan from _createAnalysisPlan + + Returns: + Analysis results dictionary + """ + try: + # Extract data from input documents + inputDocuments = task.get("inputDocuments", []) + datasets, documentContext = self._extractData(inputDocuments) + + # Get task information + prompt = task.get("prompt", "") + outputSpecs = task.get("outputSpecifications", []) + + # Analyze task requirements + analysisResults = await self._analyzeTask(prompt, documentContext, datasets, outputSpecs) + + # Add datasets and context to results + analysisResults["datasets"] = datasets + analysisResults["documentContext"] = documentContext + + return analysisResults + + except Exception as e: + logger.error(f"Error analyzing data: {str(e)}", exc_info=True) + return { + "error": str(e), + "datasets": {}, + "documentContext": "" + } + + async def _createOutputDocuments(self, prompt: str, analysisResults: Dict[str, Any], + outputSpecs: List[Dict[str, Any]], analysisPlan: Dict[str, Any]) -> List[Dict[str, Any]]: + """ + Create output documents based on analysis results. + + Args: + prompt: Original task prompt + analysisResults: Results from data analysis + outputSpecs: List of output specifications + analysisPlan: Analysis plan from _createAnalysisPlan + + Returns: + List of document objects + """ + documents = [] + datasets = analysisResults.get("datasets", {}) + documentContext = analysisResults.get("documentContext", "") + + # Process each output specification + for spec in outputSpecs: + outputLabel = spec.get("label", "") + outputDescription = spec.get("description", "") + + # Determine format from filename + formatType = outputLabel.split('.')[-1].lower() if '.' 
in outputLabel else "txt" + + try: + # Create appropriate document based on format + if formatType in ["png", "jpg", "jpeg", "svg"]: + # Visualization output + document = await self._createVisualization( + datasets, prompt, outputLabel, analysisPlan, outputDescription + ) + elif formatType in ["csv", "json", "xlsx"]: + # Data document output + document = await self._createDataDocument( + datasets, prompt, outputLabel, analysisPlan, outputDescription + ) + else: + # Text document output (markdown, html, text) + document = await self._createTextDocument( + datasets, documentContext, prompt, outputLabel, formatType, + analysisPlan, outputDescription + ) + + documents.append(document) + + except Exception as e: + logger.error(f"Error creating output document {outputLabel}: {str(e)}", exc_info=True) + # Create error document + errorDoc = self.formatAgentDocumentOutput( + outputLabel, + f"Error creating document: {str(e)}", + "text/plain" + ) + documents.append(errorDoc) + + return documents + # Factory function for the Analyst agent def getAgentAnalyst(): diff --git a/modules/agentCoach.py b/modules/agentCoach.py index 6a0616bf..5995dff7 100644 --- a/modules/agentCoach.py +++ b/modules/agentCoach.py @@ -33,8 +33,7 @@ class AgentCoach(AgentBase): def setDependencies(self, mydom=None): """Set external dependencies for the agent.""" - self.mydom = mydom - + async def processTask(self, task: Dict[str, Any]) -> Dict[str, Any]: """ Process a task by directly using AI to provide answers or content based on extracted data. diff --git a/modules/agentCoder.py b/modules/agentCoder.py index 60dc0072..8950e6c7 100644 --- a/modules/agentCoder.py +++ b/modules/agentCoder.py @@ -41,8 +41,7 @@ class AgentCoder(AgentBase): def setDependencies(self, mydom=None): """Set external dependencies for the agent.""" - self.mydom = mydom - + async def processTask(self, task: Dict[str, Any]) -> Dict[str, Any]: """ Process a task and perform code development/execution. 
diff --git a/modules/agentDocumentation.py b/modules/agentDocumentation.py index 38b401d2..259795fe 100644 --- a/modules/agentDocumentation.py +++ b/modules/agentDocumentation.py @@ -30,8 +30,7 @@ class AgentDocumentation(AgentBase): def setDependencies(self, mydom=None): """Set external dependencies for the agent.""" - self.mydom = mydom - + async def processTask(self, task: Dict[str, Any]) -> Dict[str, Any]: """ Process a task by focusing on required outputs and using AI to generate them. diff --git a/modules/agentEmail.py b/modules/agentEmail.py index effd0591..116e58fc 100644 --- a/modules/agentEmail.py +++ b/modules/agentEmail.py @@ -7,8 +7,8 @@ import logging import json import base64 import os -import msal import requests +import msal from typing import Dict, Any, List, Optional from modules.configuration import APP_CONFIG @@ -41,15 +41,11 @@ class AgentEmail(AgentBase): self.authority = None self.scopes = ["Mail.ReadWrite", "User.Read"] - # Token storage directory - self.token_dir = './token_storage' - if not os.path.exists(self.token_dir): - os.makedirs(self.token_dir) - logger.info(f"Created token storage directory: {self.token_dir}") + # API base URL for Microsoft authentication + self.api_base_url = APP_CONFIG.get("APP_API_URL", "(no-url)") def setDependencies(self, mydom=None): """Set external dependencies for the agent.""" - self.mydom = mydom self._loadConfiguration() def _loadConfiguration(self): @@ -84,6 +80,7 @@ class AgentEmail(AgentBase): # Extract task information prompt = task.get("prompt", "") inputDocuments = task.get("inputDocuments", []) + outputSpecs = task.get("outputSpecifications", []) # Check AI service if not self.mydom: @@ -131,22 +128,36 @@ class AgentEmail(AgentBase): # Prepare output documents documents = [] - # Add HTML preview document - previewDoc = self.formatAgentDocumentOutput( - "email_preview.html", - htmlPreview, - "text/html" - ) - documents.append(previewDoc) - - # Add email template as JSON for reference - 
templateJson = json.dumps(emailTemplate, indent=2) - templateDoc = self.formatAgentDocumentOutput( - "email_template.json", - templateJson, - "application/json" - ) - documents.append(templateDoc) + # Process output specifications + for spec in outputSpecs: + label = spec.get("label", "") + description = spec.get("description", "") + + if label.endswith(".html"): + # Create the HTML template file + templateDoc = self.formatAgentDocumentOutput( + label, + emailTemplate["htmlBody"], # Use the actual HTML body, not the preview + "text/html" + ) + documents.append(templateDoc) + elif label.endswith(".json"): + # Create JSON template if requested + templateJson = json.dumps(emailTemplate, indent=2) + templateDoc = self.formatAgentDocumentOutput( + label, + templateJson, + "application/json" + ) + documents.append(templateDoc) + else: + # Default to preview for other cases + previewDoc = self.formatAgentDocumentOutput( + label, + htmlPreview, + "text/html" + ) + documents.append(previewDoc) # Prepare feedback message if draft_result: @@ -233,28 +244,20 @@ class AgentEmail(AgentBase): # Add document name to contents documentContents.append(f"\n\n--- {docName} ---\n") - # Process contents - hasAttachment = False - for content in doc.get("contents", []): - # Add extracted text to document contents - if content.get("dataExtracted"): - documentContents.append(content.get("dataExtracted", "")) - - # Prepare attachment if it has content data - if content.get("data"): - # Check if this content should be an attachment - # Typically files like PDFs, images, etc. 
- contentType = content.get("contentType", "") - if (not contentType.startswith("text/") or - contentType in ["application/pdf", "application/msword"]): - hasAttachment = True - - # If document has content to attach, add to attachments - if hasAttachment: + # Process document data directly + if doc.get("data"): + # Add to attachments with proper metadata attachments.append({ "name": docName, - "document": doc + "document": { + "data": doc["data"], + "mimeType": doc.get("mimeType", "application/octet-stream"), + "base64Encoded": doc.get("base64Encoded", False) + } }) + documentContents.append(f"Document attached: {docName}") + else: + documentContents.append(f"Document referenced: {docName}") return "\n".join(documentContents), attachments @@ -294,7 +297,7 @@ class AgentEmail(AgentBase): try: response = await self.mydom.callAi([ - {"role": "system", "content": "You are an email template specialist. Respond with valid JSON only."}, + {"role": "system", "content": "You are an email template specialist. Create professional emails. Respond with valid JSON only."}, {"role": "user", "content": emailPrompt} ], produceUserAnswer=True) @@ -306,7 +309,8 @@ class AgentEmail(AgentBase): template = json.loads(response[jsonStart:jsonEnd]) return template else: - # Fallback if JSON not found + # Fallback plan + logger.warning(f"Not able creating email template, generating fallback plan") return { "recipient": "recipient@example.com", "subject": "Information Regarding Your Request", @@ -377,125 +381,86 @@ class AgentEmail(AgentBase): """ return html - def _getCurrentUserToken(self): + def _getCurrentUserToken(self) -> tuple: """ - Get the current user's token from the token store. - Does not attempt to initiate authentication flow. - - Returns: - Tuple of (user info, access token) or (None, None) if no valid token + Get the current user's Microsoft token using the current user context. + Returns tuple of (user_info, access_token) or (None, None) if not authenticated. 
""" try: - # Check if we have any token files - if not os.path.exists(self.token_dir) or not os.listdir(self.token_dir): - logger.warning("No token files found. User needs to authenticate with Microsoft.") + if not self.mydom: + logger.error("No mydom interface available") return None, None - # Find the most recently modified token file - token_files = [os.path.join(self.token_dir, f) for f in os.listdir(self.token_dir) if f.endswith('.json')] - if not token_files: + # Get token data from database + token_data = self.mydom.getMsftToken() + if not token_data: + logger.info("No Microsoft token found for user") return None, None - most_recent = max(token_files, key=os.path.getmtime) - user_id = os.path.basename(most_recent).split('.')[0] + # Verify token is still valid + if not self._verifyToken(token_data.get("access_token")): + logger.info("Token invalid, attempting refresh") + if not self._refreshToken(token_data): + logger.info("Token refresh failed") + return None, None + # Get updated token data after refresh + token_data = self.mydom.getMsftToken() + + return token_data.get("user_info"), token_data.get("access_token") - # Load the token - token_data = self._loadTokenFromFile(user_id) - if not token_data or not token_data.get("access_token"): - logger.warning(f"No valid token data for user {user_id}") - return None, None - - # Get user info from token - user_info = self._getUserInfoFromToken(token_data["access_token"]) - if not user_info: - # Try to refresh the token - if self._refreshToken(user_id): - # Load the refreshed token - token_data = self._loadTokenFromFile(user_id) - if token_data and token_data.get("access_token"): - user_info = self._getUserInfoFromToken(token_data["access_token"]) - if user_info: - return user_info, token_data["access_token"] - - logger.warning(f"Could not get user info for user {user_id}") - return None, None - - return user_info, token_data["access_token"] except Exception as e: logger.error(f"Error getting current user token: 
{str(e)}") return None, None - - def _loadTokenFromFile(self, user_id): - """Load token data from a file""" - filename = os.path.join(self.token_dir, f"{user_id}.json") - if os.path.exists(filename): - try: - with open(filename, 'r') as f: - return json.load(f) - except Exception as e: - logger.error(f"Error loading token file: {str(e)}") - return None - return None - - def _getUserInfoFromToken(self, access_token): - """Get user information using the access token""" - headers = { - 'Authorization': f'Bearer {access_token}', - 'Content-Type': 'application/json' - } - + + def _verifyToken(self, token: str) -> bool: + """Verify the access token is valid""" try: + headers = { + 'Authorization': f'Bearer {token}', + 'Content-Type': 'application/json' + } + response = requests.get('https://graph.microsoft.com/v1.0/me', headers=headers) - if response.status_code == 200: - user_data = response.json() - return { - "name": user_data.get("displayName", ""), - "email": user_data.get("userPrincipalName", ""), - "id": user_data.get("id", "") - } - else: - logger.error(f"Error getting user info: {response.status_code} - {response.text}") - return None + return response.status_code == 200 + except Exception as e: - logger.error(f"Exception getting user info: {str(e)}") - return None - - def _refreshToken(self, user_id): + logger.error(f"Error verifying token: {str(e)}") + return False + + def _refreshToken(self, token_data: Dict[str, Any]) -> bool: """Refresh the access token using the stored refresh token""" - token_data = self._loadTokenFromFile(user_id) - if not token_data or not token_data.get("refresh_token"): - logger.warning("No refresh token available") - return False - - msal_app = msal.ConfidentialClientApplication( - self.client_id, - authority=self.authority, - client_credential=self.client_secret - ) - - result = msal_app.acquire_token_by_refresh_token( - token_data["refresh_token"], - scopes=self.scopes - ) - - if "error" in result: - logger.error(f"Error refreshing 
token: {result.get('error')}") - return False - - # Update tokens in storage - token_data["access_token"] = result["access_token"] - if "refresh_token" in result: - token_data["refresh_token"] = result["refresh_token"] - - # Save the updated token - filename = os.path.join(self.token_dir, f"{user_id}.json") try: - with open(filename, 'w') as f: - json.dump(token_data, f) - logger.info(f"Token saved for user: {user_id}") + if not token_data or not token_data.get("refresh_token"): + logger.warning("No refresh token available") + return False + + msal_app = msal.ConfidentialClientApplication( + self.client_id, + authority=self.authority, + client_credential=self.client_secret + ) + + result = msal_app.acquire_token_by_refresh_token( + token_data["refresh_token"], + scopes=self.scopes + ) + + if "error" in result: + logger.error(f"Error refreshing token: {result.get('error')}") + return False + + # Update token data + token_data["access_token"] = result["access_token"] + if "refresh_token" in result: + token_data["refresh_token"] = result["refresh_token"] + + # Save updated token + self.mydom.saveMsftToken(token_data) + logger.info("Access token refreshed successfully") return True + except Exception as e: - logger.error(f"Error saving token file: {str(e)}") + logger.error(f"Error refreshing token: {str(e)}") return False def _createDraftEmail(self, recipient, subject, body, attachments=None): @@ -522,8 +487,8 @@ class AgentEmail(AgentBase): def _createGraphDraftEmail(self, access_token, recipient, subject, body, attachments=None): """ - Create a draft email using Microsoft Graph API with fixed attachment handling. - Directly uses the document's data attribute for attachments. + Create a draft email using Microsoft Graph API. + Treats all files as binary attachments without content analysis. 
Args: access_token: Microsoft Graph access token @@ -540,7 +505,7 @@ class AgentEmail(AgentBase): 'Content-Type': 'application/json' } - # Prepare email data + # Prepare email data with proper structure email_data = { 'subject': subject, 'body': { @@ -561,94 +526,84 @@ class AgentEmail(AgentBase): email_data['attachments'] = [] for attachment in attachments: - # Get the document object doc = attachment.get('document', {}) file_name = attachment.get('name', 'attachment.file') logger.info(f"Processing attachment: {file_name}") - # Directly access the data attribute from the document - if 'data' in doc: - file_content = doc['data'] - is_base64 = doc.get('base64Encoded', False) - - # Determine content type - content_type = "application/octet-stream" - if 'mimeType' in doc: - content_type = doc['mimeType'] - elif 'contentType' in doc: - content_type = doc['contentType'] - - # Check if we need to encode the content - if not is_base64: - logger.info(f"Base64 encoding content for {file_name}") + # Get the document data directly + file_content = doc.get('data') + if not file_content: + logger.warning(f"No data found for attachment: {file_name}") + continue + + # Get content type from document metadata + mime_type = doc.get('mimeType', 'application/octet-stream') + is_base64 = doc.get('base64Encoded', False) + + # Handle content encoding + try: + if is_base64: + # Content is already base64 encoded + content_bytes = file_content + else: + # Content needs to be base64 encoded if isinstance(file_content, str): - try: - # Check if already valid base64 - base64.b64decode(file_content) - logger.info("Content appears to be valid base64 already") - except: - # Not valid base64, encode it - logger.info("Encoding string content to base64") - file_content = base64.b64encode(file_content.encode('utf-8')).decode('utf-8') + # For text files, encode the string to bytes first + content_bytes = base64.b64encode(file_content.encode('utf-8')).decode('utf-8') elif isinstance(file_content, 
bytes): - logger.info("Encoding bytes content to base64") - file_content = base64.b64encode(file_content).decode('utf-8') + # For binary files, encode directly + content_bytes = base64.b64encode(file_content).decode('utf-8') + else: + logger.warning(f"Unexpected content type for {file_name}") + continue + + # Calculate size from decoded content + decoded_size = len(base64.b64decode(content_bytes)) # Add attachment to email data - logger.info(f"Adding attachment: {file_name} ({content_type})") + logger.info(f"Adding attachment: {file_name} ({mime_type}, size: {decoded_size} bytes)") attachment_data = { '@odata.type': '#microsoft.graph.fileAttachment', 'name': file_name, - 'contentType': content_type, - 'contentBytes': file_content + 'contentType': mime_type, + 'contentBytes': content_bytes, + 'isInline': False, + 'size': decoded_size } email_data['attachments'].append(attachment_data) logger.info(f"Successfully added attachment: {file_name}") - else: - logger.warning(f"Document does not contain 'data' attribute: {file_name}") - # Try to find data in the fileId - if 'fileId' in doc: - logger.info(f"Found fileId: {doc['fileId']} - could implement fileId-based attachment lookup here") - # Future enhancement: implement file lookup by fileId + + except Exception as e: + logger.error(f"Error processing attachment {file_name}: {str(e)}") + continue - # Try to create draft using drafts folder endpoint (Option 1) + # Try to create draft using drafts folder endpoint try: - logger.info("Attempting to create draft email using drafts folder endpoint") + logger.info("Attempting to create draft email using messages endpoint") logger.info(f"Email data structure: subject={subject}, recipient={recipient}, " + - f"has_attachments={bool(email_data.get('attachments'))}, " + - f"attachment_count={len(email_data.get('attachments', []))}") + f"has_attachments={bool(email_data.get('attachments'))}, " + + f"attachment_count={len(email_data.get('attachments', []))}") + # Create the draft 
message response = requests.post( - 'https://graph.microsoft.com/v1.0/me/mailFolders/drafts/messages', + 'https://graph.microsoft.com/v1.0/me/messages', headers=headers, json=email_data ) if response.status_code >= 200 and response.status_code < 300: - logger.info("Successfully created draft email using drafts folder endpoint") + logger.info("Successfully created draft email using messages endpoint") return response.json() else: - logger.error(f"Drafts folder method failed: {response.status_code} - {response.text}") - - # Try fallback method with messages endpoint (Option 2) - logger.info("Trying fallback with messages endpoint") - response = requests.post( - 'https://graph.microsoft.com/v1.0/me/messages', - headers=headers, - json=email_data - ) - - if response.status_code >= 200 and response.status_code < 300: - logger.info("Successfully created draft email using messages endpoint") - return response.json() - else: - logger.error(f"Messages endpoint method also failed: {response.status_code} - {response.text}") - return None + logger.error(f"Messages endpoint method failed: {response.status_code} - {response.text}") + logger.error(f"Request headers: {headers}") + logger.error(f"Request body: {json.dumps(email_data, indent=2)}") + return None except Exception as e: logger.error(f"Exception creating draft email: {str(e)}", exc_info=True) - return None + return None # Factory function for the Email agent def getAgentEmail(): diff --git a/modules/agentWebcrawler.py b/modules/agentWebcrawler.py index 7f5cad09..56cac5b1 100644 --- a/modules/agentWebcrawler.py +++ b/modules/agentWebcrawler.py @@ -52,7 +52,6 @@ class AgentWebcrawler(AgentBase): def setDependencies(self, mydom=None): """Set external dependencies for the agent.""" - self.mydom = mydom async def processTask(self, task: Dict[str, Any]) -> Dict[str, Any]: """ @@ -68,6 +67,7 @@ class AgentWebcrawler(AgentBase): # Extract task information prompt = task.get("prompt", "") outputSpecs = 
task.get("outputSpecifications", []) + workflow = task.get("context", {}).get("workflow", {}) # Check AI service if not self.mydom: @@ -77,6 +77,8 @@ class AgentWebcrawler(AgentBase): } # Create research plan + if workflow: + self.workflowManager.logAdd(workflow, "Creating research plan...", level="info", progress=35) researchPlan = await self._createResearchPlan(prompt) # Check if this is truly a web research task @@ -87,9 +89,13 @@ class AgentWebcrawler(AgentBase): } # Gather raw material through web research - rawResults = await self._gatherResearchMaterial(researchPlan) + if workflow: + self.workflowManager.logAdd(workflow, "Gathering research material...", level="info", progress=45) + rawResults = await self._gatherResearchMaterial(researchPlan, workflow) # Format results into requested output documents + if workflow: + self.workflowManager.logAdd(workflow, "Creating output documents...", level="info", progress=55) documents = await self._createOutputDocuments( prompt, rawResults, @@ -142,9 +148,9 @@ class AgentWebcrawler(AgentBase): try: # Get research plan from AI response = await self.mydom.callAi([ - {"role": "system", "content": "You are a web research planning expert. Create precise research plans in JSON format only."}, + {"role": "system", "content": "You are a web research planning expert. Create precise research plans. Respond with valid JSON only."}, {"role": "user", "content": researchPrompt} - ]) + ], produceUserAnswer=True) # Extract JSON jsonStart = response.find('{') @@ -188,12 +194,13 @@ class AgentWebcrawler(AgentBase): "feedback": f"I'll conduct web research on '{prompt}' and gather relevant information." } - async def _gatherResearchMaterial(self, researchPlan: Dict[str, Any]) -> List[Dict[str, Any]]: + async def _gatherResearchMaterial(self, researchPlan: Dict[str, Any], workflow: Dict[str, Any]) -> List[Dict[str, Any]]: """ Gather research material based on the research plan. 
Args: researchPlan: Research plan dictionary + workflow: Current workflow object Returns: List of research results @@ -202,7 +209,10 @@ class AgentWebcrawler(AgentBase): # Process direct URLs directUrls = researchPlan.get("directUrls", [])[:self.maxUrl] - for url in directUrls: + for i, url in enumerate(directUrls): + progress = 45 + int((i / len(directUrls)) * 5) # Progress from 45% to 50% + if hasattr(self, 'workflowManager') and self.workflowManager: + self.workflowManager.logAdd(workflow, f"Processing direct URL {i+1}/{len(directUrls)}...", level="info", progress=progress) logger.info(f"Processing direct URL: {url}") try: # Fetch and extract content @@ -226,7 +236,10 @@ class AgentWebcrawler(AgentBase): # Process search terms searchTerms = researchPlan.get("searchTerms", [])[:self.maxSearchTerms] - for term in searchTerms: + for i, term in enumerate(searchTerms): + progress = 50 + int((i / len(searchTerms)) * 5) # Progress from 50% to 55% + if hasattr(self, 'workflowManager') and self.workflowManager: + self.workflowManager.logAdd(workflow, f"Searching term {i+1}/{len(searchTerms)}...", level="info", progress=progress) logger.info(f"Searching for: {term}") try: # Perform search @@ -255,7 +268,7 @@ class AgentWebcrawler(AgentBase): if len(allResults) >= self.maxResults: break - # Create summaries in parallel for all results + # Create summaries for all results allResults = await self._summarizeAllResults(allResults, researchPlan) return allResults @@ -302,19 +315,15 @@ class AgentWebcrawler(AgentBase): Only include information actually found in the content. No fabrications or assumptions. 
""" - if self.mydom: - summary = await self.mydom.callAi([ - {"role": "system", "content": "You summarize web content accurately and concisely, focusing only on what is actually in the content."}, - {"role": "user", "content": summaryPrompt} - ]) - - # Store the summary - result["summary"] = summary - else: - # Fallback if no AI service - logger.warning(f"Not able to summarize result, using fallback plan.") - result["summary"] = f"Content from {result['url']} ({len(content)} characters)" - + # Get summary from AI + summary = await self.mydom.callAi([ + {"role": "system", "content": "You are a web content summarization expert. Create concise summaries."}, + {"role": "user", "content": summaryPrompt} + ], produceUserAnswer=True) + + # Add summary to result + result["summary"] = summary.strip() + except Exception as e: logger.warning(f"Error summarizing result {i+1}: {str(e)}") result["summary"] = f"Error creating summary: {str(e)}" diff --git a/modules/documentProcessor.py b/modules/documentProcessor.py index d3b637e1..ee48dc07 100644 --- a/modules/documentProcessor.py +++ b/modules/documentProcessor.py @@ -17,6 +17,10 @@ pdfExtractorLoaded = False officeExtractorLoaded = False imageProcessorLoaded = False +class FileProcessingError(Exception): + """Custom exception for file processing errors.""" + pass + def getDocumentContents(fileMetadata: Dict[str, Any], fileContent: bytes) -> List[Dict[str, Any]]: """ Main function for extracting content from a file based on its MIME type. 
@@ -38,8 +42,50 @@ def getDocumentContents(fileMetadata: Dict[str, Any], fileContent: bytes) -> Lis # Extract content based on MIME type contents = [] + # Try to detect actual file type from content for unknown MIME types + if mimeType == "application/octet-stream": + # Check file extension first + ext = os.path.splitext(fileName)[1].lower() + if ext: + # Map common extensions to MIME types + ext_to_mime = { + '.txt': 'text/plain', + '.md': 'text/markdown', + '.csv': 'text/csv', + '.json': 'application/json', + '.xml': 'application/xml', + '.js': 'application/javascript', + '.py': 'application/x-python', + '.svg': 'image/svg+xml', + '.jpg': 'image/jpeg', + '.jpeg': 'image/jpeg', + '.png': 'image/png', + '.gif': 'image/gif', + '.pdf': 'application/pdf', + '.docx': 'application/vnd.openxmlformats-officedocument.wordprocessingml.document', + '.doc': 'application/msword', + '.xlsx': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet', + '.xls': 'application/vnd.ms-excel', + '.pptx': 'application/vnd.openxmlformats-officedocument.presentationml.presentation', + '.ppt': 'application/vnd.ms-powerpoint' + } + if ext in ext_to_mime: + mimeType = ext_to_mime[ext] + logger.info(f"Detected MIME type {mimeType} from extension {ext}") + else: + logger.warning(f"Unknown file extension {ext} for file {fileName}") + + # Try to detect if it's text content + try: + text_content = fileContent.decode('utf-8') + logger.info(f"Successfully decoded file {fileName} as text") + contents.extend(extractTextContent(fileName, fileContent, "text/plain")) + except UnicodeDecodeError: + logger.info(f"File {fileName} is not text, treating as binary") + contents.extend(extractBinaryContent(fileName, fileContent, mimeType)) + # Text-based formats (excluding CSV which has its own handler) - if mimeType == "text/csv": + elif mimeType == "text/csv": contents.extend(extractCsvContent(fileName, fileContent)) # Then handle other text-based formats @@ -86,6 +132,7 @@ def 
getDocumentContents(fileMetadata: Dict[str, Any], fileContent: bytes) -> Lis # Binary data as fallback for unknown formats else: + logger.warning(f"Unknown MIME type {mimeType} for file {fileName}, treating as binary") contents.extend(extractBinaryContent(fileName, fileContent, mimeType)) # Fallback when no content could be extracted @@ -99,7 +146,7 @@ def getDocumentContents(fileMetadata: Dict[str, Any], fileContent: bytes) -> Lis "sequenceNr": 1, "name": '1_undefined', "ext": os.path.splitext(fileName)[1][1:] if os.path.splitext(fileName)[1] else "bin", - "contentType": mimeType, + "mimeType": mimeType, "data": encoded_data, "base64Encoded": True, "metadata": { @@ -130,13 +177,13 @@ def getDocumentContents(fileMetadata: Dict[str, Any], fileContent: bytes) -> Lis return contents except Exception as e: - logger.error(f"Error during content extraction: {str(e)}") + logger.error(f"Error during content extraction for file {fileMetadata.get('name', 'unknown')}: {str(e)}", exc_info=True) # Fallback on error - return original data return [{ "sequenceNr": 1, "name": fileMetadata.get("name", "unknown"), "ext": os.path.splitext(fileMetadata.get("name", ""))[1][1:] if os.path.splitext(fileMetadata.get("name", ""))[1] else "bin", - "contentType": fileMetadata.get("mimeType", "application/octet-stream"), + "mimeType": fileMetadata.get("mimeType", "application/octet-stream"), "data": base64.b64encode(fileContent).decode('utf-8'), "base64Encoded": True, "metadata": { @@ -206,7 +253,7 @@ def extractTextContent(fileName: str, fileContent: bytes, mimeType: str) -> List "sequenceNr": 1, "name": "1_text", # Simplified naming "ext": fileExtension, - "contentType": "text/plain", + "mimeType": "text/plain", "data": textContent, "base64Encoded": False, "metadata": { @@ -225,7 +272,7 @@ def extractTextContent(fileName: str, fileContent: bytes, mimeType: str) -> List "sequenceNr": 1, "name": "1_text", # Simplified naming "ext": fileExtension, - "contentType": "text/plain", + "mimeType": 
"text/plain", "data": textContent, "base64Encoded": False, "metadata": { @@ -242,7 +289,7 @@ def extractTextContent(fileName: str, fileContent: bytes, mimeType: str) -> List "sequenceNr": 1, "name": "1_binary", # Simplified naming "ext": fileExtension, - "contentType": mimeType, + "mimeType": mimeType, "data": base64.b64encode(fileContent).decode('utf-8'), "base64Encoded": True, "metadata": { @@ -256,7 +303,7 @@ def extractTextContent(fileName: str, fileContent: bytes, mimeType: str) -> List "sequenceNr": 1, "name": "1_binary", # Simplified naming "ext": fileExtension, - "contentType": mimeType, + "mimeType": mimeType, "data": base64.b64encode(fileContent).decode('utf-8'), "base64Encoded": True, "metadata": { @@ -282,7 +329,7 @@ def extractCsvContent(fileName: str, fileContent: bytes) -> List[Dict[str, Any]] "sequenceNr": 1, "name": "1_csv", # Simplified naming "ext": "csv", - "contentType": "text/csv", + "mimeType": "text/csv", "data": csvContent, "base64Encoded": False, "metadata": { @@ -302,7 +349,7 @@ def extractCsvContent(fileName: str, fileContent: bytes) -> List[Dict[str, Any]] "sequenceNr": 1, "name": "1_csv", # Simplified naming "ext": "csv", - "contentType": "text/csv", + "mimeType": "text/csv", "data": csvContent, "base64Encoded": False, "metadata": { @@ -319,7 +366,7 @@ def extractCsvContent(fileName: str, fileContent: bytes) -> List[Dict[str, Any]] "sequenceNr": 1, "name": "1_binary", # Simplified naming "ext": "csv", - "contentType": "text/csv", + "mimeType": "text/csv", "data": base64.b64encode(fileContent).decode('utf-8'), "base64Encoded": True, "metadata": { @@ -332,7 +379,7 @@ def extractCsvContent(fileName: str, fileContent: bytes) -> List[Dict[str, Any]] "sequenceNr": 1, "name": "1_binary", # Simplified naming "ext": "csv", - "contentType": "text/csv", + "mimeType": "text/csv", "data": base64.b64encode(fileContent).decode('utf-8'), "base64Encoded": True, "metadata": { @@ -364,7 +411,7 @@ def extractSvgContent(fileName: str, fileContent: bytes) 
-> List[Dict[str, Any]] "sequenceNr": 1, "name": "1_svg", # Simplified naming "ext": "svg", - "contentType": "image/svg+xml", + "mimeType": "image/svg+xml", "data": svgText, "base64Encoded": False, "metadata": { @@ -380,7 +427,7 @@ def extractSvgContent(fileName: str, fileContent: bytes) -> List[Dict[str, Any]] "sequenceNr": 1, "name": "1_text", "ext": "svg", - "contentType": "text/plain", + "mimeType": "text/plain", "data": svgText, "base64Encoded": False, "metadata": { @@ -401,7 +448,7 @@ def extractSvgContent(fileName: str, fileContent: bytes) -> List[Dict[str, Any]] "sequenceNr": 1, "name": "1_svg", # Simplified naming "ext": "svg", - "contentType": "image/svg+xml", + "mimeType": "image/svg+xml", "data": svgText, "base64Encoded": False, "metadata": { @@ -422,7 +469,7 @@ def extractSvgContent(fileName: str, fileContent: bytes) -> List[Dict[str, Any]] "sequenceNr": 1, "name": "1_binary", # Simplified naming "ext": "svg", - "contentType": "image/svg+xml", + "mimeType": "image/svg+xml", "data": base64.b64encode(fileContent).decode('utf-8'), "base64Encoded": True, "metadata": { @@ -438,7 +485,7 @@ def extractSvgContent(fileName: str, fileContent: bytes) -> List[Dict[str, Any]] "sequenceNr": 1, "name": "1_binary", # Simplified naming "ext": "svg", - "contentType": "image/svg+xml", + "mimeType": "image/svg+xml", "data": base64.b64encode(fileContent).decode('utf-8'), "base64Encoded": True, "metadata": { @@ -519,7 +566,7 @@ def extractImageContent(fileName: str, fileContent: bytes, mimeType: str) -> Lis "sequenceNr": 1, "name": "1_image", # Simplified naming "ext": fileExtension, - "contentType": mimeType, + "mimeType": mimeType, "data": encoded_data, "base64Encoded": True, "metadata": imageMetadata @@ -531,7 +578,7 @@ def extractImageContent(fileName: str, fileContent: bytes, mimeType: str) -> Lis "sequenceNr": 2, "name": "2_text_image_info", # Simplified naming with label "ext": "txt", - "contentType": "text/plain", + "mimeType": "text/plain", "data": 
imageDescription, "base64Encoded": False, "metadata": { @@ -566,7 +613,7 @@ def extractPdfContent(fileName: str, fileContent: bytes) -> List[Dict[str, Any]] "sequenceNr": 1, "name": "1_pdf", # Simplified naming "ext": "pdf", - "contentType": "application/pdf", + "mimeType": "application/pdf", "data": base64.b64encode(fileContent).decode('utf-8'), "base64Encoded": True, "metadata": { @@ -604,7 +651,7 @@ def extractPdfContent(fileName: str, fileContent: bytes) -> List[Dict[str, Any]] "sequenceNr": len(contents) + 1, "name": f"{len(contents) + 1}_text", # Simplified naming "ext": "txt", - "contentType": "text/plain", + "mimeType": "text/plain", "data": extractedText, "base64Encoded": False, "metadata": { @@ -639,7 +686,7 @@ def extractPdfContent(fileName: str, fileContent: bytes) -> List[Dict[str, Any]] "sequenceNr": len(contents) + 1, "name": f"{len(contents) + 1}_image_page{pageNum+1}_{imgIndex+1}", # Simplified naming with label "ext": imageExt, - "contentType": f"image/{imageExt}", + "mimeType": f"image/{imageExt}", "data": base64.b64encode(imageBytes).decode('utf-8'), "base64Encoded": True, "metadata": { @@ -667,7 +714,7 @@ def extractPdfContent(fileName: str, fileContent: bytes) -> List[Dict[str, Any]] "sequenceNr": 1, "name": "1_pdf", # Simplified naming "ext": "pdf", - "contentType": "application/pdf", + "mimeType": "application/pdf", "data": base64.b64encode(fileContent).decode('utf-8'), "base64Encoded": True, "metadata": { @@ -706,7 +753,7 @@ def extractWordContent(fileName: str, fileContent: bytes, mimeType: str) -> List "sequenceNr": 1, "name": "1_word", # Simplified naming "ext": fileExtension, - "contentType": mimeType, + "mimeType": mimeType, "data": base64.b64encode(fileContent).decode('utf-8'), "base64Encoded": True, "metadata": { @@ -743,7 +790,7 @@ def extractWordContent(fileName: str, fileContent: bytes, mimeType: str) -> List "sequenceNr": 1, "name": "1_text", # Simplified naming "ext": "txt", - "contentType": "text/plain", + "mimeType": 
"text/plain", "data": extractedText, "base64Encoded": False, "metadata": { @@ -765,7 +812,7 @@ def extractWordContent(fileName: str, fileContent: bytes, mimeType: str) -> List "sequenceNr": 1, "name": "1_word", # Simplified naming "ext": fileExtension, - "contentType": mimeType, + "mimeType": mimeType, "data": base64.b64encode(fileContent).decode('utf-8'), "base64Encoded": True, "metadata": { @@ -804,7 +851,7 @@ def extractExcelContent(fileName: str, fileContent: bytes, mimeType: str) -> Lis "sequenceNr": 1, "name": "1_excel", # Simplified naming "ext": fileExtension, - "contentType": mimeType, + "mimeType": mimeType, "data": base64.b64encode(fileContent).decode('utf-8'), "base64Encoded": True, "metadata": { @@ -845,7 +892,7 @@ def extractExcelContent(fileName: str, fileContent: bytes, mimeType: str) -> Lis "sequenceNr": len(contents) + 1, "name": f"{len(contents) + 1}_csv_{sheetSafeName}", # Simplified naming with sheet label "ext": "csv", - "contentType": "text/csv", + "mimeType": "text/csv", "data": csvContent, "base64Encoded": False, "metadata": { @@ -867,7 +914,7 @@ def extractExcelContent(fileName: str, fileContent: bytes, mimeType: str) -> Lis "sequenceNr": 1, "name": "1_excel", # Simplified naming "ext": fileExtension, - "contentType": mimeType, + "mimeType": mimeType, "data": base64.b64encode(fileContent).decode('utf-8'), "base64Encoded": True, "metadata": { @@ -897,7 +944,7 @@ def extractPowerpointContent(fileName: str, fileContent: bytes, mimeType: str) - "sequenceNr": 1, "name": "1_powerpoint", # Simplified naming "ext": fileExtension, - "contentType": mimeType, + "mimeType": mimeType, "data": base64.b64encode(fileContent).decode('utf-8'), "base64Encoded": True, "metadata": { @@ -923,11 +970,165 @@ def extractBinaryContent(fileName: str, fileContent: bytes, mimeType: str) -> Li "sequenceNr": 1, "name": "1_binary", # Simplified naming "ext": fileExtension, - "contentType": mimeType, + "mimeType": mimeType, "data": 
base64.b64encode(fileContent).decode('utf-8'), "base64Encoded": True, "metadata": { "isText": False, "format": "binary" } - }] \ No newline at end of file + }] + +def processFile(self, fileContent: bytes, fileName: str, fileMetadata: Dict[str, Any] = None) -> List[Dict[str, Any]]: + """ + Process a file and return its contents as a list of documents. + + Args: + fileContent: Binary content of the file + fileName: Name of the file + fileMetadata: Optional metadata about the file + + Returns: + List of document dictionaries + """ + try: + # Get file extension and MIME type + fileExtension = os.path.splitext(fileName)[1].lower()[1:] + mimeType = fileMetadata.get("mimeType", self.mydom.getMimeType(fileName)) if fileMetadata else self.mydom.getMimeType(fileName) + + # Process based on file type + if mimeType.startswith("image/"): + return self._processImageFile(fileContent, fileName, fileExtension, mimeType, fileMetadata) + elif mimeType == "application/pdf": + return self._processPdfFile(fileContent, fileName, fileMetadata) + elif mimeType == "text/csv": + return self._processCsvFile(fileContent, fileName, fileMetadata) + elif mimeType == "text/plain": + return self._processTextFile(fileContent, fileName, fileMetadata) + else: + # Default binary file handling + return [{ + "name": fileName, + "ext": fileExtension, + "mimeType": mimeType, + "data": base64.b64encode(fileContent).decode('utf-8'), + "base64Encoded": True, + "metadata": { + "isText": False + } + }] + + except Exception as e: + logger.error(f"Error processing file {fileName}: {str(e)}") + raise FileProcessingError(f"Error processing file: {str(e)}") + + def _processImageFile(self, fileContent: bytes, fileName: str, fileExtension: str, mimeType: str, fileMetadata: Dict[str, Any] = None) -> List[Dict[str, Any]]: + """Process an image file.""" + try: + # Create image document + imageDoc = { + "name": fileName, + "ext": fileExtension, + "mimeType": mimeType, + "data": 
base64.b64encode(fileContent).decode('utf-8'), + "base64Encoded": True, + "metadata": { + "isText": False, + "isImage": True, + "format": fileExtension + } + } + + # Add image description if available + if fileMetadata and "description" in fileMetadata: + imageDoc["metadata"]["description"] = fileMetadata["description"] + + return [imageDoc] + + except Exception as e: + logger.error(f"Error processing image file {fileName}: {str(e)}") + raise FileProcessingError(f"Error processing image file: {str(e)}") + + def _processPdfFile(self, fileContent: bytes, fileName: str, fileMetadata: Dict[str, Any] = None) -> List[Dict[str, Any]]: + """Process a PDF file.""" + try: + # Create PDF document + pdfDoc = { + "name": fileName, + "ext": "pdf", + "mimeType": "application/pdf", + "data": base64.b64encode(fileContent).decode('utf-8'), + "base64Encoded": True, + "metadata": { + "isText": False, + "isPdf": True + } + } + + return [pdfDoc] + + except Exception as e: + logger.error(f"Error processing PDF file {fileName}: {str(e)}") + raise FileProcessingError(f"Error processing PDF file: {str(e)}") + + def _processCsvFile(self, fileContent: bytes, fileName: str, fileMetadata: Dict[str, Any] = None) -> List[Dict[str, Any]]: + """Process a CSV file.""" + try: + # Try to decode as text first + try: + csvContent = fileContent.decode('utf-8') + base64Encoded = False + except UnicodeDecodeError: + # If not valid UTF-8, encode as base64 + csvContent = base64.b64encode(fileContent).decode('utf-8') + base64Encoded = True + + # Create CSV document + csvDoc = { + "name": fileName, + "ext": "csv", + "mimeType": "text/csv", + "data": csvContent, + "base64Encoded": base64Encoded, + "metadata": { + "isText": True, + "isCsv": True, + "format": "csv" + } + } + + return [csvDoc] + + except Exception as e: + logger.error(f"Error processing CSV file {fileName}: {str(e)}") + raise FileProcessingError(f"Error processing CSV file: {str(e)}") + + def _processTextFile(self, fileContent: bytes, fileName: 
str, fileMetadata: Dict[str, Any] = None) -> List[Dict[str, Any]]: + """Process a text file.""" + try: + # Try to decode as text + try: + textContent = fileContent.decode('utf-8') + base64Encoded = False + except UnicodeDecodeError: + # If not valid UTF-8, encode as base64 + textContent = base64.b64encode(fileContent).decode('utf-8') + base64Encoded = True + + # Create text document + textDoc = { + "name": fileName, + "ext": "txt", + "mimeType": "text/plain", + "data": textContent, + "base64Encoded": base64Encoded, + "metadata": { + "isText": True + } + } + + return [textDoc] + + except Exception as e: + logger.error(f"Error processing text file {fileName}: {str(e)}") + raise FileProcessingError(f"Error processing text file: {str(e)}") \ No newline at end of file diff --git a/modules/gatewayInterface.py b/modules/gatewayInterface.py index 94359949..8008e1f9 100644 --- a/modules/gatewayInterface.py +++ b/modules/gatewayInterface.py @@ -123,27 +123,50 @@ class GatewayInterface: def _uam(self, table: str, recordset: List[Dict[str, Any]]) -> List[Dict[str, Any]]: """ - Unified user access management function that filters data based on user privileges. + Unified user access management function that filters data based on user privileges + and adds access control attributes. 
Args: table: Name of the table recordset: Recordset to filter based on access rules Returns: - Filtered recordset based on user privilege level + Filtered recordset with access control attributes """ userPrivilege = self.currentUser.get("privilege", "user") + filtered_records = [] # Apply filtering based on privilege if userPrivilege == "sysadmin": - return recordset # System admins see all records + filtered_records = recordset # System admins see all records elif userPrivilege == "admin": # Admins see records in their mandate - return [r for r in recordset if r.get("mandateId") == self.mandateId] + filtered_records = [r for r in recordset if r.get("mandateId") == self.mandateId] else: # Regular users # Users only see records they own within their mandate - return [r for r in recordset + filtered_records = [r for r in recordset if r.get("mandateId") == self.mandateId and r.get("userId") == self.userId] + + # Add access control attributes to each record + for record in filtered_records: + record_id = record.get("id") + + # Set access control flags based on user permissions + if table == "mandates": + record["_hideView"] = False # Everyone can view + record["_hideEdit"] = not self._canModify("mandates", record_id) + record["_hideDelete"] = not self._canModify("mandates", record_id) + elif table == "users": + record["_hideView"] = False # Everyone can view + record["_hideEdit"] = not self._canModify("users", record_id) + record["_hideDelete"] = not self._canModify("users", record_id) + else: + # Default access control for other tables + record["_hideView"] = False + record["_hideEdit"] = not self._canModify(table, record_id) + record["_hideDelete"] = not self._canModify(table, record_id) + + return filtered_records def _canModify(self, table: str, recordId: Optional[int] = None) -> bool: """ @@ -393,7 +416,11 @@ class GatewayInterface: def authenticateUser(self, username: str, password: str) -> Optional[Dict[str, Any]]: """Authenticates a user by username and 
password.""" - # Instead of using UAM filtering, directly get user from database + # Clear the users table from cache and reload it + if "users" in self.db._tablesCache: + del self.db._tablesCache["users"] + + # Get fresh user data users = self.db.getRecordset("users") user = next((u for u in users if u.get("username") == username), None) diff --git a/modules/lucydomInterface.py b/modules/lucydomInterface.py index b1fc5dcb..91d7769a 100644 --- a/modules/lucydomInterface.py +++ b/modules/lucydomInterface.py @@ -11,6 +11,7 @@ from typing import Dict, Any, List, Optional, Union import importlib import hashlib +import json from modules.mimeUtils import isTextMimeType, determineContentEncoding @@ -161,35 +162,72 @@ class LucyDOMInterface: def _uam(self, table: str, recordset: List[Dict[str, Any]]) -> List[Dict[str, Any]]: """ - Unified user access management function that filters data based on user privileges. + Unified user access management function that filters data based on user privileges + and adds access control attributes. 
Args: table: Name of the table recordset: Recordset to filter based on access rules Returns: - Filtered recordset based on user privilege level + Filtered recordset with access control attributes """ userPrivilege = self.currentUser.get("privilege", "user") + filtered_records = [] # Apply filtering based on privilege if userPrivilege == "sysadmin": - return recordset # System admins see all records + filtered_records = recordset # System admins see all records elif userPrivilege == "admin": # Admins see records in their mandate - return [r for r in recordset if r.get("mandateId") == self.mandateId] + filtered_records = [r for r in recordset if r.get("mandateId") == self.mandateId] else: # Regular users # To see all prompts from mandate 0 and own if table == "prompts": - return [r for r in recordset if + filtered_records = [r for r in recordset if (r.get("mandateId") == self.mandateId and r.get("userId") == self.userId) or (r.get("mandateId") == 0) ] - # Users see only their records - return [r for r in recordset + else: + # Users see only their records + filtered_records = [r for r in recordset if r.get("mandateId") == self.mandateId and r.get("userId") == self.userId] - + + # Add access control attributes to each record + for record in filtered_records: + record_id = record.get("id") + + # Set access control flags based on user permissions + if table == "prompts": + record["_hideView"] = False # Everyone can view + record["_hideEdit"] = not self._canModify("prompts", record_id) + record["_hideDelete"] = not self._canModify("prompts", record_id) + elif table == "files": + record["_hideView"] = False # Everyone can view + record["_hideEdit"] = not self._canModify("files", record_id) + record["_hideDelete"] = not self._canModify("files", record_id) + record["_hideDownload"] = not self._canModify("files", record_id) + elif table == "workflows": + record["_hideView"] = False # Everyone can view + record["_hideEdit"] = not self._canModify("workflows", record_id) + 
record["_hideDelete"] = not self._canModify("workflows", record_id) + elif table == "workflowMessages": + record["_hideView"] = False # Everyone can view + record["_hideEdit"] = not self._canModify("workflows", record.get("workflowId")) + record["_hideDelete"] = not self._canModify("workflows", record.get("workflowId")) + elif table == "workflowLogs": + record["_hideView"] = False # Everyone can view + record["_hideEdit"] = not self._canModify("workflows", record.get("workflowId")) + record["_hideDelete"] = not self._canModify("workflows", record.get("workflowId")) + else: + # Default access control for other tables + record["_hideView"] = False + record["_hideEdit"] = not self._canModify(table, record_id) + record["_hideDelete"] = not self._canModify(table, record_id) + + return filtered_records + def _canModify(self, table: str, recordId: Optional[int] = None) -> bool: """ Checks if the current user can modify (create/update/delete) records in a table. @@ -357,11 +395,14 @@ class LucyDOMInterface: return hashlib.sha256(fileContent).hexdigest() def checkForDuplicateFile(self, fileHash: str) -> Optional[Dict[str, Any]]: - """Checks if a file with the same hash already exists.""" - files = self.db.getRecordset("files", recordFilter={"fileHash": fileHash}) - filteredFiles = self._uam("files", files) - if filteredFiles: - return filteredFiles[0] + """Checks if a file with the same hash already exists for the current user and mandate.""" + files = self.db.getRecordset("files", recordFilter={ + "fileHash": fileHash, + "mandateId": self.mandateId, + "userId": self.userId + }) + if files: + return files[0] return None def getMimeType(self, filename: str) -> str: @@ -669,7 +710,7 @@ class LucyDOMInterface: fileHash = self.calculateFileHash(fileContent) logger.debug(f"Calculated file hash: {fileHash}") - # Check for duplicate + # Check for duplicate within same user/mandate existingFile = self.checkForDuplicateFile(fileHash) if existingFile: logger.info(f"Duplicate found 
for {fileName}: {existingFile['id']}") @@ -691,9 +732,6 @@ class LucyDOMInterface: # Save binary data logger.info(f"Saving file content to database for file: {fileName}") self.createFileData(dbFile["id"], fileContent) - - # Debug: Export file to static folder - self._exportFileToStatic(fileContent, dbFile["id"], fileName) logger.info(f"File upload process completed for: {fileName}") return dbFile @@ -730,12 +768,6 @@ class LucyDOMInterface: logger.error(f"Error downloading file {fileId}: {str(e)}") raise FileError(f"Error downloading file: {str(e)}") - def _exportFileToStatic(self, fileContent: bytes, fileId: int, fileName: str): - """Debug helper to export files to static folder.""" - debugFilename = f"{fileId}_{fileName}" - with open(f"./static/{debugFilename}", 'wb') as f: - f.write(fileContent) - # Workflow methods def getAllWorkflows(self) -> List[Dict[str, Any]]: @@ -1286,7 +1318,65 @@ class LucyDOMInterface: except Exception as e: logger.error(f"Error loading workflow state: {str(e)}") return None + + # Microsoft Login + + def getMsftToken(self) -> Optional[Dict[str, Any]]: + """Get Microsoft token data for the current user from database""" + try: + # Get token from database using current user's mandateId and userId + tokens = self.db.getRecordset("msftTokens", recordFilter={ + "mandateId": self.mandateId, + "userId": self.userId + }) + if tokens and len(tokens) > 0: + token_data = json.loads(tokens[0]["token_data"]) + logger.info(f"Retrieved Microsoft token for user {self.userId}") + return token_data + else: + logger.info(f"No Microsoft token found for user {self.userId}") + return None + + except Exception as e: + logger.error(f"Error retrieving Microsoft token: {str(e)}") + return None + + def saveMsftToken(self, token_data: Dict[str, Any]) -> bool: + """Save Microsoft token data for the current user to database""" + try: + # Check if token already exists + tokens = self.db.getRecordset("msftTokens", recordFilter={ + "mandateId": self.mandateId, + 
"userId": self.userId + }) + + if tokens and len(tokens) > 0: + # Update existing token + token_id = tokens[0]["id"] + updated_data = { + "token_data": json.dumps(token_data), + "updated_at": datetime.now().isoformat() + } + self.db.recordModify("msftTokens", token_id, updated_data) + logger.info(f"Updated Microsoft token for user {self.userId}") + else: + # Create new token + new_token = { + "mandateId": self.mandateId, + "userId": self.userId, + "token_data": json.dumps(token_data), + "created_at": datetime.now().isoformat(), + "updated_at": datetime.now().isoformat() + } + self.db.recordCreate("msftTokens", new_token) + logger.info(f"Saved new Microsoft token for user {self.userId}") + + return True + + except Exception as e: + logger.error(f"Error saving Microsoft token: {str(e)}") + return False # Singleton factory for LucyDOMInterface instances per context _lucydomInterfaces = {} diff --git a/modules/lucydomModel.py b/modules/lucydomModel.py index 68939580..782d0b0e 100644 --- a/modules/lucydomModel.py +++ b/modules/lucydomModel.py @@ -78,6 +78,31 @@ class FileData(BaseModel): base64Encoded: bool = Field(description="Flag indicating whether the data is base64 encoded") +class MsftToken(BaseModel): + """Data model for Microsoft authentication tokens""" + id: int = Field(description="Unique ID of the token") + mandateId: int = Field(description="ID of the associated mandate") + userId: int = Field(description="ID of the user") + token_data: str = Field(description="JSON string containing the token data") + created_at: str = Field(description="Timestamp when the token was created") + updated_at: str = Field(description="Timestamp when the token was last updated") + + label: Label = Field( + default=Label(default="Microsoft Token", translations={"en": "Microsoft Token", "fr": "Jeton Microsoft"}), + description="Label for the class" + ) + + # Labels for attributes + fieldLabels: Dict[str, Label] = { + "id": Label(default="ID", translations={}), + "mandateId": 
Label(default="Mandate ID", translations={"en": "Mandate ID", "fr": "ID de mandat"}), + "userId": Label(default="User ID", translations={"en": "User ID", "fr": "ID d'utilisateur"}), + "token_data": Label(default="Token Data", translations={"en": "Token Data", "fr": "Données du jeton"}), + "created_at": Label(default="Created At", translations={"en": "Created At", "fr": "Créé le"}), + "updated_at": Label(default="Updated At", translations={"en": "Updated At", "fr": "Mis à jour le"}) + } + + # Workflow model classes class DocumentContent(BaseModel): @@ -85,7 +110,7 @@ class DocumentContent(BaseModel): sequenceNr: int = Field(1, description="Sequence number of the content in the source document") name: str = Field(description="Designation") ext: str = Field(description="Content extension for export: txt, csv, json, jpg, png") - contentType: str = Field(description="MIME type") + mimeType: str = Field(description="MIME type") summary: str = Field(description="Summary of the file content") data: str = Field(description="Actual content, text or base64 encoded based on base64Encoded flag") base64Encoded: bool = Field(description="Flag indicating whether the data is base64 encoded") @@ -97,6 +122,7 @@ class Document(BaseModel): name: str = Field(description="Name of the data object") ext: str = Field(description="Extension of the data object") fileId: int = Field(description="ID of the referenced file in the database") + mimeType: str = Field(description="MIME type") data: str = Field(description="Content of the data as text or base64 encoded based on base64Encoded flag") base64Encoded: bool = Field(description="Flag indicating whether the data is base64 encoded") contents: List[DocumentContent] = Field(description="Document contents") diff --git a/modules/workflowAgentsRegistry.py b/modules/workflowAgentsRegistry.py index 25d8d2ff..0d3e03b9 100644 --- a/modules/workflowAgentsRegistry.py +++ b/modules/workflowAgentsRegistry.py @@ -32,6 +32,7 @@ class AgentBase: 
self.description = "Basic agent functionality" self.capabilities = [] self.mydom = None + self.workflowManager = None # Will be set by workflow manager def setDependencies(self, mydom=None): """Set external dependencies for the agent.""" @@ -58,11 +59,16 @@ class AgentBase: Args: task: A dictionary containing: - taskId: Unique ID for this task - - workflowId: ID of the parent workflow (optional) + - workflowId: ID of the parent workflow - prompt: The main instruction for the agent - inputDocuments: List of document objects to process - outputSpecifications: List of required output documents - - context: Additional contextual information + - context: Additional contextual information including: + - workflow: The complete workflow object + - workflowRound: Current workflow round + - agentType: Type of agent + - timestamp: Task timestamp + - language: User language Returns: A dictionary containing: @@ -85,51 +91,45 @@ class AgentBase: """Wrapper for the utility function""" return isTextMimeType(mimeType) - def formatAgentDocumentOutput(self, label: str, content: Any, contentType: str = None) -> Dict[str, Any]: + def formatAgentDocumentOutput(self, label: str, content: Any, mimeType: str = None) -> Dict[str, Any]: """ - Helper method to properly format a document output with base64Encoded flag and metadata. + Format agent output as a document. 
Args: - label: Name of the document + label: Label for the document content: Content of the document - contentType: Optional content type for the document - - Returns: - Properly formatted document dictionary + mimeType: Optional MIME type for the document """ - import base64 - - # Determine if content should be base64 encoded - should_base64_encode = self.determineBase64EncodingFlag(label, content) - - # Process content based on type and encoding flag - formatted_content = content - - if should_base64_encode: - if isinstance(content, bytes): - # Convert binary to base64 - formatted_content = base64.b64encode(content).decode('utf-8') - elif isinstance(content, str): - try: - # Check if it's already base64 encoded - base64.b64decode(content) - # If we get here, it appears to be valid base64 - formatted_content = content - except: - # Not valid base64, so encode it - formatted_content = base64.b64encode(content.encode('utf-8')).decode('utf-8') - - # Create document with metadata + # Create document structure doc = { - "label": label, - "content": formatted_content, - "base64Encoded": should_base64_encode, - "metadata": {} + "id": str(uuid.uuid4()), + "name": label, + "ext": "txt", # Default extension + "data": content, + "base64Encoded": False, + "metadata": { + "isText": True + } } - # Add content type if provided - if contentType: - doc["metadata"]["contentType"] = contentType + # Set MIME type if provided + if mimeType: + doc["mimeType"] = mimeType + # Update extension based on MIME type + if mimeType == "text/markdown": + doc["ext"] = "md" + elif mimeType == "text/html": + doc["ext"] = "html" + elif mimeType == "text/csv": + doc["ext"] = "csv" + elif mimeType == "application/json": + doc["ext"] = "json" + elif mimeType.startswith("image/"): + doc["ext"] = mimeType.split("/")[1] + doc["metadata"]["isText"] = False + elif mimeType == "application/pdf": + doc["ext"] = "pdf" + doc["metadata"]["isText"] = False return doc @@ -214,6 +214,11 @@ class AgentRegistry: 
self.mydom = mydom self.updateAgentDependencies() + def setWorkflowManager(self, workflowManager): + """Set the workflow manager reference for all agents.""" + for agent in self.agents.values(): + agent.workflowManager = workflowManager + def updateAgentDependencies(self): """Update dependencies for all registered agents.""" for agentId, agent in self.agents.items(): @@ -245,8 +250,8 @@ class AgentRegistry: if agentIdentifier in self.agents: agent = self.agents[agentIdentifier] # Ensure the agent has the AI service - if hasattr(agent, 'setDependencies') and self.mydom: - agent.setDependencies(mydom=self.mydom) + if self.mydom: + agent.mydom = self.mydom return agent logger.error(f"Agent with identifier '{agentIdentifier}' not found") return None diff --git a/modules/workflowManager.py b/modules/workflowManager.py index 5d89d311..99237e02 100644 --- a/modules/workflowManager.py +++ b/modules/workflowManager.py @@ -10,8 +10,9 @@ import json import re import uuid import base64 -from datetime import datetime +from datetime import datetime, timedelta from typing import Dict, Any, List, Optional, Union, Tuple +import time from modules.mimeUtils import isTextMimeType, determineContentEncoding @@ -58,6 +59,7 @@ class WorkflowManager: self.mydom = domInterface(mandateId, userId) self.agentRegistry = getAgentRegistry() self.agentRegistry.setMydom(self.mydom) + self.agentRegistry.setWorkflowManager(self) # Set self as workflow manager for all agents ### Workflow State Machine Implementation @@ -132,6 +134,7 @@ class WorkflowManager: Returns: Updated workflow with processing results """ + startTime = time.time() try: # State 3: User Message Processing self.checkExitCriteria(workflow) @@ -161,8 +164,42 @@ class WorkflowManager: } self.messageAdd(workflow, responseMessage) - self.logAdd(workflow, f"Planned outputs: {len(objFinalDocuments)} documents", level="info", progress=20) - self.logAdd(workflow, f"Work plan created with {len(objWorkplan)} steps", level="info", progress=25) 
+ # Add detailed log entry about the task plan + taskPlanLog = "Input: " + if objFinalDocuments: + taskPlanLog += ", ".join(objFinalDocuments) + "
" + else: + taskPlanLog += "No input files
" + + # Work Plan Steps + for i, task in enumerate(objWorkplan, 1): + agentName = task.get("agent", "unknown") + taskPlanLog += f"{i}. Agent {agentName}
" + + # Input Documents + inputDocs = task.get("inputDocuments", []) + if inputDocs: + inputLabels = [doc.get("label", "unknown") for doc in inputDocs] + taskPlanLog += f"- Input: {', '.join(inputLabels)}
" + + # Task Prompt + prompt = task.get('prompt', 'No prompt') + taskPlanLog += f"- Task: {prompt}
" + + # Output Documents + outputDocs = task.get("outputDocuments", []) + if outputDocs: + outputLabels = [doc.get("label", "unknown") for doc in outputDocs] + taskPlanLog += f"- Output: {', '.join(outputLabels)}
" + + # Final Results + taskPlanLog += "Result: " + if objFinalDocuments: + taskPlanLog += ", ".join(objFinalDocuments) + else: + taskPlanLog += "No result files" + + self.logAdd(workflow, taskPlanLog, level="info", progress=25) # State 5: Agent Execution objResults = [] @@ -199,6 +236,10 @@ class WorkflowManager: self.checkExitCriteria(workflow) self.workflowFinish(workflow) + # Update processing time + endTime = time.time() + workflow["dataStats"]["processingTime"] = endTime - startTime + return workflow except Exception as e: @@ -207,10 +248,15 @@ class WorkflowManager: workflow["status"] = "failed" workflow["lastActivity"] = datetime.now().isoformat() + # Update processing time even on error + endTime = time.time() + workflow["dataStats"]["processingTime"] = endTime - startTime + # Update in database self.mydom.updateWorkflow(workflow["id"], { "status": "failed", - "lastActivity": workflow["lastActivity"] + "lastActivity": workflow["lastActivity"], + "dataStats": workflow["dataStats"] }) self.logAdd(workflow, f"Workflow failed: {str(e)}", level="error", progress=100) @@ -241,7 +287,12 @@ class WorkflowManager: "messages": [], # Empty list - will be filled with references "messageIds": [], # Initialize empty messageIds list "logs": [], - "dataStats": {}, + "dataStats": { + "bytesSent": 0, + "bytesReceived": 0, + "tokensUsed": 0, + "processingTime": 0.0 + }, "currentRound": 1, "status": "running", "lastActivity": currentTime, @@ -287,11 +338,24 @@ class WorkflowManager: else: workflow["currentRound"] = 1 + # Ensure dataStats exists with correct field names + if "dataStats" not in workflow: + workflow["dataStats"] = { + "bytesSent": 0, + "bytesReceived": 0, + "tokensUsed": 0, + "processingTime": 0.0 + } + elif "tokenCount" in workflow["dataStats"]: + # Convert old tokenCount to tokensUsed if needed + workflow["dataStats"]["tokensUsed"] = workflow["dataStats"].pop("tokenCount", 0) + # Update in database - only the relevant workflow fields workflowUpdate = { 
"status": workflow["status"], "lastActivity": workflow["lastActivity"], - "currentRound": workflow["currentRound"] + "currentRound": workflow["currentRound"], + "dataStats": workflow["dataStats"] # Include updated dataStats } self.mydom.updateWorkflow(workflowId, workflowUpdate) @@ -382,6 +446,7 @@ Please analyze the request and create: 3. Do not define document inputs that don't exist or haven't been generated beforehand. 4. Create a logical sequence - earlier agents can create documents that are later used as inputs. 5. If the user has provided documents but hasn't clearly stated what they want, try to act according to the context. +6. ALL documents provided by the user (where fileSource is "user") MUST be included in the work plan, even if they don't have content summaries or if content extraction failed. Your answer must be strictly in the JSON_OUTPUT format, with no additions before or after the JSON object. @@ -415,6 +480,7 @@ JSON_OUTPUT = {{ ## RULES for inputDocuments: 1. The user request refers to documents where "fileSource" in available documents is "user". Those documents are in the focus for input 2. In case of redundant label in available documents, use document with highest sequenceNr if not specified differently +3. ALL documents provided by the user MUST be included in the work plan, even if they don't have content summaries or if content extraction failed ## STRICT RULES FOR document "label": 1. Every document label MUST include a proper file extension that matches the content type. 
@@ -472,6 +538,9 @@ JSON_OUTPUT = {{ return [] agentLabel = agent.label + # Set workflow manager reference on the agent + agent.workflowManager = self + # Log the current step outputLabels = [] for doc in task.get("outputDocuments", []): @@ -496,7 +565,7 @@ JSON_OUTPUT = {{ # Prepare input documents for the agent inputDocuments = await self.prepareAgentInputDocuments(task.get('inputDocuments', []), workflow) - + # Create a standardized task object for the agent as per state machine spec agentTask = { "taskId": str(uuid.uuid4()), @@ -505,20 +574,61 @@ JSON_OUTPUT = {{ "inputDocuments": inputDocuments, "outputSpecifications": outputSpecs, "context": { + "workflow": workflow, # Add the complete workflow object "workflowRound": workflow.get("currentRound", 1), "agentType": agentName, "timestamp": datetime.now().isoformat(), "language": self.mydom.userLanguage # Pass language to agent } } - + # Execute the agent with the standardized task try: # Process the task using the agent's standardized interface logger.debug("TASK: "+self.parseJson2text(agentTask)) logger.debug(f"Agent '{agentName}' AI service available: {agent.mydom is not None}") + # Calculate bytes sent before processing + bytesSent = len(json.dumps(agentTask).encode('utf-8')) + for doc in inputDocuments: + if doc.get('data'): + bytesSent += len(doc['data'].encode('utf-8')) + for content in doc.get('contents', []): + if content.get('data'): + bytesSent += len(content['data'].encode('utf-8')) + + # Process the task + startTime = time.time() agentResults = await agent.processTask(agentTask) + endTime = time.time() + + # Calculate bytes received + bytesReceived = len(json.dumps(agentResults).encode('utf-8')) + for doc in agentResults.get('documents', []): + if doc.get('content'): + bytesReceived += len(doc['content'].encode('utf-8')) + + # Calculate tokens used (now using bytes) + tokensUsed = bytesSent + bytesReceived + + # Update workflow statistics + if 'dataStats' not in workflow: + workflow['dataStats'] = { 
+ 'bytesSent': 0, + 'bytesReceived': 0, + 'tokensUsed': 0, + 'processingTime': 0 + } + + workflow['dataStats']['bytesSent'] += bytesSent + workflow['dataStats']['bytesReceived'] += bytesReceived + workflow['dataStats']['tokensUsed'] += tokensUsed + workflow['dataStats']['processingTime'] += (endTime - startTime) + + # Update in database + self.mydom.updateWorkflow(workflow["id"], { + "dataStats": workflow['dataStats'] + }) logger.debug(f"Agent '{agentName}' completed task. RESULT: {self.parseJson2text(agentResults)}") @@ -710,6 +820,38 @@ filesDelivered = {self.parseJson2text(matchingDocuments)} messageObject = self.messageAdd(workflow, messageObject) logger.debug(f"message_user = {self.parseJson2text(messageObject)}.") + + # Update statistics for user input + if role == "user": + # Calculate bytes sent + bytesSent = len(messageContent.encode('utf-8')) + for doc in additionalFiles: + if doc.get('data'): + bytesSent += len(doc['data'].encode('utf-8')) + for content in doc.get('contents', []): + if content.get('data'): + bytesSent += len(content['data'].encode('utf-8')) + + # Calculate tokens used (now using bytes) + tokensUsed = bytesSent + + # Update workflow statistics + if 'dataStats' not in workflow: + workflow['dataStats'] = { + 'bytesSent': 0, + 'bytesReceived': 0, + 'tokensUsed': 0, + 'processingTime': 0 + } + + workflow['dataStats']['bytesSent'] += bytesSent + workflow['dataStats']['tokensUsed'] += tokensUsed + + # Update in database + self.mydom.updateWorkflow(workflow["id"], { + "dataStats": workflow['dataStats'] + }) + return messageObject async def processFileIds(self, fileIds: List[int]) -> List[Dict[str, Any]]: @@ -789,8 +931,13 @@ filesDelivered = {self.parseJson2text(matchingDocuments)} "fileId": fileId, "name": os.path.splitext(fileNameExt)[0] if os.path.splitext(fileNameExt)[0] else "noname", "ext": os.path.splitext(fileNameExt)[1][1:] if os.path.splitext(fileNameExt)[1] else "bin", + "mimeType": mimeType, "data": encodedData, "base64Encoded": 
base64Encoded, + "metadata": { + "isText": isTextFormat, + "base64Encoded": base64Encoded # For backward compatibility + }, "contents": [] } @@ -799,7 +946,7 @@ filesDelivered = {self.parseJson2text(matchingDocuments)} # Add summaries to each content item for content in contents: - content["summary"] = await self.messageSummarizeContent(content) + content["summary"] = await self.getContentExtraction(content) # Ensure base64Encoded flag is set if "base64Encoded" not in content: @@ -861,97 +1008,93 @@ filesDelivered = {self.parseJson2text(matchingDocuments)} return preparedInputs - - async def messageSummarizeContent(self, content: Dict[str, Any]) -> str: - return await self.getContentExtraction( - content, - "Create a very concise summary (1-2 sentences, maximum 200 characters) about this content." - ) - async def processDocumentForAgent(self, document: Dict[str, Any], docSpec: Dict[str, Any]) -> Dict[str, Any]: - """ - Processes a document for an agent based on the document specification. - Uses AI to extract relevant content from the document based on the specification. - - Args: - document: The document to process - docSpec: The document specification from the project manager + """ + Processes a document for an agent based on the document specification. + Uses AI to extract relevant content from the document based on the specification. 
- Returns: - Processed document with AI-extracted content - """ - processedDoc = document.copy() - partSpec = docSpec.get("contentPart", "") - - # Process each content item in the document - if "contents" in processedDoc: - processedContents = [] + Args: + document: The document to process + docSpec: The document specification from the project manager + + Returns: + Processed document with AI-extracted content + """ + processedDoc = document.copy() + partSpec = docSpec.get("contentPart", "") - for content in processedDoc["contents"]: - # Check if part required - if partSpec != "" and partSpec != content.get("name"): - continue + # Process each content item in the document + if "contents" in processedDoc: + processedContents = [] + + for content in processedDoc["contents"]: + # Check if part required + if partSpec != "" and partSpec != content.get("name"): + continue - # Get the prompt from the document specification - summary = docSpec.get("prompt", "Extract the relevant information from this document") + # Get the prompt from the document specification + summary = docSpec.get("prompt", "Extract the relevant information from this document") + + # Process content using the shared helper function + processedContent = content.copy() + processedContent["dataExtracted"] = await self.getContentExtraction(content, summary) + processedContent["metadata"]["aiProcessed"] = True + + processedContents.append(processedContent) - # Process content using the shared helper function - processedContent = content.copy() - processedContent["dataExtracted"] = await self.getContentExtraction(content, summary) - processedContent["metadata"]["aiProcessed"] = True - - processedContents.append(processedContent) + processedDoc["contents"] = processedContents - processedDoc["contents"] = processedContents - - return processedDoc + return processedDoc async def getContentExtraction(self, content: Dict[str, Any], prompt: str = None) -> str: """ - Helper function that extracts or summarizes 
content based on its type (text/image/binary). + Helper function that extracts or summarizes content based on its encoding. + For base64 encoded content, uses callAi4Image. For non-base64 content, uses callAi. Args: content: Content item to analyze - prompt: Optional custom prompt for extraction (default prompts used if not provided) + prompt: Custom prompt for extraction (default prompts used if not provided) Returns: Extracted or summarized content as text """ - # Extract relevant information - data = content.get("data", "") - contentType = content.get("contentType", "text/plain") - base64Encoded = content.get("base64Encoded", False) - - # Default prompts if none provided - if prompt is None: - text_prompt = "Create a very concise summary (1-2 sentences, maximum 200 characters) about this content." - image_prompt = "Create a very concise summary (1-2 sentences, maximum 200 characters) about this image." - else: - text_prompt = prompt - image_prompt = prompt - try: - # For image content, use the specialized image analysis - if base64Encoded: - return await self.mydom.callAi4Image(data, contentType, image_prompt) - - # For text data, use the regular AI processing - else: - return await self.mydom.callAi([ - {"role": "system", "content": "You are a content analyzer. Process the provided content as instructed."}, - {"role": "user", "content": f"{text_prompt}\n\n{data}"} - ]) + # Get content data and encoding status + data = content.get("data", "") + isBase64 = content.get("base64Encoded", False) + # Default prompts if none provided + if prompt is None: + textPrompt = "Create a very concise summary (1-2 sentences, maximum 200 characters) about this content." + imagePrompt = "Create a very concise summary (1-2 sentences, maximum 200 characters) about this image." 
+ else: + textPrompt = prompt + imagePrompt = prompt + + # Handle base64 encoded content + if isBase64: + try: + # Pass base64 encoded data directly to callAi4Image + return await self.mydom.callAi4Image(data, content.get("mimeType", "application/octet-stream"), imagePrompt) + except Exception as e: + logger.error(f"Error processing base64 content: {str(e)}") + return f"Error processing content: {str(e)}" + else: + # For non-base64 content, use callAi + return await self.mydom.callAi([ + {"role": "system", "content": "You are a content analyzer. Extract relevant information from the provided content."}, + {"role": "user", "content": f"{textPrompt}\n\nContent:\n{data}"} + ], produceUserAnswer=True) + except Exception as e: logger.error(f"Error processing content: {str(e)}") - return f"Content of type {contentType} (processing failed)" - - + return f"Error processing content: {str(e)}" def messageAdd(self, workflow: Dict[str, Any], message: Dict[str, Any]) -> Dict[str, Any]: """ Adds a message to the workflow and updates lastActivity. Saves the message in the database and updates the workflow with references. + Also updates statistics for the message. 
Args: workflow: Workflow object @@ -989,6 +1132,35 @@ filesDelivered = {self.parseJson2text(matchingDocuments)} # Set status if not present if "status" not in message: message["status"] = "step" + + # Calculate statistics for the message + bytesSent = len(message.get("content", "").encode('utf-8')) + for doc in message.get("documents", []): + if doc.get("data"): + bytesSent += len(doc["data"].encode('utf-8')) + for content in doc.get("contents", []): + if content.get("data"): + bytesSent += len(content["data"].encode('utf-8')) + + # Calculate tokens used (now using bytes) + tokensUsed = bytesSent + + # Update workflow statistics + if "dataStats" not in workflow: + workflow["dataStats"] = { + "bytesSent": 0, + "bytesReceived": 0, + "tokensUsed": 0, + "processingTime": 0 + } + + # Update statistics based on message role + if message["role"] == "user": + workflow["dataStats"]["bytesSent"] += bytesSent + workflow["dataStats"]["tokensUsed"] += tokensUsed + else: # assistant messages + workflow["dataStats"]["bytesReceived"] += bytesSent + workflow["dataStats"]["tokensUsed"] += tokensUsed # Add message to workflow workflow["messages"].append(message) @@ -1006,15 +1178,39 @@ filesDelivered = {self.parseJson2text(matchingDocuments)} # Save to database - first the message itself self.mydom.createWorkflowMessage(message) - # Then save the workflow with updated references + # Then save the workflow with updated references and statistics workflowUpdate = { "lastActivity": currentTime, - "messageIds": workflow["messageIds"] # Update the messageIds field + "messageIds": workflow["messageIds"], + "dataStats": workflow["dataStats"] # Include updated statistics } self.mydom.updateWorkflow(workflow["id"], workflowUpdate) return message + def _trimDataInJson(self, jsonObj: Any) -> Any: + """ + Trims the data attribute in JSON objects while preserving other content. 
+ + Args: + jsonObj: JSON object to process + + Returns: + Processed JSON object with trimmed data attribute + """ + if isinstance(jsonObj, dict): + # Create a copy to avoid modifying the original + result = jsonObj.copy() + if 'data' in result: + # Trim data attribute if it's a string + if isinstance(result['data'], str): + result['data'] = result['data'][:100] + '...' + # If it's a dict or list, convert to string and trim + else: + result['data'] = str(result['data'])[:100] + '...' + return result + return jsonObj + def logAdd(self, workflow: Dict[str, Any], message: str, level: str = "info", progress: Optional[int] = None) -> str: """ @@ -1043,11 +1239,24 @@ filesDelivered = {self.parseJson2text(matchingDocuments)} # Set agentName from global settings agentName = GLOBAL_WORKFLOW_LABELS.get("systemName", "unknown") + # Process message if it contains JSON + processedMessage = message + try: + if isinstance(message, str) and ("{" in message or "[" in message): + # Try to parse as JSON + jsonObj = json.loads(message) + # Trim data attribute if present + processedJson = self._trimDataInJson(jsonObj) + processedMessage = json.dumps(processedJson) + except json.JSONDecodeError: + # If parsing fails, use original message + pass + # Create log entry logEntry = { "id": logId, "workflowId": workflow["id"], - "message": message, + "message": processedMessage, "type": level, "timestamp": datetime.now().isoformat(), "agentName": agentName, @@ -1066,11 +1275,11 @@ filesDelivered = {self.parseJson2text(matchingDocuments)} # Also log in logger if level == "info": - logger.info(f"Workflow {workflow['id']}: {message}") + logger.info(f"Workflow {workflow['id']}: {processedMessage}") elif level == "warning": - logger.warning(f"Workflow {workflow['id']}: {message}") + logger.warning(f"Workflow {workflow['id']}: {processedMessage}") elif level == "error": - logger.error(f"Workflow {workflow['id']}: {message}") + logger.error(f"Workflow {workflow['id']}: {processedMessage}") return 
logId @@ -1086,56 +1295,69 @@ filesDelivered = {self.parseJson2text(matchingDocuments)} List of file IDs for the saved documents """ fileIds = [] + used_names = set() # Track used names to prevent duplicates # Extract documents from agent results documents = agentResults.get("documents", []) for doc in documents: try: - # Extract label (filename) and content - label = doc.get("label", "unnamed_file.txt") - content = doc.get("content", "") + # Extract document data according to LucyDOM model + name = doc.get("name", "") + ext = doc.get("ext", "") + data = doc.get("data", "") base64Encoded = doc.get("base64Encoded", False) - # Split label into name and extension - name, ext = os.path.splitext(label) - if ext.startswith('.'): - ext = ext[1:] # Remove leading dot - elif not ext: - # If no extension is provided, default to .txt for text content - ext = "txt" - label = f"{label}.{ext}" + # Skip if no name or data + if not name or not data: + logger.warning(f"Skipping document with missing name or data. 
Name: {name}, Has data: {bool(data)}") + continue + + # Ensure unique filename + base_name = name + counter = 1 + while f"{base_name}.{ext}" in used_names: + base_name = f"{name}_{counter}" + counter += 1 + used_names.add(f"{base_name}.{ext}") # Convert content to bytes based on base64Encoded flag - if isinstance(content, str): + if isinstance(data, str): if base64Encoded: # Decode base64 to bytes try: import base64 - fileContent = base64.b64decode(content) + fileContent = base64.b64decode(data) except Exception as e: logger.warning(f"Failed to decode base64 content: {str(e)}") - fileContent = content.encode('utf-8') + fileContent = data.encode('utf-8') base64Encoded = False else: # Convert text to bytes - fileContent = content.encode('utf-8') + fileContent = data.encode('utf-8') else: # Already bytes - fileContent = content + fileContent = data # Determine MIME type based on extension - mimeType = self.mydom.getMimeType(label) + mimeType = self.mydom.getMimeType(f"{base_name}.{ext}") - # Save file to database - fileMeta = self.mydom.saveUploadedFile(fileContent, label) + # Create file metadata + fileMeta = self.mydom.createFile( + name=base_name, + mimeType=mimeType, + size=len(fileContent) + ) if fileMeta and "id" in fileMeta: - fileId = fileMeta["id"] - fileIds.append(fileId) - logger.info(f"Saved document '{label}' with file ID: {fileId} (base64Encoded: {base64Encoded})") + # Save file content + if self.mydom.createFileData(fileMeta["id"], fileContent): + fileIds.append(fileMeta["id"]) + logger.info(f"Saved document '{base_name}.{ext}' with file ID: {fileMeta['id']} (base64Encoded: {base64Encoded})") + else: + logger.warning(f"Failed to save content for document '{base_name}.{ext}'") else: - logger.warning(f"Failed to save document '{label}'") + logger.warning(f"Failed to create file metadata for '{base_name}.{ext}'") except Exception as e: logger.error(f"Error saving document from agent results: {str(e)}") @@ -1174,11 +1396,19 @@ filesDelivered = 
{self.parseJson2text(matchingDocuments)} # Extract summaries from all contents contentSummaries = [] - for content in doc.get("contents", []): + if "contents" in doc and doc["contents"]: + for content in doc["contents"]: + contentSummaries.append({ + "contentPart": content.get("name", "noname"), + "metadata": content.get("metadata", ""), + "summary": content.get("summary", "No summary"), + }) + else: + # Add a default content summary if no contents exist contentSummaries.append({ - "contentPart": content.get("name", "noname"), - "metadata": content.get("metadata", ""), - "summary": content.get("summary", "No summary"), + "contentPart": "1_undefined", + "metadata": "", + "summary": "No content extracted", }) # Create document info @@ -1277,11 +1507,12 @@ filesDelivered = {self.parseJson2text(matchingDocuments)} # Singleton factory for the WorkflowManager _workflowManagers = {} +_workflowManagerLastAccess = {} # Track last access time for cleanup def getWorkflowManager(mandateId: int = 0, userId: int = 0) -> WorkflowManager: """ Returns a WorkflowManager for the specified context. - Reuses existing instances. + Reuses existing instances but implements cleanup for inactive instances. 
Args: mandateId: ID of the mandate @@ -1291,6 +1522,32 @@ def getWorkflowManager(mandateId: int = 0, userId: int = 0) -> WorkflowManager: WorkflowManager instance """ contextKey = f"{mandateId}_{userId}" + current_time = datetime.now() + + # Update last access time + _workflowManagerLastAccess[contextKey] = current_time + + # Cleanup old instances (older than 1 hour) + cleanup_threshold = current_time - timedelta(hours=1) + for key in list(_workflowManagers.keys()): + if _workflowManagerLastAccess.get(key, current_time) < cleanup_threshold: + del _workflowManagers[key] + del _workflowManagerLastAccess[key] + if contextKey not in _workflowManagers: _workflowManagers[contextKey] = WorkflowManager(mandateId, userId) - return _workflowManagers[contextKey] \ No newline at end of file + return _workflowManagers[contextKey] + +def cleanupWorkflowManager(mandateId: int, userId: int) -> None: + """ + Explicitly cleanup a WorkflowManager instance. + + Args: + mandateId: ID of the mandate + userId: ID of the user + """ + contextKey = f"{mandateId}_{userId}" + if contextKey in _workflowManagers: + del _workflowManagers[contextKey] + if contextKey in _workflowManagerLastAccess: + del _workflowManagerLastAccess[contextKey] \ No newline at end of file diff --git a/notes/changelog.txt b/notes/changelog.txt index 72a0aafe..75cb1e2d 100644 --- a/notes/changelog.txt +++ b/notes/changelog.txt @@ -1,28 +1,33 @@ ....................... TASKS + +agentDocumentation delivers a ".docx" file, but the content is a ".md" text markup file + +access management to extract into separate modules "lucydomAccess.py" and "gatewayAccess.py". Here to move the functions from "*Interface.py", which define what access which role has. + +check data extraction tabelle im pdf + +Check data extraction of types! + + + + ----------------------- OPEN PRIO1: -CHECK: If pictures not displayed to check utf-8 encoding in the base64 string!! 
general file writing and reading (example with svg) - -add connector to myoutlook +sharepoint connector with document search, content search, content extraction PRIO2: -todo an agent for "code writing and editing" connected to the codebase, working in loops over each document... - sharepoint connector with document search, content search, content extraction Split big files into content-parts Integrate NDA Text as modal form - Data governance agreement by login with checkbox -frontend to react - -frontend: no labels definition PRIO3: @@ -30,7 +35,7 @@ PRIO3: Tools to transfer incl funds: - Google SERPAPI (shelly) - Anthropic Claude (valueon + shelly) -- +- Cursor Pro ----------------------- DONE diff --git a/routes/routeMsft.py b/routes/routeMsft.py index 0c6edbd1..c5afed9b 100644 --- a/routes/routeMsft.py +++ b/routes/routeMsft.py @@ -1,12 +1,11 @@ from fastapi import APIRouter, HTTPException, Depends, Request, Response, status, Cookie from fastapi.responses import HTMLResponse, RedirectResponse, JSONResponse import msal -import os import logging -import sys import json -from typing import Dict, Any, Optional +from typing import Dict, Any, Optional, List from datetime import datetime, timedelta +import secrets from modules.auth import getCurrentActiveUser, getUserContext, createAccessToken, ACCESS_TOKEN_EXPIRE_MINUTES from modules.configuration import APP_CONFIG @@ -45,26 +44,67 @@ app_config = { "redirect_uri": REDIRECT_URI } -# Create a simple file-based token storage -TOKEN_DIR = './token_storage' -if not os.path.exists(TOKEN_DIR): - os.makedirs(TOKEN_DIR) - logger.info(f"Created token storage directory: {TOKEN_DIR}") +async def save_token_to_file(token_data, currentUser: Dict[str, Any]): + """Save token data to database using LucyDOMInterface""" + try: + # Get current user context + mandateId, userId = await getUserContext(currentUser) + if not mandateId or not userId: + logger.error("No user context available for token storage") + return False + + # Get 
LucyDOM interface for current user + mydom = getLucydomInterface( + mandateId=mandateId, + userId=userId + ) + if not mydom: + logger.error("No LucyDOM interface available for token storage") + return False + + # Save token to database + success = mydom.saveMsftToken(token_data) + if success: + logger.info("Token saved successfully to database") + return True + else: + logger.error("Failed to save token to database") + return False + + except Exception as e: + logger.error(f"Error saving token: {str(e)}") + return False -def save_token_to_file(user_id: str, token_data: Dict[str, Any]): - """Save token data to a file""" - filename = os.path.join(TOKEN_DIR, f"{user_id}.json") - with open(filename, 'w') as f: - json.dump(token_data, f) - logger.info(f"Token saved for user: {user_id}") - -def load_token_from_file(user_id: str) -> Optional[Dict[str, Any]]: - """Load token data from a file""" - filename = os.path.join(TOKEN_DIR, f"{user_id}.json") - if os.path.exists(filename): - with open(filename, 'r') as f: - return json.load(f) - return None +async def load_token_from_file(currentUser: Dict[str, Any]): + """Load token data from database using LucyDOMInterface""" + try: + # Get current user context + mandateId, userId = await getUserContext(currentUser) + if not mandateId or not userId: + logger.error("No user context available for token retrieval") + return None + + # Get LucyDOM interface for current user + mydom = getLucydomInterface( + mandateId=mandateId, + userId=userId + ) + if not mydom: + logger.error("No LucyDOM interface available for token retrieval") + return None + + # Get token from database + token_data = mydom.getMsftToken() + if token_data: + logger.info("Token loaded successfully from database") + return token_data + else: + logger.info("No token found in database") + return None + + except Exception as e: + logger.error(f"Error loading token: {str(e)}") + return None def get_user_info_from_token(access_token: str) -> Optional[Dict[str, Any]]: 
"""Get user information using the access token""" @@ -112,9 +152,9 @@ def verify_token(token: str) -> bool: logger.error(f"Exception verifying token: {str(e)}") return False -def refresh_token(user_id: str) -> bool: +async def refresh_token(user_id: str, currentUser: Dict[str, Any]) -> bool: """Refresh the access token using the stored refresh token""" - token_data = load_token_from_file(user_id) + token_data = await load_token_from_file(currentUser) if not token_data or not token_data.get("refresh_token"): logger.warning("No refresh token available") return False @@ -139,45 +179,13 @@ def refresh_token(user_id: str) -> bool: if "refresh_token" in result: token_data["refresh_token"] = result["refresh_token"] - save_token_to_file(user_id, token_data) + await save_token_to_file(token_data, currentUser) logger.info("Access token refreshed successfully") return True -def silent_login(user_id: str) -> bool: - """Try to silently log in a user using their refresh token""" - token_data = load_token_from_file(user_id) - if not token_data or not token_data.get("refresh_token"): - logger.info(f"No refresh token found for user: {user_id}") - return False - - # Try to refresh the token - msal_app = msal.ConfidentialClientApplication( - app_config["client_id"], - authority=app_config["authority"], - client_credential=app_config["client_credential"] - ) - - result = msal_app.acquire_token_by_refresh_token( - token_data["refresh_token"], - scopes=SCOPES - ) - - if "error" in result: - logger.error(f"Error refreshing token: {result.get('error')}") - return False - - # Update tokens in storage - token_data["access_token"] = result["access_token"] - if "refresh_token" in result: - token_data["refresh_token"] = result["refresh_token"] - - save_token_to_file(user_id, token_data) - - return True - @router.get("/login") async def login(): - # Modified implementation without requiring current user + """Initiate Microsoft login for the current user""" try: # Create a confidential client 
application msal_app = msal.ConfidentialClientApplication( @@ -186,225 +194,293 @@ async def login(): client_credential=app_config["client_credential"] ) - # Build the auth URL + # Build the auth URL with a random state + state = secrets.token_urlsafe(32) + auth_url = msal_app.get_authorization_request_url( SCOPES, - state="anonymous-user", # Use a general state since we don't have user context + state=state, # Use random state redirect_uri=app_config["redirect_uri"] ) - logger.info(f"Redirecting to Microsoft login: {auth_url[:60]}...") + logger.info(f"Redirecting to Microsoft login") return RedirectResponse(auth_url) except Exception as e: logger.error(f"Error initiating Microsoft login: {str(e)}") raise HTTPException( status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, - detail=f"Error initiating Microsoft login: {str(e)}" + detail=f"Failed to initiate Microsoft login: {str(e)}" ) - + @router.get("/auth/callback") -async def auth_callback(request: Request, code: str = None, state: str = None): - """Handle callback from Microsoft login""" +async def auth_callback(code: str, state: str, request: Request): + """Handle Microsoft OAuth callback""" try: - # Log callback for debugging - logger.info("Received callback from Microsoft login") - - if not code: - logger.error("No authorization code received in callback") - return JSONResponse( - status_code=status.HTTP_400_BAD_REQUEST, - content={"message": "No authorization code received"} - ) - - # Extract user and mandate info from state if available - user_id = None - mandate_id = None - - if state and state != "anonymous-user": - try: - mandate_id, user_id = state.split(":") - logger.info(f"State contains mandate_id: {mandate_id}, user_id: {user_id}") - except ValueError: - logger.warning(f"Invalid state format: {state}") - # Generate a generic user ID if state is invalid - user_id = f"user_{datetime.now().strftime('%Y%m%d%H%M%S')}" - else: - # For anonymous authentication, create a generic user ID - 
logger.info("Anonymous authentication (no user context)") - user_id = f"user_{datetime.now().strftime('%Y%m%d%H%M%S')}" - - # Create a confidential client application - msal_app = msal.ConfidentialClientApplication( - app_config["client_id"], - authority=app_config["authority"], - client_credential=app_config["client_credential"] + # Create MSAL app instance + app = msal.ConfidentialClientApplication( + client_id=CLIENT_ID, + client_credential=CLIENT_SECRET, + authority=AUTHORITY ) - # Get tokens using the authorization code - result = msal_app.acquire_token_by_authorization_code( - code, + # Exchange code for token + token_response = app.acquire_token_by_authorization_code( + code=code, scopes=SCOPES, - redirect_uri=app_config["redirect_uri"] + redirect_uri=REDIRECT_URI ) - if "error" in result: - logger.error(f"Error acquiring token: {result.get('error')}") - return JSONResponse( - status_code=status.HTTP_400_BAD_REQUEST, - content={"message": f"Error acquiring token: {result.get('error_description', result.get('error'))}"} + if "error" in token_response: + logger.error(f"Token acquisition failed: {token_response['error']}") + return HTMLResponse( + content=""" + + + Authentication Failed + + + +

Authentication Failed

+

Please try again.

+ + + + """, + status_code=400 ) - # Store user information - user_info = {} - if "id_token_claims" in result: - user_info = { - "name": result["id_token_claims"].get("name", ""), - "email": result["id_token_claims"].get("preferred_username", ""), - } - - # If we have user info from the token, use that for user_id - token_user_id = result["id_token_claims"].get("oid") or result["id_token_claims"].get("sub") - if token_user_id: - user_id = token_user_id - elif not user_id and user_info.get("email"): - # Fall back to email-based ID if no other ID is available - user_id = user_info.get("email", "user").replace("@", "_").replace(".", "_") - - # Save tokens to file - token_data = { - "access_token": result["access_token"], - "refresh_token": result.get("refresh_token", ""), - "user_info": user_info, - "timestamp": datetime.now().isoformat() - } - - # Ensure token directory exists - if not os.path.exists(TOKEN_DIR): - os.makedirs(TOKEN_DIR) - - # Save token to file - token_file = os.path.join(TOKEN_DIR, f"{user_id}.json") - with open(token_file, 'w') as f: - json.dump(token_data, f) - - logger.info(f"User authenticated: {user_info.get('email', 'unknown')}") - - # Create a success page - html_content = """ - + # Get user info from token + user_info = get_user_info_from_token(token_response["access_token"]) + if not user_info: + logger.error("Failed to get user info from token") + return HTMLResponse( + content=""" + + + Authentication Failed + + + +

Authentication Failed

+

Could not retrieve user information.

+ + + + """, + status_code=400 + ) + + # Add user info to token data + token_response["user_info"] = user_info + + # Store tokens in session storage for the frontend to pick up + response = HTMLResponse( + content=f""" - - - Authentication Successful - - - -
-

Authentication Successful

-
-

You have successfully authenticated with Microsoft.

-

You can now close this tab and return to the application.

-

Your email templates will now be able to create drafts in your mailbox.

- Close Window -
- - + + Authentication Successful + + + +

Authentication Successful

+

Welcome, {user_info.get('name', 'User')}!

+

This window will close automatically.

+ + """ - - return HTMLResponse(content=html_content) + ) - else: - logger.warning("No id_token_claims found in result") - return JSONResponse( - status_code=status.HTTP_400_BAD_REQUEST, - content={"message": "Failed to retrieve user information"} - ) + return response except Exception as e: - logger.error(f"Error in auth callback: {str(e)}", exc_info=True) - return JSONResponse( - status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, - content={"message": f"Error in auth callback: {str(e)}"} + logger.error(f"Authentication failed: {str(e)}") + return HTMLResponse( + content=""" + + + Authentication Failed + + + +

Authentication Failed

+

An error occurred during authentication.

+ + + + """, + status_code=500 ) - + @router.get("/status") -async def auth_status( - msft_user_id: Optional[str] = Cookie(None), - currentUser: Dict[str, Any] = Depends(getCurrentActiveUser) -): +async def auth_status(currentUser: Dict[str, Any] = Depends(getCurrentActiveUser)): """Check Microsoft authentication status""" try: - # Get user ID - if not msft_user_id: - mandateId, userId = await getUserContext(currentUser) - user_id = str(userId) - else: - user_id = msft_user_id + # Get current user context + mandateId, userId = await getUserContext(currentUser) + if not mandateId or not userId: + logger.info("No user context found") + return JSONResponse({ + "authenticated": False, + "message": "Not authenticated with Microsoft" + }) + + # Check if we have a token for the current user + token_data = await load_token_from_file(currentUser) - # Check if user has a token - token_data = load_token_from_file(user_id) if not token_data: - return JSONResponse( - content={"authenticated": False, "message": "Not authenticated with Microsoft"} + logger.info(f"No token data found for user {userId}") + return JSONResponse({ + "authenticated": False, + "message": "Not authenticated with Microsoft" + }) + + # Verify token is still valid + if not verify_token(token_data["access_token"]): + logger.info("Token invalid, attempting refresh") + # Try to refresh the token + if not await refresh_token(userId, currentUser): + logger.info("Token refresh failed") + return JSONResponse({ + "authenticated": False, + "message": "Token expired and refresh failed" + }) + # Reload token data after refresh + token_data = await load_token_from_file(currentUser) + + # Get user info from token data + user_info = token_data.get("user_info") + if not user_info: + logger.info("No user info found in token data") + return JSONResponse({ + "authenticated": False, + "message": "No user information available" + }) + + logger.info(f"User {user_info.get('name')} is authenticated") + return JSONResponse({ + 
"authenticated": True, + "user": user_info + }) + + except Exception as e: + logger.error(f"Error checking authentication status: {str(e)}") + return JSONResponse({ + "authenticated": False, + "message": f"Error checking authentication status: {str(e)}" + }) + +@router.post("/logout") +async def logout(currentUser: Dict[str, Any] = Depends(getCurrentActiveUser)): + """Logout from Microsoft""" + try: + # Get current user context + mandateId, userId = await getUserContext(currentUser) + if not mandateId or not userId: + return JSONResponse({ + "message": "Not authenticated with Microsoft" + }) + + # Get LucyDOM interface for current user + mydom = getLucydomInterface( + mandateId=mandateId, + userId=userId + ) + if not mydom: + return JSONResponse({ + "message": "Not authenticated with Microsoft" + }) + + # Remove token from database + tokens = mydom.db.getRecordset("msftTokens", recordFilter={ + "mandateId": mandateId, + "userId": userId + }) + + if tokens and len(tokens) > 0: + mydom.db.recordDelete("msftTokens", tokens[0]["id"]) + logger.info(f"Removed Microsoft token for user {userId}") + + return JSONResponse({ + "message": "Successfully logged out from Microsoft" + }) + + except Exception as e: + logger.error(f"Error during logout: {str(e)}") + raise HTTPException( + status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, + detail=f"Logout failed: {str(e)}" + ) + +@router.get("/token") +async def get_access_token(currentUser: Dict[str, Any] = Depends(getCurrentActiveUser)): + """Get the current user's access token for Microsoft Graph API""" + try: + # Check if we have a token for the current user + token_data = await load_token_from_file(currentUser) + + if not token_data: + raise HTTPException( + status_code=status.HTTP_401_UNAUTHORIZED, + detail="Not authenticated with Microsoft" ) - # Check if token is valid - if not verify_token(token_data.get("access_token", "")): - # Try to refresh token - if refresh_token(user_id): - token_data = load_token_from_file(user_id) 
- user_info = token_data.get("user_info", {}) - return JSONResponse( - content={ - "authenticated": True, - "message": "Token refreshed successfully", - "user": user_info - } - ) - else: - return JSONResponse( - content={ - "authenticated": False, - "message": "Token expired and couldn't be refreshed" - } + # Verify token is still valid + if not verify_token(token_data["access_token"]): + # Try to refresh the token + if not await refresh_token(currentUser["id"], currentUser): + raise HTTPException( + status_code=status.HTTP_401_UNAUTHORIZED, + detail="Token expired and refresh failed" ) + # Reload token data after refresh + token_data = await load_token_from_file(currentUser) + + return JSONResponse({ + "access_token": token_data["access_token"] + }) - # Token is valid, return user info - user_info = token_data.get("user_info", {}) - return JSONResponse( - content={ - "authenticated": True, - "message": "Authenticated with Microsoft", - "user": user_info - } - ) - except Exception as e: - logger.error(f"Error checking auth status: {str(e)}") - return JSONResponse( + logger.error(f"Error getting access token: {str(e)}") + raise HTTPException( status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, - content={"message": f"Error checking auth status: {str(e)}"} + detail=f"Error getting access token: {str(e)}" ) + @router.post("/token") async def get_backend_token(request: Request): """Convert MSAL token to backend token""" @@ -467,3 +543,74 @@ async def get_backend_token(request: Request): status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=f"Error processing MSAL token: {str(e)}" ) +======= +@router.post("/save-token") +async def save_token(token_data: Dict[str, Any], currentUser: Dict[str, Any] = Depends(getCurrentActiveUser)): + """Save Microsoft token data from frontend""" + try: + # Save token to database + success = await save_token_to_file(token_data, currentUser) + + if success: + return JSONResponse({ + "success": True, + "message": "Token saved 
successfully" + }) + else: + return JSONResponse({ + "success": False, + "message": "Failed to save token" + }) + + except Exception as e: + logger.error(f"Error saving token: {str(e)}") + return JSONResponse({ + "success": False, + "message": f"Error saving token: {str(e)}" + }) + +async def generateFinalMessage(self, objUserResponse: str, objFinalDocuments: List[str], objResults: List[Dict[str, Any]]) -> Dict[str, Any]: + """Generate the final message for the workflow""" + try: + # Get list of delivered documents + matchingDocuments = [] + for result in objResults: + if "documents" in result: + for doc in result["documents"]: + if doc.get("label") in objFinalDocuments: + matchingDocuments.append(doc.get("label")) + + # Use the mydom for language-aware AI calls + finalPrompt = await self.mydom.callAi([ + {"role": "system", "content": "You are a project manager, who delivers results to a user."}, + {"role": "user", "content": f""" +Give a brief summary of what has been accomplished, referencing the initial request (objUserResponse). List only the files that have been successfully delivered (filesDelivered). Keep the message concise and professional. + +Here the data: +objUserResponse = {self.parseJson2text(objUserResponse)} +filesDelivered = {self.parseJson2text(matchingDocuments)} +""" + } + ], produceUserAnswer=True) + + # Create basic message structure with proper fields + logger.debug(f"FINAL PROMPT = {self.parseJson2text(finalPrompt)}.") + finalMessage = { + "role": "assistant", + "agentName": "Project Manager", + "content": finalPrompt, + "documents": [] # DO NOT include the results documents, already with agents + } + + logger.debug(f"FINAL MESSAGE = {self.parseJson2text(finalMessage)}.") + return finalMessage + + except Exception as e: + logger.error(f"Error generating final message: {str(e)}") + return { + "role": "assistant", + "agentName": "Project Manager", + "content": "I apologize, but there was an error generating the final message. 
Please check the logs for more details.", + "documents": [] + } + diff --git a/static/10_email_preview.html b/static/10_email_preview.html deleted file mode 100644 index c900e097..00000000 --- a/static/10_email_preview.html +++ /dev/null @@ -1,42 +0,0 @@ - - - - - - Email Preview: Verschiebung des Meetings auf Freitag - - - -
-
-

Email Template Preview

-
-
-
-
To:
-
peter.muster@domain.com
-
-
-
Subject:
-
Verschiebung des Meetings auf Freitag
-
- -
- -
- - - \ No newline at end of file diff --git a/static/11_email_template.json b/static/11_email_template.json deleted file mode 100644 index bf14e27b..00000000 --- a/static/11_email_template.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "recipient": "peter.muster@domain.com", - "subject": "Verschiebung des Meetings auf Freitag", - "plainBody": "Sehr geehrter Herr Muster,\n\nich hoffe, es geht Ihnen gut. Ich schreibe Ihnen, um unser geplantes Meeting von 10 Uhr auf Freitag zu verschieben. Bitte lassen Sie mich wissen, ob dieser neue Termin f\u00fcr Sie passt.\n\nVielen Dank f\u00fcr Ihr Verst\u00e4ndnis.\n\nMit freundlichen Gr\u00fc\u00dfen,\n\n[Ihr Name]", - "htmlBody": "

Sehr geehrter Herr Muster,

ich hoffe, es geht Ihnen gut. Ich schreibe Ihnen, um unser geplantes Meeting von 10 Uhr auf Freitag zu verschieben. Bitte lassen Sie mich wissen, ob dieser neue Termin f\u00fcr Sie passt.

Vielen Dank f\u00fcr Ihr Verst\u00e4ndnis.

Mit freundlichen Gr\u00fc\u00dfen,
[Ihr Name]

" -} \ No newline at end of file diff --git a/static/12_email_preview.html b/static/12_email_preview.html deleted file mode 100644 index 87962b01..00000000 --- a/static/12_email_preview.html +++ /dev/null @@ -1,42 +0,0 @@ - - - - - - Email Preview: Anfrage zur Terminverschiebung - - - -
-
-

Email Template Preview

-
-
-
-
To:
-
peter.muster@domain.com
-
-
-
Subject:
-
Anfrage zur Terminverschiebung
-
- -
- -
- - - \ No newline at end of file diff --git a/static/13_email_template.json b/static/13_email_template.json deleted file mode 100644 index ab8a946c..00000000 --- a/static/13_email_template.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "recipient": "peter.muster@domain.com", - "subject": "Anfrage zur Terminverschiebung", - "plainBody": "Sehr geehrter Herr Muster,\n\nich hoffe, diese Nachricht trifft Sie wohl. Ich schreibe Ihnen, um eine Verschiebung unseres Termins von 10 Uhr auf Freitag zu erbitten. Bitte lassen Sie mich wissen, ob dies f\u00fcr Sie m\u00f6glich ist.\n\nVielen Dank im Voraus f\u00fcr Ihre Flexibilit\u00e4t.\n\nMit freundlichen Gr\u00fc\u00dfen,\n\n[Ihr Name]", - "htmlBody": "

Sehr geehrter Herr Muster,

ich hoffe, diese Nachricht trifft Sie wohl. Ich schreibe Ihnen, um eine Verschiebung unseres Termins von 10 Uhr auf Freitag zu erbitten. Bitte lassen Sie mich wissen, ob dies f\u00fcr Sie m\u00f6glich ist.

Vielen Dank im Voraus f\u00fcr Ihre Flexibilit\u00e4t.

Mit freundlichen Gr\u00fc\u00dfen,
[Ihr Name]

" -} \ No newline at end of file diff --git a/static/14_microsoft_authentication.html b/static/14_microsoft_authentication.html deleted file mode 100644 index b8a50d7f..00000000 --- a/static/14_microsoft_authentication.html +++ /dev/null @@ -1,47 +0,0 @@ - - - - - - Microsoft Authentication Required - - - -
-

Microsoft Authentication Required

- -

To create email templates and drafts, you need to authenticate with your Microsoft account. Follow these steps:

- -
- 1 - Click the authentication link below -
- - Authenticate with Microsoft - -
- 2 - Sign in with your Microsoft account and grant the required permissions -
- -
- 3 - Return to this application and run the email agent again after completing authentication -
- -
-

Note: You only need to authenticate once. Your session will be remembered for future email operations.

-
-
- - - \ No newline at end of file diff --git a/static/15_microsoft_authentication.html b/static/15_microsoft_authentication.html deleted file mode 100644 index 521bae1c..00000000 --- a/static/15_microsoft_authentication.html +++ /dev/null @@ -1,28 +0,0 @@ - - - - - - Microsoft Authentication Required - - - -
-

Microsoft Authentication Required

- -

To create email templates and drafts, you need to authenticate with your Microsoft account.

- -

The application will now initiate the Microsoft authentication process. Please follow the instructions in the authentication window.

- -
-

Note: You only need to authenticate once. Your session will be remembered for future email operations.

-
-
- - - \ No newline at end of file diff --git a/static/16_email_preview.html b/static/16_email_preview.html deleted file mode 100644 index 95096bad..00000000 --- a/static/16_email_preview.html +++ /dev/null @@ -1,42 +0,0 @@ - - - - - - Email Preview: Verschiebung des Meetings auf Freitag - - - -
-
-

Email Template Preview

-
-
-
-
To:
-
peter.muster@domain.com
-
-
-
Subject:
-
Verschiebung des Meetings auf Freitag
-
- -
- -
- - - \ No newline at end of file diff --git a/static/17_email_template.json b/static/17_email_template.json deleted file mode 100644 index 90e8f9f3..00000000 --- a/static/17_email_template.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "recipient": "peter.muster@domain.com", - "subject": "Verschiebung des Meetings auf Freitag", - "plainBody": "Sehr geehrter Herr Muster,\n\nich hoffe, es geht Ihnen gut. Ich schreibe Ihnen, um unser geplantes Meeting um 10 Uhr auf Freitag zu verschieben. Bitte lassen Sie mich wissen, ob dieser Termin f\u00fcr Sie passt.\n\nVielen Dank f\u00fcr Ihr Verst\u00e4ndnis.\n\nMit freundlichen Gr\u00fc\u00dfen,\n\n[Ihr Name]", - "htmlBody": "

Sehr geehrter Herr Muster,

ich hoffe, es geht Ihnen gut. Ich schreibe Ihnen, um unser geplantes Meeting um 10 Uhr auf Freitag zu verschieben. Bitte lassen Sie mich wissen, ob dieser Termin f\u00fcr Sie passt.

Vielen Dank f\u00fcr Ihr Verst\u00e4ndnis.

Mit freundlichen Gr\u00fc\u00dfen,

[Ihr Name]

" -} \ No newline at end of file diff --git a/static/18_generated_code.py b/static/18_generated_code.py deleted file mode 100644 index b53f58c4..00000000 --- a/static/18_generated_code.py +++ /dev/null @@ -1,48 +0,0 @@ -inputFiles = [] # DO NOT CHANGE THIS LINE - -# REQUIREMENTS: - -import json -import csv -from io import StringIO - -def is_prime(n): - if n <= 1: - return False - if n <= 3: - return True - if n % 2 == 0 or n % 3 == 0: - return False - i = 5 - while i * i <= n: - if n % i == 0 or n % (i + 2) == 0: - return False - i += 6 - return True - -def generate_primes(limit): - primes = [] - num = 2 - while len(primes) < limit: - if is_prime(num): - primes.append(num) - num += 1 - return primes - -primes = generate_primes(1000) - -output = StringIO() -csv_writer = csv.writer(output) -for prime in primes: - csv_writer.writerow([prime]) - -result = { - "prime_numbers.csv": { - "content": output.getvalue(), - "base64Encoded": False, - "contentType": "text/csv" - } -} - -import json -print(json.dumps(result)) \ No newline at end of file diff --git a/static/19_execution_history.json b/static/19_execution_history.json deleted file mode 100644 index 8b61dc57..00000000 --- a/static/19_execution_history.json +++ /dev/null @@ -1,19 +0,0 @@ -[ - { - "attempt": 1, - "code": "inputFiles = [] # DO NOT CHANGE THIS LINE\n\n# REQUIREMENTS: \n\nimport json\nimport csv\nfrom io import StringIO\n\ndef is_prime(n):\n if n <= 1:\n return False\n if n <= 3:\n return True\n if n % 2 == 0 or n % 3 == 0:\n return False\n i = 5\n while i * i <= n:\n if n % i == 0 or n % (i + 2) == 0:\n return False\n i += 6\n return True\n\ndef generate_primes(limit):\n primes = []\n num = 2\n while len(primes) < limit:\n if is_prime(num):\n primes.append(num)\n num += 1\n return primes\n\nprimes = generate_primes(1000)\n\noutput = StringIO()\ncsv_writer = csv.writer(output)\nfor prime in primes:\n csv_writer.writerow([prime])\n\nresult = {\n \"prime_numbers.csv\": {\n \"content\": output.getvalue(),\n 
\"base64Encoded\": False,\n \"contentType\": \"text/csv\"\n }\n}\n\nimport json\nprint(json.dumps(result))", - "result": { - "success": true, - "output": "{\"prime_numbers.csv\": {\"content\": \"2\\r\\n3\\r\\n5\\r\\n7\\r\\n11\\r\\n13\\r\\n17\\r\\n19\\r\\n23\\r\\n29\\r\\n31\\r\\n37\\r\\n41\\r\\n43\\r\\n47\\r\\n53\\r\\n59\\r\\n61\\r\\n67\\r\\n71\\r\\n73\\r\\n79\\r\\n83\\r\\n89\\r\\n97\\r\\n101\\r\\n103\\r\\n107\\r\\n109\\r\\n113\\r\\n127\\r\\n131\\r\\n137\\r\\n139\\r\\n149\\r\\n151\\r\\n157\\r\\n163\\r\\n167\\r\\n173\\r\\n179\\r\\n181\\r\\n191\\r\\n193\\r\\n197\\r\\n199\\r\\n211\\r\\n223\\r\\n227\\r\\n229\\r\\n233\\r\\n239\\r\\n241\\r\\n251\\r\\n257\\r\\n263\\r\\n269\\r\\n271\\r\\n277\\r\\n281\\r\\n283\\r\\n293\\r\\n307\\r\\n311\\r\\n313\\r\\n317\\r\\n331\\r\\n337\\r\\n347\\r\\n349\\r\\n353\\r\\n359\\r\\n367\\r\\n373\\r\\n379\\r\\n383\\r\\n389\\r\\n397\\r\\n401\\r\\n409\\r\\n419\\r\\n421\\r\\n431\\r\\n433\\r\\n439\\r\\n443\\r\\n449\\r\\n457\\r\\n461\\r\\n463\\r\\n467\\r\\n479\\r\\n487\\r\\n491\\r\\n499\\r\\n503\\r\\n509\\r\\n521\\r\\n523\\r\\n541\\r\\n547\\r\\n557\\r\\n563\\r\\n569\\r\\n571\\r\\n577\\r\\n587\\r\\n593\\r\\n599\\r\\n601\\r\\n607\\r\\n613\\r\\n617\\r\\n619\\r\\n631\\r\\n641\\r\\n643\\r\\n647\\r\\n653\\r\\n659\\r\\n661\\r\\n673\\r\\n677\\r\\n683\\r\\n691\\r\\n701\\r\\n709\\r\\n719\\r\\n727\\r\\n733\\r\\n739\\r\\n743\\r\\n751\\r\\n757\\r\\n761\\r\\n769\\r\\n773\\r\\n787\\r\\n797\\r\\n809\\r\\n811\\r\\n821\\r\\n823\\r\\n827\\r\\n829\\r\\n839\\r\\n853\\r\\n857\\r\\n859\\r\\n863\\r\\n877\\r\\n881\\r\\n883\\r\\n887\\r\\n907\\r\\n911\\r\\n919\\r\\n929\\r\\n937\\r\\n941\\r\\n947\\r\\n953\\r\\n967\\r\\n971\\r\\n977\\r\\n983\\r\\n991\\r\\n997\\r\\n1009\\r\\n1013\\r\\n1019\\r\\n1021\\r\\n1031\\r\\n1033\\r\\n1039\\r\\n1049\\r\\n1051\\r\\n1061\\r\\n1063\\r\\n1069\\r\\n1087\\r\\n1091\\r\\n1093\\r\\n1097\\r\\n1103\\r\\n1109\\r\\n1117\\r\\n1123\\r\\n1129\\r\\n1151\\r\\n1153\\r\\n1163\\r\\n1171\\r\\n1181\\r\\n1187\\r\\n1193\\r\\n1201\\r\\n1213\\r\\n1217\\r\\n1223\\r\\n12
29\\r\\n1231\\r\\n1237\\r\\n1249\\r\\n1259\\r\\n1277\\r\\n1279\\r\\n1283\\r\\n1289\\r\\n1291\\r\\n1297\\r\\n1301\\r\\n1303\\r\\n1307\\r\\n1319\\r\\n1321\\r\\n1327\\r\\n1361\\r\\n1367\\r\\n1373\\r\\n1381\\r\\n1399\\r\\n1409\\r\\n1423\\r\\n1427\\r\\n1429\\r\\n1433\\r\\n1439\\r\\n1447\\r\\n1451\\r\\n1453\\r\\n1459\\r\\n1471\\r\\n1481\\r\\n1483\\r\\n1487\\r\\n1489\\r\\n1493\\r\\n1499\\r\\n1511\\r\\n1523\\r\\n1531\\r\\n1543\\r\\n1549\\r\\n1553\\r\\n1559\\r\\n1567\\r\\n1571\\r\\n1579\\r\\n1583\\r\\n1597\\r\\n1601\\r\\n1607\\r\\n1609\\r\\n1613\\r\\n1619\\r\\n1621\\r\\n1627\\r\\n1637\\r\\n1657\\r\\n1663\\r\\n1667\\r\\n1669\\r\\n1693\\r\\n1697\\r\\n1699\\r\\n1709\\r\\n1721\\r\\n1723\\r\\n1733\\r\\n1741\\r\\n1747\\r\\n1753\\r\\n1759\\r\\n1777\\r\\n1783\\r\\n1787\\r\\n1789\\r\\n1801\\r\\n1811\\r\\n1823\\r\\n1831\\r\\n1847\\r\\n1861\\r\\n1867\\r\\n1871\\r\\n1873\\r\\n1877\\r\\n1879\\r\\n1889\\r\\n1901\\r\\n1907\\r\\n1913\\r\\n1931\\r\\n1933\\r\\n1949\\r\\n1951\\r\\n1973\\r\\n1979\\r\\n1987\\r\\n1993\\r\\n1997\\r\\n1999\\r\\n2003\\r\\n2011\\r\\n2017\\r\\n2027\\r\\n2029\\r\\n2039\\r\\n2053\\r\\n2063\\r\\n2069\\r\\n2081\\r\\n2083\\r\\n2087\\r\\n2089\\r\\n2099\\r\\n2111\\r\\n2113\\r\\n2129\\r\\n2131\\r\\n2137\\r\\n2141\\r\\n2143\\r\\n2153\\r\\n2161\\r\\n2179\\r\\n2203\\r\\n2207\\r\\n2213\\r\\n2221\\r\\n2237\\r\\n2239\\r\\n2243\\r\\n2251\\r\\n2267\\r\\n2269\\r\\n2273\\r\\n2281\\r\\n2287\\r\\n2293\\r\\n2297\\r\\n2309\\r\\n2311\\r\\n2333\\r\\n2339\\r\\n2341\\r\\n2347\\r\\n2351\\r\\n2357\\r\\n2371\\r\\n2377\\r\\n2381\\r\\n2383\\r\\n2389\\r\\n2393\\r\\n2399\\r\\n2411\\r\\n2417\\r\\n2423\\r\\n2437\\r\\n2441\\r\\n2447\\r\\n2459\\r\\n2467\\r\\n2473\\r\\n2477\\r\\n2503\\r\\n2521\\r\\n2531\\r\\n2539\\r\\n2543\\r\\n2549\\r\\n2551\\r\\n2557\\r\\n2579\\r\\n2591\\r\\n2593\\r\\n2609\\r\\n2617\\r\\n2621\\r\\n2633\\r\\n2647\\r\\n2657\\r\\n2659\\r\\n2663\\r\\n2671\\r\\n2677\\r\\n2683\\r\\n2687\\r\\n2689\\r\\n2693\\r\\n2699\\r\\n2707\\r\\n2711\\r\\n2713\\r\\n2719\\r\\n2729\\r\\n2731\\r\\n2741\\r\\n27
49\\r\\n2753\\r\\n2767\\r\\n2777\\r\\n2789\\r\\n2791\\r\\n2797\\r\\n2801\\r\\n2803\\r\\n2819\\r\\n2833\\r\\n2837\\r\\n2843\\r\\n2851\\r\\n2857\\r\\n2861\\r\\n2879\\r\\n2887\\r\\n2897\\r\\n2903\\r\\n2909\\r\\n2917\\r\\n2927\\r\\n2939\\r\\n2953\\r\\n2957\\r\\n2963\\r\\n2969\\r\\n2971\\r\\n2999\\r\\n3001\\r\\n3011\\r\\n3019\\r\\n3023\\r\\n3037\\r\\n3041\\r\\n3049\\r\\n3061\\r\\n3067\\r\\n3079\\r\\n3083\\r\\n3089\\r\\n3109\\r\\n3119\\r\\n3121\\r\\n3137\\r\\n3163\\r\\n3167\\r\\n3169\\r\\n3181\\r\\n3187\\r\\n3191\\r\\n3203\\r\\n3209\\r\\n3217\\r\\n3221\\r\\n3229\\r\\n3251\\r\\n3253\\r\\n3257\\r\\n3259\\r\\n3271\\r\\n3299\\r\\n3301\\r\\n3307\\r\\n3313\\r\\n3319\\r\\n3323\\r\\n3329\\r\\n3331\\r\\n3343\\r\\n3347\\r\\n3359\\r\\n3361\\r\\n3371\\r\\n3373\\r\\n3389\\r\\n3391\\r\\n3407\\r\\n3413\\r\\n3433\\r\\n3449\\r\\n3457\\r\\n3461\\r\\n3463\\r\\n3467\\r\\n3469\\r\\n3491\\r\\n3499\\r\\n3511\\r\\n3517\\r\\n3527\\r\\n3529\\r\\n3533\\r\\n3539\\r\\n3541\\r\\n3547\\r\\n3557\\r\\n3559\\r\\n3571\\r\\n3581\\r\\n3583\\r\\n3593\\r\\n3607\\r\\n3613\\r\\n3617\\r\\n3623\\r\\n3631\\r\\n3637\\r\\n3643\\r\\n3659\\r\\n3671\\r\\n3673\\r\\n3677\\r\\n3691\\r\\n3697\\r\\n3701\\r\\n3709\\r\\n3719\\r\\n3727\\r\\n3733\\r\\n3739\\r\\n3761\\r\\n3767\\r\\n3769\\r\\n3779\\r\\n3793\\r\\n3797\\r\\n3803\\r\\n3821\\r\\n3823\\r\\n3833\\r\\n3847\\r\\n3851\\r\\n3853\\r\\n3863\\r\\n3877\\r\\n3881\\r\\n3889\\r\\n3907\\r\\n3911\\r\\n3917\\r\\n3919\\r\\n3923\\r\\n3929\\r\\n3931\\r\\n3943\\r\\n3947\\r\\n3967\\r\\n3989\\r\\n4001\\r\\n4003\\r\\n4007\\r\\n4013\\r\\n4019\\r\\n4021\\r\\n4027\\r\\n4049\\r\\n4051\\r\\n4057\\r\\n4073\\r\\n4079\\r\\n4091\\r\\n4093\\r\\n4099\\r\\n4111\\r\\n4127\\r\\n4129\\r\\n4133\\r\\n4139\\r\\n4153\\r\\n4157\\r\\n4159\\r\\n4177\\r\\n4201\\r\\n4211\\r\\n4217\\r\\n4219\\r\\n4229\\r\\n4231\\r\\n4241\\r\\n4243\\r\\n4253\\r\\n4259\\r\\n4261\\r\\n4271\\r\\n4273\\r\\n4283\\r\\n4289\\r\\n4297\\r\\n4327\\r\\n4337\\r\\n4339\\r\\n4349\\r\\n4357\\r\\n4363\\r\\n4373\\r\\n4391\\r\\n4397\\r\\n4409\\r\\n44
21\\r\\n4423\\r\\n4441\\r\\n4447\\r\\n4451\\r\\n4457\\r\\n4463\\r\\n4481\\r\\n4483\\r\\n4493\\r\\n4507\\r\\n4513\\r\\n4517\\r\\n4519\\r\\n4523\\r\\n4547\\r\\n4549\\r\\n4561\\r\\n4567\\r\\n4583\\r\\n4591\\r\\n4597\\r\\n4603\\r\\n4621\\r\\n4637\\r\\n4639\\r\\n4643\\r\\n4649\\r\\n4651\\r\\n4657\\r\\n4663\\r\\n4673\\r\\n4679\\r\\n4691\\r\\n4703\\r\\n4721\\r\\n4723\\r\\n4729\\r\\n4733\\r\\n4751\\r\\n4759\\r\\n4783\\r\\n4787\\r\\n4789\\r\\n4793\\r\\n4799\\r\\n4801\\r\\n4813\\r\\n4817\\r\\n4831\\r\\n4861\\r\\n4871\\r\\n4877\\r\\n4889\\r\\n4903\\r\\n4909\\r\\n4919\\r\\n4931\\r\\n4933\\r\\n4937\\r\\n4943\\r\\n4951\\r\\n4957\\r\\n4967\\r\\n4969\\r\\n4973\\r\\n4987\\r\\n4993\\r\\n4999\\r\\n5003\\r\\n5009\\r\\n5011\\r\\n5021\\r\\n5023\\r\\n5039\\r\\n5051\\r\\n5059\\r\\n5077\\r\\n5081\\r\\n5087\\r\\n5099\\r\\n5101\\r\\n5107\\r\\n5113\\r\\n5119\\r\\n5147\\r\\n5153\\r\\n5167\\r\\n5171\\r\\n5179\\r\\n5189\\r\\n5197\\r\\n5209\\r\\n5227\\r\\n5231\\r\\n5233\\r\\n5237\\r\\n5261\\r\\n5273\\r\\n5279\\r\\n5281\\r\\n5297\\r\\n5303\\r\\n5309\\r\\n5323\\r\\n5333\\r\\n5347\\r\\n5351\\r\\n5381\\r\\n5387\\r\\n5393\\r\\n5399\\r\\n5407\\r\\n5413\\r\\n5417\\r\\n5419\\r\\n5431\\r\\n5437\\r\\n5441\\r\\n5443\\r\\n5449\\r\\n5471\\r\\n5477\\r\\n5479\\r\\n5483\\r\\n5501\\r\\n5503\\r\\n5507\\r\\n5519\\r\\n5521\\r\\n5527\\r\\n5531\\r\\n5557\\r\\n5563\\r\\n5569\\r\\n5573\\r\\n5581\\r\\n5591\\r\\n5623\\r\\n5639\\r\\n5641\\r\\n5647\\r\\n5651\\r\\n5653\\r\\n5657\\r\\n5659\\r\\n5669\\r\\n5683\\r\\n5689\\r\\n5693\\r\\n5701\\r\\n5711\\r\\n5717\\r\\n5737\\r\\n5741\\r\\n5743\\r\\n5749\\r\\n5779\\r\\n5783\\r\\n5791\\r\\n5801\\r\\n5807\\r\\n5813\\r\\n5821\\r\\n5827\\r\\n5839\\r\\n5843\\r\\n5849\\r\\n5851\\r\\n5857\\r\\n5861\\r\\n5867\\r\\n5869\\r\\n5879\\r\\n5881\\r\\n5897\\r\\n5903\\r\\n5923\\r\\n5927\\r\\n5939\\r\\n5953\\r\\n5981\\r\\n5987\\r\\n6007\\r\\n6011\\r\\n6029\\r\\n6037\\r\\n6043\\r\\n6047\\r\\n6053\\r\\n6067\\r\\n6073\\r\\n6079\\r\\n6089\\r\\n6091\\r\\n6101\\r\\n6113\\r\\n6121\\r\\n6131\\r\\n6133\\r\\n61
43\\r\\n6151\\r\\n6163\\r\\n6173\\r\\n6197\\r\\n6199\\r\\n6203\\r\\n6211\\r\\n6217\\r\\n6221\\r\\n6229\\r\\n6247\\r\\n6257\\r\\n6263\\r\\n6269\\r\\n6271\\r\\n6277\\r\\n6287\\r\\n6299\\r\\n6301\\r\\n6311\\r\\n6317\\r\\n6323\\r\\n6329\\r\\n6337\\r\\n6343\\r\\n6353\\r\\n6359\\r\\n6361\\r\\n6367\\r\\n6373\\r\\n6379\\r\\n6389\\r\\n6397\\r\\n6421\\r\\n6427\\r\\n6449\\r\\n6451\\r\\n6469\\r\\n6473\\r\\n6481\\r\\n6491\\r\\n6521\\r\\n6529\\r\\n6547\\r\\n6551\\r\\n6553\\r\\n6563\\r\\n6569\\r\\n6571\\r\\n6577\\r\\n6581\\r\\n6599\\r\\n6607\\r\\n6619\\r\\n6637\\r\\n6653\\r\\n6659\\r\\n6661\\r\\n6673\\r\\n6679\\r\\n6689\\r\\n6691\\r\\n6701\\r\\n6703\\r\\n6709\\r\\n6719\\r\\n6733\\r\\n6737\\r\\n6761\\r\\n6763\\r\\n6779\\r\\n6781\\r\\n6791\\r\\n6793\\r\\n6803\\r\\n6823\\r\\n6827\\r\\n6829\\r\\n6833\\r\\n6841\\r\\n6857\\r\\n6863\\r\\n6869\\r\\n6871\\r\\n6883\\r\\n6899\\r\\n6907\\r\\n6911\\r\\n6917\\r\\n6947\\r\\n6949\\r\\n6959\\r\\n6961\\r\\n6967\\r\\n6971\\r\\n6977\\r\\n6983\\r\\n6991\\r\\n6997\\r\\n7001\\r\\n7013\\r\\n7019\\r\\n7027\\r\\n7039\\r\\n7043\\r\\n7057\\r\\n7069\\r\\n7079\\r\\n7103\\r\\n7109\\r\\n7121\\r\\n7127\\r\\n7129\\r\\n7151\\r\\n7159\\r\\n7177\\r\\n7187\\r\\n7193\\r\\n7207\\r\\n7211\\r\\n7213\\r\\n7219\\r\\n7229\\r\\n7237\\r\\n7243\\r\\n7247\\r\\n7253\\r\\n7283\\r\\n7297\\r\\n7307\\r\\n7309\\r\\n7321\\r\\n7331\\r\\n7333\\r\\n7349\\r\\n7351\\r\\n7369\\r\\n7393\\r\\n7411\\r\\n7417\\r\\n7433\\r\\n7451\\r\\n7457\\r\\n7459\\r\\n7477\\r\\n7481\\r\\n7487\\r\\n7489\\r\\n7499\\r\\n7507\\r\\n7517\\r\\n7523\\r\\n7529\\r\\n7537\\r\\n7541\\r\\n7547\\r\\n7549\\r\\n7559\\r\\n7561\\r\\n7573\\r\\n7577\\r\\n7583\\r\\n7589\\r\\n7591\\r\\n7603\\r\\n7607\\r\\n7621\\r\\n7639\\r\\n7643\\r\\n7649\\r\\n7669\\r\\n7673\\r\\n7681\\r\\n7687\\r\\n7691\\r\\n7699\\r\\n7703\\r\\n7717\\r\\n7723\\r\\n7727\\r\\n7741\\r\\n7753\\r\\n7757\\r\\n7759\\r\\n7789\\r\\n7793\\r\\n7817\\r\\n7823\\r\\n7829\\r\\n7841\\r\\n7853\\r\\n7867\\r\\n7873\\r\\n7877\\r\\n7879\\r\\n7883\\r\\n7901\\r\\n7907\\r\\n7919\\r\\n\"
, \"base64Encoded\": false, \"contentType\": \"text/csv\"}}\n", - "error": "", - "result": { - "prime_numbers.csv": { - "content": "2\r\n3\r\n5\r\n7\r\n11\r\n13\r\n17\r\n19\r\n23\r\n29\r\n31\r\n37\r\n41\r\n43\r\n47\r\n53\r\n59\r\n61\r\n67\r\n71\r\n73\r\n79\r\n83\r\n89\r\n97\r\n101\r\n103\r\n107\r\n109\r\n113\r\n127\r\n131\r\n137\r\n139\r\n149\r\n151\r\n157\r\n163\r\n167\r\n173\r\n179\r\n181\r\n191\r\n193\r\n197\r\n199\r\n211\r\n223\r\n227\r\n229\r\n233\r\n239\r\n241\r\n251\r\n257\r\n263\r\n269\r\n271\r\n277\r\n281\r\n283\r\n293\r\n307\r\n311\r\n313\r\n317\r\n331\r\n337\r\n347\r\n349\r\n353\r\n359\r\n367\r\n373\r\n379\r\n383\r\n389\r\n397\r\n401\r\n409\r\n419\r\n421\r\n431\r\n433\r\n439\r\n443\r\n449\r\n457\r\n461\r\n463\r\n467\r\n479\r\n487\r\n491\r\n499\r\n503\r\n509\r\n521\r\n523\r\n541\r\n547\r\n557\r\n563\r\n569\r\n571\r\n577\r\n587\r\n593\r\n599\r\n601\r\n607\r\n613\r\n617\r\n619\r\n631\r\n641\r\n643\r\n647\r\n653\r\n659\r\n661\r\n673\r\n677\r\n683\r\n691\r\n701\r\n709\r\n719\r\n727\r\n733\r\n739\r\n743\r\n751\r\n757\r\n761\r\n769\r\n773\r\n787\r\n797\r\n809\r\n811\r\n821\r\n823\r\n827\r\n829\r\n839\r\n853\r\n857\r\n859\r\n863\r\n877\r\n881\r\n883\r\n887\r\n907\r\n911\r\n919\r\n929\r\n937\r\n941\r\n947\r\n953\r\n967\r\n971\r\n977\r\n983\r\n991\r\n997\r\n1009\r\n1013\r\n1019\r\n1021\r\n1031\r\n1033\r\n1039\r\n1049\r\n1051\r\n1061\r\n1063\r\n1069\r\n1087\r\n1091\r\n1093\r\n1097\r\n1103\r\n1109\r\n1117\r\n1123\r\n1129\r\n1151\r\n1153\r\n1163\r\n1171\r\n1181\r\n1187\r\n1193\r\n1201\r\n1213\r\n1217\r\n1223\r\n1229\r\n1231\r\n1237\r\n1249\r\n1259\r\n1277\r\n1279\r\n1283\r\n1289\r\n1291\r\n1297\r\n1301\r\n1303\r\n1307\r\n1319\r\n1321\r\n1327\r\n1361\r\n1367\r\n1373\r\n1381\r\n1399\r\n1409\r\n1423\r\n1427\r\n1429\r\n1433\r\n1439\r\n1447\r\n1451\r\n1453\r\n1459\r\n1471\r\n1481\r\n1483\r\n1487\r\n1489\r\n1493\r\n1499\r\n1511\r\n1523\r\n1531\r\n1543\r\n1549\r\n1553\r\n1559\r\n1567\r\n1571\r\n1579\r\n1583\r\n1597\r\n1601\r\n1607\r\n1609\r\n1613\r\n1619\r\n1621\r\n1627\r\n1
637\r\n1657\r\n1663\r\n1667\r\n1669\r\n1693\r\n1697\r\n1699\r\n1709\r\n1721\r\n1723\r\n1733\r\n1741\r\n1747\r\n1753\r\n1759\r\n1777\r\n1783\r\n1787\r\n1789\r\n1801\r\n1811\r\n1823\r\n1831\r\n1847\r\n1861\r\n1867\r\n1871\r\n1873\r\n1877\r\n1879\r\n1889\r\n1901\r\n1907\r\n1913\r\n1931\r\n1933\r\n1949\r\n1951\r\n1973\r\n1979\r\n1987\r\n1993\r\n1997\r\n1999\r\n2003\r\n2011\r\n2017\r\n2027\r\n2029\r\n2039\r\n2053\r\n2063\r\n2069\r\n2081\r\n2083\r\n2087\r\n2089\r\n2099\r\n2111\r\n2113\r\n2129\r\n2131\r\n2137\r\n2141\r\n2143\r\n2153\r\n2161\r\n2179\r\n2203\r\n2207\r\n2213\r\n2221\r\n2237\r\n2239\r\n2243\r\n2251\r\n2267\r\n2269\r\n2273\r\n2281\r\n2287\r\n2293\r\n2297\r\n2309\r\n2311\r\n2333\r\n2339\r\n2341\r\n2347\r\n2351\r\n2357\r\n2371\r\n2377\r\n2381\r\n2383\r\n2389\r\n2393\r\n2399\r\n2411\r\n2417\r\n2423\r\n2437\r\n2441\r\n2447\r\n2459\r\n2467\r\n2473\r\n2477\r\n2503\r\n2521\r\n2531\r\n2539\r\n2543\r\n2549\r\n2551\r\n2557\r\n2579\r\n2591\r\n2593\r\n2609\r\n2617\r\n2621\r\n2633\r\n2647\r\n2657\r\n2659\r\n2663\r\n2671\r\n2677\r\n2683\r\n2687\r\n2689\r\n2693\r\n2699\r\n2707\r\n2711\r\n2713\r\n2719\r\n2729\r\n2731\r\n2741\r\n2749\r\n2753\r\n2767\r\n2777\r\n2789\r\n2791\r\n2797\r\n2801\r\n2803\r\n2819\r\n2833\r\n2837\r\n2843\r\n2851\r\n2857\r\n2861\r\n2879\r\n2887\r\n2897\r\n2903\r\n2909\r\n2917\r\n2927\r\n2939\r\n2953\r\n2957\r\n2963\r\n2969\r\n2971\r\n2999\r\n3001\r\n3011\r\n3019\r\n3023\r\n3037\r\n3041\r\n3049\r\n3061\r\n3067\r\n3079\r\n3083\r\n3089\r\n3109\r\n3119\r\n3121\r\n3137\r\n3163\r\n3167\r\n3169\r\n3181\r\n3187\r\n3191\r\n3203\r\n3209\r\n3217\r\n3221\r\n3229\r\n3251\r\n3253\r\n3257\r\n3259\r\n3271\r\n3299\r\n3301\r\n3307\r\n3313\r\n3319\r\n3323\r\n3329\r\n3331\r\n3343\r\n3347\r\n3359\r\n3361\r\n3371\r\n3373\r\n3389\r\n3391\r\n3407\r\n3413\r\n3433\r\n3449\r\n3457\r\n3461\r\n3463\r\n3467\r\n3469\r\n3491\r\n3499\r\n3511\r\n3517\r\n3527\r\n3529\r\n3533\r\n3539\r\n3541\r\n3547\r\n3557\r\n3559\r\n3571\r\n3581\r\n3583\r\n3593\r\n3607\r\n3613\r\n3617\r\n3623\r\n3631\r\n3
637\r\n3643\r\n3659\r\n3671\r\n3673\r\n3677\r\n3691\r\n3697\r\n3701\r\n3709\r\n3719\r\n3727\r\n3733\r\n3739\r\n3761\r\n3767\r\n3769\r\n3779\r\n3793\r\n3797\r\n3803\r\n3821\r\n3823\r\n3833\r\n3847\r\n3851\r\n3853\r\n3863\r\n3877\r\n3881\r\n3889\r\n3907\r\n3911\r\n3917\r\n3919\r\n3923\r\n3929\r\n3931\r\n3943\r\n3947\r\n3967\r\n3989\r\n4001\r\n4003\r\n4007\r\n4013\r\n4019\r\n4021\r\n4027\r\n4049\r\n4051\r\n4057\r\n4073\r\n4079\r\n4091\r\n4093\r\n4099\r\n4111\r\n4127\r\n4129\r\n4133\r\n4139\r\n4153\r\n4157\r\n4159\r\n4177\r\n4201\r\n4211\r\n4217\r\n4219\r\n4229\r\n4231\r\n4241\r\n4243\r\n4253\r\n4259\r\n4261\r\n4271\r\n4273\r\n4283\r\n4289\r\n4297\r\n4327\r\n4337\r\n4339\r\n4349\r\n4357\r\n4363\r\n4373\r\n4391\r\n4397\r\n4409\r\n4421\r\n4423\r\n4441\r\n4447\r\n4451\r\n4457\r\n4463\r\n4481\r\n4483\r\n4493\r\n4507\r\n4513\r\n4517\r\n4519\r\n4523\r\n4547\r\n4549\r\n4561\r\n4567\r\n4583\r\n4591\r\n4597\r\n4603\r\n4621\r\n4637\r\n4639\r\n4643\r\n4649\r\n4651\r\n4657\r\n4663\r\n4673\r\n4679\r\n4691\r\n4703\r\n4721\r\n4723\r\n4729\r\n4733\r\n4751\r\n4759\r\n4783\r\n4787\r\n4789\r\n4793\r\n4799\r\n4801\r\n4813\r\n4817\r\n4831\r\n4861\r\n4871\r\n4877\r\n4889\r\n4903\r\n4909\r\n4919\r\n4931\r\n4933\r\n4937\r\n4943\r\n4951\r\n4957\r\n4967\r\n4969\r\n4973\r\n4987\r\n4993\r\n4999\r\n5003\r\n5009\r\n5011\r\n5021\r\n5023\r\n5039\r\n5051\r\n5059\r\n5077\r\n5081\r\n5087\r\n5099\r\n5101\r\n5107\r\n5113\r\n5119\r\n5147\r\n5153\r\n5167\r\n5171\r\n5179\r\n5189\r\n5197\r\n5209\r\n5227\r\n5231\r\n5233\r\n5237\r\n5261\r\n5273\r\n5279\r\n5281\r\n5297\r\n5303\r\n5309\r\n5323\r\n5333\r\n5347\r\n5351\r\n5381\r\n5387\r\n5393\r\n5399\r\n5407\r\n5413\r\n5417\r\n5419\r\n5431\r\n5437\r\n5441\r\n5443\r\n5449\r\n5471\r\n5477\r\n5479\r\n5483\r\n5501\r\n5503\r\n5507\r\n5519\r\n5521\r\n5527\r\n5531\r\n5557\r\n5563\r\n5569\r\n5573\r\n5581\r\n5591\r\n5623\r\n5639\r\n5641\r\n5647\r\n5651\r\n5653\r\n5657\r\n5659\r\n5669\r\n5683\r\n5689\r\n5693\r\n5701\r\n5711\r\n5717\r\n5737\r\n5741\r\n5743\r\n5749\r\n5779\r\n5
783\r\n5791\r\n5801\r\n5807\r\n5813\r\n5821\r\n5827\r\n5839\r\n5843\r\n5849\r\n5851\r\n5857\r\n5861\r\n5867\r\n5869\r\n5879\r\n5881\r\n5897\r\n5903\r\n5923\r\n5927\r\n5939\r\n5953\r\n5981\r\n5987\r\n6007\r\n6011\r\n6029\r\n6037\r\n6043\r\n6047\r\n6053\r\n6067\r\n6073\r\n6079\r\n6089\r\n6091\r\n6101\r\n6113\r\n6121\r\n6131\r\n6133\r\n6143\r\n6151\r\n6163\r\n6173\r\n6197\r\n6199\r\n6203\r\n6211\r\n6217\r\n6221\r\n6229\r\n6247\r\n6257\r\n6263\r\n6269\r\n6271\r\n6277\r\n6287\r\n6299\r\n6301\r\n6311\r\n6317\r\n6323\r\n6329\r\n6337\r\n6343\r\n6353\r\n6359\r\n6361\r\n6367\r\n6373\r\n6379\r\n6389\r\n6397\r\n6421\r\n6427\r\n6449\r\n6451\r\n6469\r\n6473\r\n6481\r\n6491\r\n6521\r\n6529\r\n6547\r\n6551\r\n6553\r\n6563\r\n6569\r\n6571\r\n6577\r\n6581\r\n6599\r\n6607\r\n6619\r\n6637\r\n6653\r\n6659\r\n6661\r\n6673\r\n6679\r\n6689\r\n6691\r\n6701\r\n6703\r\n6709\r\n6719\r\n6733\r\n6737\r\n6761\r\n6763\r\n6779\r\n6781\r\n6791\r\n6793\r\n6803\r\n6823\r\n6827\r\n6829\r\n6833\r\n6841\r\n6857\r\n6863\r\n6869\r\n6871\r\n6883\r\n6899\r\n6907\r\n6911\r\n6917\r\n6947\r\n6949\r\n6959\r\n6961\r\n6967\r\n6971\r\n6977\r\n6983\r\n6991\r\n6997\r\n7001\r\n7013\r\n7019\r\n7027\r\n7039\r\n7043\r\n7057\r\n7069\r\n7079\r\n7103\r\n7109\r\n7121\r\n7127\r\n7129\r\n7151\r\n7159\r\n7177\r\n7187\r\n7193\r\n7207\r\n7211\r\n7213\r\n7219\r\n7229\r\n7237\r\n7243\r\n7247\r\n7253\r\n7283\r\n7297\r\n7307\r\n7309\r\n7321\r\n7331\r\n7333\r\n7349\r\n7351\r\n7369\r\n7393\r\n7411\r\n7417\r\n7433\r\n7451\r\n7457\r\n7459\r\n7477\r\n7481\r\n7487\r\n7489\r\n7499\r\n7507\r\n7517\r\n7523\r\n7529\r\n7537\r\n7541\r\n7547\r\n7549\r\n7559\r\n7561\r\n7573\r\n7577\r\n7583\r\n7589\r\n7591\r\n7603\r\n7607\r\n7621\r\n7639\r\n7643\r\n7649\r\n7669\r\n7673\r\n7681\r\n7687\r\n7691\r\n7699\r\n7703\r\n7717\r\n7723\r\n7727\r\n7741\r\n7753\r\n7757\r\n7759\r\n7789\r\n7793\r\n7817\r\n7823\r\n7829\r\n7841\r\n7853\r\n7867\r\n7873\r\n7877\r\n7879\r\n7883\r\n7901\r\n7907\r\n7919\r\n", - "base64Encoded": false, - "contentType": "text/csv" - } - 
}, - "exitCode": 0 - } - } -] \ No newline at end of file diff --git a/static/1_LF-Details.png b/static/1_LF-Details.png deleted file mode 100644 index 3a2be57d..00000000 Binary files a/static/1_LF-Details.png and /dev/null differ diff --git a/static/20_prime_numbers.csv b/static/20_prime_numbers.csv deleted file mode 100644 index d5c2a856..00000000 --- a/static/20_prime_numbers.csv +++ /dev/null @@ -1,1000 +0,0 @@ -2 -3 -5 -7 -11 -13 -17 -19 -23 -29 -31 -37 -41 -43 -47 -53 -59 -61 -67 -71 -73 -79 -83 -89 -97 -101 -103 -107 -109 -113 -127 -131 -137 -139 -149 -151 -157 -163 -167 -173 -179 -181 -191 -193 -197 -199 -211 -223 -227 -229 -233 -239 -241 -251 -257 -263 -269 -271 -277 -281 -283 -293 -307 -311 -313 -317 -331 -337 -347 -349 -353 -359 -367 -373 -379 -383 -389 -397 -401 -409 -419 -421 -431 -433 -439 -443 -449 -457 -461 -463 -467 -479 -487 -491 -499 -503 -509 -521 -523 -541 -547 -557 -563 -569 -571 -577 -587 -593 -599 -601 -607 -613 -617 -619 -631 -641 -643 -647 -653 -659 -661 -673 -677 -683 -691 -701 -709 -719 -727 -733 -739 -743 -751 -757 -761 -769 -773 -787 -797 -809 -811 -821 -823 -827 -829 -839 -853 -857 -859 -863 -877 -881 -883 -887 -907 -911 -919 -929 -937 -941 -947 -953 -967 -971 -977 -983 -991 -997 -1009 -1013 -1019 -1021 -1031 -1033 -1039 -1049 -1051 -1061 -1063 -1069 -1087 -1091 -1093 -1097 -1103 -1109 -1117 -1123 -1129 -1151 -1153 -1163 -1171 -1181 -1187 -1193 -1201 -1213 -1217 -1223 -1229 -1231 -1237 -1249 -1259 -1277 -1279 -1283 -1289 -1291 -1297 -1301 -1303 -1307 -1319 -1321 -1327 -1361 -1367 -1373 -1381 -1399 -1409 -1423 -1427 -1429 -1433 -1439 -1447 -1451 -1453 -1459 -1471 -1481 -1483 -1487 -1489 -1493 -1499 -1511 -1523 -1531 -1543 -1549 -1553 -1559 -1567 -1571 -1579 -1583 -1597 -1601 -1607 -1609 -1613 -1619 -1621 -1627 -1637 -1657 -1663 -1667 -1669 -1693 -1697 -1699 -1709 -1721 -1723 -1733 -1741 -1747 -1753 -1759 -1777 -1783 -1787 -1789 -1801 -1811 -1823 -1831 -1847 -1861 -1867 -1871 -1873 -1877 -1879 -1889 -1901 -1907 -1913 -1931 -1933 -1949 
-1951 -1973 -1979 -1987 -1993 -1997 -1999 -2003 -2011 -2017 -2027 -2029 -2039 -2053 -2063 -2069 -2081 -2083 -2087 -2089 -2099 -2111 -2113 -2129 -2131 -2137 -2141 -2143 -2153 -2161 -2179 -2203 -2207 -2213 -2221 -2237 -2239 -2243 -2251 -2267 -2269 -2273 -2281 -2287 -2293 -2297 -2309 -2311 -2333 -2339 -2341 -2347 -2351 -2357 -2371 -2377 -2381 -2383 -2389 -2393 -2399 -2411 -2417 -2423 -2437 -2441 -2447 -2459 -2467 -2473 -2477 -2503 -2521 -2531 -2539 -2543 -2549 -2551 -2557 -2579 -2591 -2593 -2609 -2617 -2621 -2633 -2647 -2657 -2659 -2663 -2671 -2677 -2683 -2687 -2689 -2693 -2699 -2707 -2711 -2713 -2719 -2729 -2731 -2741 -2749 -2753 -2767 -2777 -2789 -2791 -2797 -2801 -2803 -2819 -2833 -2837 -2843 -2851 -2857 -2861 -2879 -2887 -2897 -2903 -2909 -2917 -2927 -2939 -2953 -2957 -2963 -2969 -2971 -2999 -3001 -3011 -3019 -3023 -3037 -3041 -3049 -3061 -3067 -3079 -3083 -3089 -3109 -3119 -3121 -3137 -3163 -3167 -3169 -3181 -3187 -3191 -3203 -3209 -3217 -3221 -3229 -3251 -3253 -3257 -3259 -3271 -3299 -3301 -3307 -3313 -3319 -3323 -3329 -3331 -3343 -3347 -3359 -3361 -3371 -3373 -3389 -3391 -3407 -3413 -3433 -3449 -3457 -3461 -3463 -3467 -3469 -3491 -3499 -3511 -3517 -3527 -3529 -3533 -3539 -3541 -3547 -3557 -3559 -3571 -3581 -3583 -3593 -3607 -3613 -3617 -3623 -3631 -3637 -3643 -3659 -3671 -3673 -3677 -3691 -3697 -3701 -3709 -3719 -3727 -3733 -3739 -3761 -3767 -3769 -3779 -3793 -3797 -3803 -3821 -3823 -3833 -3847 -3851 -3853 -3863 -3877 -3881 -3889 -3907 -3911 -3917 -3919 -3923 -3929 -3931 -3943 -3947 -3967 -3989 -4001 -4003 -4007 -4013 -4019 -4021 -4027 -4049 -4051 -4057 -4073 -4079 -4091 -4093 -4099 -4111 -4127 -4129 -4133 -4139 -4153 -4157 -4159 -4177 -4201 -4211 -4217 -4219 -4229 -4231 -4241 -4243 -4253 -4259 -4261 -4271 -4273 -4283 -4289 -4297 -4327 -4337 -4339 -4349 -4357 -4363 -4373 -4391 -4397 -4409 -4421 -4423 -4441 -4447 -4451 -4457 -4463 -4481 -4483 -4493 -4507 -4513 -4517 -4519 -4523 -4547 -4549 -4561 -4567 -4583 -4591 -4597 -4603 -4621 -4637 -4639 -4643 -4649 -4651 
-4657 -4663 -4673 -4679 -4691 -4703 -4721 -4723 -4729 -4733 -4751 -4759 -4783 -4787 -4789 -4793 -4799 -4801 -4813 -4817 -4831 -4861 -4871 -4877 -4889 -4903 -4909 -4919 -4931 -4933 -4937 -4943 -4951 -4957 -4967 -4969 -4973 -4987 -4993 -4999 -5003 -5009 -5011 -5021 -5023 -5039 -5051 -5059 -5077 -5081 -5087 -5099 -5101 -5107 -5113 -5119 -5147 -5153 -5167 -5171 -5179 -5189 -5197 -5209 -5227 -5231 -5233 -5237 -5261 -5273 -5279 -5281 -5297 -5303 -5309 -5323 -5333 -5347 -5351 -5381 -5387 -5393 -5399 -5407 -5413 -5417 -5419 -5431 -5437 -5441 -5443 -5449 -5471 -5477 -5479 -5483 -5501 -5503 -5507 -5519 -5521 -5527 -5531 -5557 -5563 -5569 -5573 -5581 -5591 -5623 -5639 -5641 -5647 -5651 -5653 -5657 -5659 -5669 -5683 -5689 -5693 -5701 -5711 -5717 -5737 -5741 -5743 -5749 -5779 -5783 -5791 -5801 -5807 -5813 -5821 -5827 -5839 -5843 -5849 -5851 -5857 -5861 -5867 -5869 -5879 -5881 -5897 -5903 -5923 -5927 -5939 -5953 -5981 -5987 -6007 -6011 -6029 -6037 -6043 -6047 -6053 -6067 -6073 -6079 -6089 -6091 -6101 -6113 -6121 -6131 -6133 -6143 -6151 -6163 -6173 -6197 -6199 -6203 -6211 -6217 -6221 -6229 -6247 -6257 -6263 -6269 -6271 -6277 -6287 -6299 -6301 -6311 -6317 -6323 -6329 -6337 -6343 -6353 -6359 -6361 -6367 -6373 -6379 -6389 -6397 -6421 -6427 -6449 -6451 -6469 -6473 -6481 -6491 -6521 -6529 -6547 -6551 -6553 -6563 -6569 -6571 -6577 -6581 -6599 -6607 -6619 -6637 -6653 -6659 -6661 -6673 -6679 -6689 -6691 -6701 -6703 -6709 -6719 -6733 -6737 -6761 -6763 -6779 -6781 -6791 -6793 -6803 -6823 -6827 -6829 -6833 -6841 -6857 -6863 -6869 -6871 -6883 -6899 -6907 -6911 -6917 -6947 -6949 -6959 -6961 -6967 -6971 -6977 -6983 -6991 -6997 -7001 -7013 -7019 -7027 -7039 -7043 -7057 -7069 -7079 -7103 -7109 -7121 -7127 -7129 -7151 -7159 -7177 -7187 -7193 -7207 -7211 -7213 -7219 -7229 -7237 -7243 -7247 -7253 -7283 -7297 -7307 -7309 -7321 -7331 -7333 -7349 -7351 -7369 -7393 -7411 -7417 -7433 -7451 -7457 -7459 -7477 -7481 -7487 -7489 -7499 -7507 -7517 -7523 -7529 -7537 -7541 -7547 -7549 -7559 -7561 -7573 -7577 
-7583 -7589 -7591 -7603 -7607 -7621 -7639 -7643 -7649 -7669 -7673 -7681 -7687 -7691 -7699 -7703 -7717 -7723 -7727 -7741 -7753 -7757 -7759 -7789 -7793 -7817 -7823 -7829 -7841 -7853 -7867 -7873 -7877 -7879 -7883 -7901 -7907 -7919 diff --git a/static/21_email_preview.html b/static/21_email_preview.html deleted file mode 100644 index b02167ca..00000000 --- a/static/21_email_preview.html +++ /dev/null @@ -1,42 +0,0 @@ - - - - - - Email Preview: Prime Numbers CSV - - - -
-
-

Email Template Preview

-
-
-
-
To:
-
recipient@example.com
-
-
-
Subject:
-
Prime Numbers CSV
-
- -
- -
- - - \ No newline at end of file diff --git a/static/22_email_template.json b/static/22_email_template.json deleted file mode 100644 index ea8be43b..00000000 --- a/static/22_email_template.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "recipient": "recipient@example.com", - "subject": "Prime Numbers CSV", - "plainBody": "Sehr geehrte Damen und Herren,\n\nanbei finden Sie die Datei 'prime_numbers.csv', die die Liste der Primzahlen enth\u00e4lt.\n\nMit freundlichen Gr\u00fc\u00dfen,\nIhr Team", - "htmlBody": "

Sehr geehrte Damen und Herren,

anbei finden Sie die Datei 'prime_numbers.csv', die die Liste der Primzahlen enth\u00e4lt.

Mit freundlichen Gr\u00fc\u00dfen,
Ihr Team

" -} \ No newline at end of file diff --git a/static/23_documentProcessor.py b/static/23_documentProcessor.py deleted file mode 100644 index d3b637e1..00000000 --- a/static/23_documentProcessor.py +++ /dev/null @@ -1,933 +0,0 @@ -""" -Module for extracting content from various file formats. -Provides specialized functions for processing text, PDF, Office documents, images, etc. -""" - -import logging -import os -import io -from typing import Dict, Any, List, Optional, Union, Tuple -import base64 - -# Configure logger -logger = logging.getLogger(__name__) - -# Optional imports - only loaded when needed -pdfExtractorLoaded = False -officeExtractorLoaded = False -imageProcessorLoaded = False - -def getDocumentContents(fileMetadata: Dict[str, Any], fileContent: bytes) -> List[Dict[str, Any]]: - """ - Main function for extracting content from a file based on its MIME type. - Delegates to specialized extraction functions. - - Args: - fileMetadata: File metadata (Name, MIME type, etc.) - fileContent: Binary data of the file - - Returns: - List of Document-Content objects with metadata and base64Encoded flag - """ - try: - mimeType = fileMetadata.get("mimeType", "application/octet-stream") - fileName = fileMetadata.get("name", "unknown") - - logger.info(f"Extracting content from file '{fileName}' (MIME type: {mimeType})") - - # Extract content based on MIME type - contents = [] - - # Text-based formats (excluding CSV which has its own handler) - if mimeType == "text/csv": - contents.extend(extractCsvContent(fileName, fileContent)) - - # Then handle other text-based formats - elif mimeType.startswith("text/") or mimeType in [ - "application/json", - "application/xml", - "application/javascript", - "application/x-python" - ]: - contents.extend(extractTextContent(fileName, fileContent, mimeType)) - - # SVG Files - elif mimeType == "image/svg+xml": - contents.extend(extractSvgContent(fileName, fileContent)) - - # Images - elif mimeType.startswith("image/"): - 
contents.extend(extractImageContent(fileName, fileContent, mimeType)) - - # PDF Documents - elif mimeType == "application/pdf": - contents.extend(extractPdfContent(fileName, fileContent)) - - # Word Documents - elif mimeType in [ - "application/vnd.openxmlformats-officedocument.wordprocessingml.document", - "application/msword" - ]: - contents.extend(extractWordContent(fileName, fileContent, mimeType)) - - # Excel Documents - elif mimeType in [ - "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet", - "application/vnd.ms-excel" - ]: - contents.extend(extractExcelContent(fileName, fileContent, mimeType)) - - # PowerPoint Documents - elif mimeType in [ - "application/vnd.openxmlformats-officedocument.presentationml.presentation", - "application/vnd.ms-powerpoint" - ]: - contents.extend(extractPowerpointContent(fileName, fileContent, mimeType)) - - # Binary data as fallback for unknown formats - else: - contents.extend(extractBinaryContent(fileName, fileContent, mimeType)) - - # Fallback when no content could be extracted - if not contents: - logger.warning(f"No content extracted from file '{fileName}', using binary fallback") - - # Convert binary content to base64 - encoded_data = base64.b64encode(fileContent).decode('utf-8') - - contents.append({ - "sequenceNr": 1, - "name": '1_undefined', - "ext": os.path.splitext(fileName)[1][1:] if os.path.splitext(fileName)[1] else "bin", - "contentType": mimeType, - "data": encoded_data, - "base64Encoded": True, - "metadata": { - "isText": False - } - }) - - # Add generic attributes for all documents - for content in contents: - # Make sure all content items have the base64Encoded flag - if "base64Encoded" not in content: - if isinstance(content.get("data"), bytes): - # Convert bytes to base64 - content["data"] = base64.b64encode(content["data"]).decode('utf-8') - content["base64Encoded"] = True - else: - # Assume text content if not explicitly marked - content["base64Encoded"] = False - - # Maintain backward 
compatibility with old "base64Encoded" flag in metadata - if "metadata" not in content: - content["metadata"] = {} - - # Set base64Encoded in metadata for backward compatibility - content["metadata"]["base64Encoded"] = content["base64Encoded"] - - logger.info(f"Successfully extracted {len(contents)} content items from file '{fileName}'") - return contents - - except Exception as e: - logger.error(f"Error during content extraction: {str(e)}") - # Fallback on error - return original data - return [{ - "sequenceNr": 1, - "name": fileMetadata.get("name", "unknown"), - "ext": os.path.splitext(fileMetadata.get("name", ""))[1][1:] if os.path.splitext(fileMetadata.get("name", ""))[1] else "bin", - "contentType": fileMetadata.get("mimeType", "application/octet-stream"), - "data": base64.b64encode(fileContent).decode('utf-8'), - "base64Encoded": True, - "metadata": { - "isText": False, - "base64Encoded": True # For backward compatibility - } - }] - - -def _loadPdfExtractor(): - """Loads PDF extraction libraries when needed""" - global pdfExtractorLoaded - if not pdfExtractorLoaded: - try: - global PyPDF2, fitz - import PyPDF2 - import fitz # PyMuPDF for more extensive PDF processing - pdfExtractorLoaded = True - logger.info("PDF extraction libraries successfully loaded") - except ImportError as e: - logger.warning(f"PDF extraction libraries could not be loaded: {e}") - -def _loadOfficeExtractor(): - """Loads Office document extraction libraries when needed""" - global officeExtractorLoaded - if not officeExtractorLoaded: - try: - global docx, openpyxl - import docx # python-docx for Word documents - import openpyxl # for Excel files - officeExtractorLoaded = True - logger.info("Office extraction libraries successfully loaded") - except ImportError as e: - logger.warning(f"Office extraction libraries could not be loaded: {e}") - -def _loadImageProcessor(): - """Loads image processing libraries when needed""" - global imageProcessorLoaded - if not imageProcessorLoaded: - try: 
- global PIL, Image - from PIL import Image - imageProcessorLoaded = True - logger.info("Image processing libraries successfully loaded") - except ImportError as e: - logger.warning(f"Image processing libraries could not be loaded: {e}") - -def extractTextContent(fileName: str, fileContent: bytes, mimeType: str) -> List[Dict[str, Any]]: - """ - Extracts text from text files. - - Args: - fileName: Name of the file - fileContent: Binary data of the file - mimeType: MIME type of the file - - Returns: - List of Text-Content objects with base64Encoded = False - """ - try: - # Keep original file extension - fileExtension = os.path.splitext(fileName)[1][1:] if os.path.splitext(fileName)[1] else "txt" - - # Extract text content - textContent = fileContent.decode('utf-8') - return [{ - "sequenceNr": 1, - "name": "1_text", # Simplified naming - "ext": fileExtension, - "contentType": "text/plain", - "data": textContent, - "base64Encoded": False, - "metadata": { - "isText": True - } - }] - except UnicodeDecodeError: - logger.warning(f"Could not decode text from file '{fileName}' as UTF-8, trying alternative encodings") - try: - # Try alternative encodings - for encoding in ['latin-1', 'cp1252', 'iso-8859-1']: - try: - textContent = fileContent.decode(encoding) - logger.info(f"Text successfully decoded with encoding {encoding}") - return [{ - "sequenceNr": 1, - "name": "1_text", # Simplified naming - "ext": fileExtension, - "contentType": "text/plain", - "data": textContent, - "base64Encoded": False, - "metadata": { - "isText": True, - "encoding": encoding - } - }] - except UnicodeDecodeError: - continue - - # Fallback to binary data if no encoding works - logger.warning(f"Could not decode text, using binary data") - return [{ - "sequenceNr": 1, - "name": "1_binary", # Simplified naming - "ext": fileExtension, - "contentType": mimeType, - "data": base64.b64encode(fileContent).decode('utf-8'), - "base64Encoded": True, - "metadata": { - "isText": False - } - }] - except Exception 
as e: - logger.error(f"Error in alternative text decoding: {str(e)}") - # Return binary data as fallback - return [{ - "sequenceNr": 1, - "name": "1_binary", # Simplified naming - "ext": fileExtension, - "contentType": mimeType, - "data": base64.b64encode(fileContent).decode('utf-8'), - "base64Encoded": True, - "metadata": { - "isText": False - } - }] - -def extractCsvContent(fileName: str, fileContent: bytes) -> List[Dict[str, Any]]: - """ - Extracts content from CSV files. - - Args: - fileName: Name of the file - fileContent: Binary data of the file - - Returns: - List of CSV-Content objects with base64Encoded = False - """ - try: - # Extract text content - csvContent = fileContent.decode('utf-8') - return [{ - "sequenceNr": 1, - "name": "1_csv", # Simplified naming - "ext": "csv", - "contentType": "text/csv", - "data": csvContent, - "base64Encoded": False, - "metadata": { - "isText": True, - "format": "csv" - } - }] - except UnicodeDecodeError: - logger.warning(f"Could not decode CSV from file '{fileName}' as UTF-8, trying alternative encodings") - try: - # Try alternative encodings for CSV - for encoding in ['latin-1', 'cp1252', 'iso-8859-1']: - try: - csvContent = fileContent.decode(encoding) - logger.info(f"CSV successfully decoded with encoding {encoding}") - return [{ - "sequenceNr": 1, - "name": "1_csv", # Simplified naming - "ext": "csv", - "contentType": "text/csv", - "data": csvContent, - "base64Encoded": False, - "metadata": { - "isText": True, - "encoding": encoding, - "format": "csv" - } - }] - except UnicodeDecodeError: - continue - - # Fallback to binary data - return [{ - "sequenceNr": 1, - "name": "1_binary", # Simplified naming - "ext": "csv", - "contentType": "text/csv", - "data": base64.b64encode(fileContent).decode('utf-8'), - "base64Encoded": True, - "metadata": { - "isText": False - } - }] - except Exception as e: - logger.error(f"Error in alternative CSV decoding: {str(e)}") - return [{ - "sequenceNr": 1, - "name": "1_binary", # Simplified 
naming - "ext": "csv", - "contentType": "text/csv", - "data": base64.b64encode(fileContent).decode('utf-8'), - "base64Encoded": True, - "metadata": { - "isText": False - } - }] - -def extractSvgContent(fileName: str, fileContent: bytes) -> List[Dict[str, Any]]: - """ - Extracts content from SVG files. - - Args: - fileName: Name of the file - fileContent: Binary data of the file - - Returns: - List of SVG-Content objects with dual text/image metadata - """ - contents = [] - - try: - # Extract SVG as text content (XML) - svgText = fileContent.decode('utf-8') - - # Check if it's actually SVG by looking for the SVG tag - if " List[Dict[str, Any]]: - """ - Extracts content from image files and optionally generates metadata descriptions. - - Args: - fileName: Name of the file - fileContent: Binary data of the file - mimeType: MIME type of the file - - Returns: - List of Image-Content objects with base64Encoded = True - """ - - # Extract file extension from MIME type or filename - fileExtension = mimeType.split('/')[-1] - if fileExtension == "jpeg": - fileExtension = "jpg" - - # If possible, analyze image and extract metadata - imageMetadata = { - "isText": False, - "format": "image" - } - imageDescription = None - - try: - _loadImageProcessor() - if imageProcessorLoaded and fileContent and len(fileContent) > 0: - with io.BytesIO(fileContent) as imgStream: - try: - img = Image.open(imgStream) - # Check if the image was actually loaded - img.verify() - # To safely continue working, reload - imgStream.seek(0) - img = Image.open(imgStream) - imageMetadata.update({ - "format": img.format, - "mode": img.mode, - "width": img.width, - "height": img.height - }) - # Extract EXIF data if available - if hasattr(img, '_getexif') and callable(img._getexif): - exif = img._getexif() - if exif: - exifData = {} - for tagId, value in exif.items(): - exifData[f"tag_{tagId}"] = str(value) - imageMetadata["exif"] = exifData - - # Generate image description - imageDescription = f"Image 
({img.width}x{img.height}, {img.format}, {img.mode})" - except Exception as innerE: - logger.warning(f"Error processing image: {str(innerE)}") - imageMetadata["error"] = str(innerE) - imageDescription = f"Image (unable to process: {str(innerE)})" - except Exception as e: - logger.warning(f"Could not extract image metadata: {str(e)}") - imageMetadata["error"] = str(e) - - # Convert binary image to base64 - encoded_data = base64.b64encode(fileContent).decode('utf-8') - - # Return image content - contents = [{ - "sequenceNr": 1, - "name": "1_image", # Simplified naming - "ext": fileExtension, - "contentType": mimeType, - "data": encoded_data, - "base64Encoded": True, - "metadata": imageMetadata - }] - - # If image description available, add as additional text content - if imageDescription: - contents.append({ - "sequenceNr": 2, - "name": "2_text_image_info", # Simplified naming with label - "ext": "txt", - "contentType": "text/plain", - "data": imageDescription, - "base64Encoded": False, - "metadata": { - "isText": True, - "imageDescription": True - } - }) - - return contents - -def extractPdfContent(fileName: str, fileContent: bytes) -> List[Dict[str, Any]]: - """ - Extracts text and images from PDF files. 
- - Args: - fileName: Name of the file - fileContent: Binary data of the file - - Returns: - List of PDF-Content objects (text and images) with appropriate base64Encoded flags - """ - contents = [] - extractedContentFound = False - - try: - # Load PDF extraction libraries - _loadPdfExtractor() - if not pdfExtractorLoaded: - logger.warning("PDF extraction not possible: Libraries not available") - # Add original file as binary content - contents.append({ - "sequenceNr": 1, - "name": "1_pdf", # Simplified naming - "ext": "pdf", - "contentType": "application/pdf", - "data": base64.b64encode(fileContent).decode('utf-8'), - "base64Encoded": True, - "metadata": { - "isText": False, - "format": "pdf" - } - }) - return contents - - # Extract text with PyPDF2 - extractedText = "" - pdfMetadata = {} - with io.BytesIO(fileContent) as pdfStream: - pdfReader = PyPDF2.PdfReader(pdfStream) - - # Extract metadata - pdfInfo = pdfReader.metadata or {} - for key, value in pdfInfo.items(): - if key.startswith('/'): - pdfMetadata[key[1:]] = value - else: - pdfMetadata[key] = value - - # Extract text from all pages - for pageNum in range(len(pdfReader.pages)): - page = pdfReader.pages[pageNum] - pageText = page.extract_text() - if pageText: - extractedText += f"--- Page {pageNum + 1} ---\n{pageText}\n\n" - - # If text was found, add as separate content - if extractedText.strip(): - extractedContentFound = True - contents.append({ - "sequenceNr": len(contents) + 1, - "name": f"{len(contents) + 1}_text", # Simplified naming - "ext": "txt", - "contentType": "text/plain", - "data": extractedText, - "base64Encoded": False, - "metadata": { - "isText": True, - "source": "pdf", - "pages": len(pdfReader.pages), - "pdfMetadata": pdfMetadata - } - }) - - # Extract images with PyMuPDF (fitz) - try: - with io.BytesIO(fileContent) as pdfStream: - doc = fitz.open(stream=pdfStream, filetype="pdf") - imageCount = 0 - - for pageNum in range(len(doc)): - page = doc[pageNum] - imageList = 
page.get_images(full=True) - - for imgIndex, imgInfo in enumerate(imageList): - try: - imageCount += 1 - xref = imgInfo[0] - baseImage = doc.extract_image(xref) - imageBytes = baseImage["image"] - imageExt = baseImage["ext"] - - # Add image as content - encode as base64 - extractedContentFound = True - contents.append({ - "sequenceNr": len(contents) + 1, - "name": f"{len(contents) + 1}_image_page{pageNum+1}_{imgIndex+1}", # Simplified naming with label - "ext": imageExt, - "contentType": f"image/{imageExt}", - "data": base64.b64encode(imageBytes).decode('utf-8'), - "base64Encoded": True, - "metadata": { - "isText": False, - "source": "pdf", - "page": pageNum + 1, - "index": imgIndex - } - }) - except Exception as imgE: - logger.warning(f"Error extracting image {imgIndex} on page {pageNum + 1}: {str(imgE)}") - - # Close document - doc.close() - - except Exception as imgExtractE: - logger.warning(f"Error extracting images from PDF: {str(imgExtractE)}") - - except Exception as e: - logger.error(f"Error in PDF extraction: {str(e)}") - - # If no content was extracted, add the original PDF - if not extractedContentFound: - contents.append({ - "sequenceNr": 1, - "name": "1_pdf", # Simplified naming - "ext": "pdf", - "contentType": "application/pdf", - "data": base64.b64encode(fileContent).decode('utf-8'), - "base64Encoded": True, - "metadata": { - "isText": False, - "format": "pdf" - } - }) - - return contents - -def extractWordContent(fileName: str, fileContent: bytes, mimeType: str) -> List[Dict[str, Any]]: - """ - Extracts text and images from Word documents. 
- - Args: - fileName: Name of the file - fileContent: Binary data of the file - mimeType: MIME type of the file - - Returns: - List of Word-Content objects (text and possibly images) with appropriate base64Encoded flags - """ - contents = [] - extractedContentFound = False - - # Determine file extension - fileExtension = "docx" if mimeType == "application/vnd.openxmlformats-officedocument.wordprocessingml.document" else "doc" - - try: - # Load Office extraction libraries - _loadOfficeExtractor() - if not officeExtractorLoaded: - logger.warning("Word extraction not possible: Libraries not available") - # Add original file as binary content - contents.append({ - "sequenceNr": 1, - "name": "1_word", # Simplified naming - "ext": fileExtension, - "contentType": mimeType, - "data": base64.b64encode(fileContent).decode('utf-8'), - "base64Encoded": True, - "metadata": { - "isText": False, - "format": "word" - } - }) - return contents - - # Only supports DOCX (newer format) - if mimeType == "application/vnd.openxmlformats-officedocument.wordprocessingml.document": - with io.BytesIO(fileContent) as docxStream: - doc = docx.Document(docxStream) - - # Extract text - fullText = [] - for para in doc.paragraphs: - fullText.append(para.text) - - # Extract tables - for table in doc.tables: - for row in table.rows: - rowText = [] - for cell in row.cells: - rowText.append(cell.text) - fullText.append(" | ".join(rowText)) - - extractedText = "\n\n".join(fullText) - - # Add extracted text as content - if extractedText.strip(): - extractedContentFound = True - contents.append({ - "sequenceNr": 1, - "name": "1_text", # Simplified naming - "ext": "txt", - "contentType": "text/plain", - "data": extractedText, - "base64Encoded": False, - "metadata": { - "isText": True, - "source": "docx", - "paragraphCount": len(doc.paragraphs), - "tableCount": len(doc.tables) - } - }) - else: - logger.warning(f"Extraction from old Word format (DOC) not supported") - - except Exception as e: - 
logger.error(f"Error in Word extraction: {str(e)}") - - # If no content was extracted, add the original document - if not extractedContentFound: - contents.append({ - "sequenceNr": 1, - "name": "1_word", # Simplified naming - "ext": fileExtension, - "contentType": mimeType, - "data": base64.b64encode(fileContent).decode('utf-8'), - "base64Encoded": True, - "metadata": { - "isText": False, - "format": "word" - } - }) - - return contents - -def extractExcelContent(fileName: str, fileContent: bytes, mimeType: str) -> List[Dict[str, Any]]: - """ - Extracts table data from Excel files. - - Args: - fileName: Name of the file - fileContent: Binary data of the file - mimeType: MIME type of the file - - Returns: - List of Excel-Content objects with appropriate base64Encoded flags - """ - contents = [] - extractedContentFound = False - - # Determine file extension - fileExtension = "xlsx" if mimeType == "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet" else "xls" - - try: - # Load Office extraction libraries - _loadOfficeExtractor() - if not officeExtractorLoaded: - logger.warning("Excel extraction not possible: Libraries not available") - # Add original file as binary content - contents.append({ - "sequenceNr": 1, - "name": "1_excel", # Simplified naming - "ext": fileExtension, - "contentType": mimeType, - "data": base64.b64encode(fileContent).decode('utf-8'), - "base64Encoded": True, - "metadata": { - "isText": False, - "format": "excel" - } - }) - return contents - - # Only supports XLSX (newer format) - if mimeType == "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet": - with io.BytesIO(fileContent) as xlsxStream: - workbook = openpyxl.load_workbook(xlsxStream, data_only=True) - - # Extract each worksheet as separate CSV content - for sheetIndex, sheetName in enumerate(workbook.sheetnames): - sheet = workbook[sheetName] - - # Format data as CSV - csvRows = [] - for row in sheet.iter_rows(): - csvRow = [] - for cell in row: - value = 
cell.value - if value is None: - csvRow.append("") - else: - csvRow.append(str(value).replace('"', '""')) - csvRows.append(','.join(f'"{cell}"' for cell in csvRow)) - - csvContent = "\n".join(csvRows) - - # Add as CSV content - if csvContent.strip(): - extractedContentFound = True - sheetSafeName = sheetName.replace(" ", "_").replace("/", "_").replace("\\", "_") - contents.append({ - "sequenceNr": len(contents) + 1, - "name": f"{len(contents) + 1}_csv_{sheetSafeName}", # Simplified naming with sheet label - "ext": "csv", - "contentType": "text/csv", - "data": csvContent, - "base64Encoded": False, - "metadata": { - "isText": True, - "source": "xlsx", - "sheet": sheetName, - "format": "csv" - } - }) - else: - logger.warning(f"Extraction from old Excel format (XLS) not supported") - - except Exception as e: - logger.error(f"Error in Excel extraction: {str(e)}") - - # If no content was extracted, add the original document - if not extractedContentFound: - contents.append({ - "sequenceNr": 1, - "name": "1_excel", # Simplified naming - "ext": fileExtension, - "contentType": mimeType, - "data": base64.b64encode(fileContent).decode('utf-8'), - "base64Encoded": True, - "metadata": { - "isText": False, - "format": "excel" - } - }) - - return contents - -def extractPowerpointContent(fileName: str, fileContent: bytes, mimeType: str) -> List[Dict[str, Any]]: - """ - Extracts content from PowerPoint presentations. 
- - Args: - fileName: Name of the file - fileContent: Binary data of the file - mimeType: MIME type of the file - - Returns: - List of PowerPoint-Content objects with base64Encoded = True - """ - # For PowerPoint, we currently only return the original binary file - # A complete extraction would require more specialized libraries - fileExtension = "pptx" if mimeType == "application/vnd.openxmlformats-officedocument.presentationml.presentation" else "ppt" - return [{ - "sequenceNr": 1, - "name": "1_powerpoint", # Simplified naming - "ext": fileExtension, - "contentType": mimeType, - "data": base64.b64encode(fileContent).decode('utf-8'), - "base64Encoded": True, - "metadata": { - "isText": False, - "format": "powerpoint" - } - }] - -def extractBinaryContent(fileName: str, fileContent: bytes, mimeType: str) -> List[Dict[str, Any]]: - """ - Fallback for binary files where no specific extraction is possible. - - Args: - fileName: Name of the file - fileContent: Binary data of the file - mimeType: MIME type of the file - - Returns: - List with a binary Content object with base64Encoded = True - """ - fileExtension = os.path.splitext(fileName)[1][1:] if os.path.splitext(fileName)[1] else "bin" - return [{ - "sequenceNr": 1, - "name": "1_binary", # Simplified naming - "ext": fileExtension, - "contentType": mimeType, - "data": base64.b64encode(fileContent).decode('utf-8'), - "base64Encoded": True, - "metadata": { - "isText": False, - "format": "binary" - } - }] \ No newline at end of file diff --git a/static/24_defAttributes.py b/static/24_defAttributes.py deleted file mode 100644 index 731ecfd9..00000000 --- a/static/24_defAttributes.py +++ /dev/null @@ -1,123 +0,0 @@ -from pydantic import BaseModel, Field -from typing import List, Dict, Any, Optional - -# Define the model for attribute definitions -class AttributeDefinition(BaseModel): - name: str - label: str - type: str - required: bool = False - placeholder: Optional[str] = None - defaultValue: Optional[Any] = None - 
options: Optional[List[Dict[str, Any]]] = None - editable: bool = True - visible: bool = True - order: int = 0 - validation: Optional[Dict[str, Any]] = None - helpText: Optional[str] = None - -# Helper classes for type mapping -typeMappings = { - "int": "number", - "str": "string", - "float": "number", - "bool": "boolean", - "List[int]": "array", - "List[str]": "array", - "Dict[str, Any]": "object", - "Optional[str]": "string", - "Optional[int]": "number", - "Optional[Dict[str, Any]]": "object" -} - -# Special field types based on naming conventions -specialFieldTypes = { - "content": "textarea", - "description": "textarea", - "instructions": "textarea", - "password": "password", - "email": "email", - "workspaceId": "select", - "agentId": "select", - "type": "select" -} - -# Function to convert a Pydantic model into attribute definitions -def getModelAttributes(modelClass, userLanguage="de"): - """ - Converts a Pydantic model into a list of AttributeDefinition objects - """ - attributes = [] - - # Go through all fields in the model - for i, (fieldName, field) in enumerate(modelClass.__fields__.items()): - # Skip internal fields - if fieldName.startswith('_') or fieldName in ["label", "fieldLabels"]: - continue - - # Determine the field type - fieldType = typeMappings.get(str(field.type_), "string") - - # Check for special field types - if fieldName in specialFieldTypes: - fieldType = specialFieldTypes[fieldName] - - # Get the label (if available) - fieldLabel = fieldName.replace('_', ' ').capitalize() - if hasattr(modelClass, 'fieldLabels') and fieldName in modelClass.fieldLabels: - labelObj = modelClass.fieldLabels[fieldName] - fieldLabel = labelObj.getLabel(userLanguage) - - # Determine default values and required status - required = field.required - defaultValue = field.default if not field.required else None - - # Check for validation rules - validation = None - if field.validators: - validation = {"hasValidators": True} - - # Placeholder text - placeholder = 
f"Please enter {fieldLabel}" - - # Special options for Select fields - options = None - if fieldType == "select": - if fieldName == "type" and modelClass.__name__ == "Agent": - options = [ - {"value": "Analysis", "label": "Analysis"}, - {"value": "Transformation", "label": "Transformation"}, - {"value": "Generation", "label": "Generation"}, - {"value": "Classification", "label": "Classification"}, - {"value": "Custom", "label": "Custom"} - ] - - # Extract description from Field object - description = None - # Try to get description from various possible sources - if hasattr(field, 'field_info') and hasattr(field.field_info, 'description'): - description = field.field_info.description - elif hasattr(field, 'description'): - description = field.description - elif hasattr(field, 'schema') and hasattr(field.schema, 'description'): - description = field.schema.description - - # Create attribute definition - attrDef = AttributeDefinition( - name=fieldName, - label=fieldLabel, - type=fieldType, - required=required, - placeholder=placeholder, - defaultValue=defaultValue, - options=options, - editable=fieldName not in ["id", "mandateId", "userId", "createdAt", "uploadDate"], - visible=fieldName not in ["hashedPassword", "mandateId", "userId"], - order=i, - validation=validation, - helpText=description or "" # Set empty string as default value if no description found - ) - - attributes.append(attrDef) - - return attributes \ No newline at end of file diff --git a/static/25_email_preview.html b/static/25_email_preview.html deleted file mode 100644 index b9a1d176..00000000 --- a/static/25_email_preview.html +++ /dev/null @@ -1,42 +0,0 @@ - - - - - - Email Preview: Attached: documentProcessor.py and defAttributes.py - - - -
- - - -
- - - \ No newline at end of file diff --git a/static/26_email_template.json b/static/26_email_template.json deleted file mode 100644 index bbc2aa46..00000000 --- a/static/26_email_template.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "recipient": "recipient@example.com", - "subject": "Attached: documentProcessor.py and defAttributes.py", - "plainBody": "Sehr geehrte Damen und Herren,\n\nanbei finden Sie die angeforderten Dokumente 'documentProcessor.py' und 'defAttributes.py'. Bitte z\u00f6gern Sie nicht, sich bei Fragen oder weiteren Anliegen an uns zu wenden.\n\nMit freundlichen Gr\u00fc\u00dfen,\n\nIhr Team", - "htmlBody": "

Sehr geehrte Damen und Herren,

anbei finden Sie die angeforderten Dokumente documentProcessor.py und defAttributes.py. Bitte z\u00f6gern Sie nicht, sich bei Fragen oder weiteren Anliegen an uns zu wenden.

Mit freundlichen Gr\u00fc\u00dfen,
Ihr Team

" -} \ No newline at end of file diff --git a/static/27_email_preview.html b/static/27_email_preview.html deleted file mode 100644 index b250da5e..00000000 --- a/static/27_email_preview.html +++ /dev/null @@ -1,42 +0,0 @@ - - - - - - Email Preview: Angehängt: documentProcessor.py und defAttributes.py - - - -
- - - -
- - - \ No newline at end of file diff --git a/static/28_email_template.json b/static/28_email_template.json deleted file mode 100644 index e0355eea..00000000 --- a/static/28_email_template.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "recipient": "team@example.com", - "subject": "Angeh\u00e4ngt: documentProcessor.py und defAttributes.py", - "plainBody": "Liebe Teammitglieder,\n\nim Anhang finden Sie die Dateien documentProcessor.py und defAttributes.py. Bitte \u00fcberpr\u00fcfen Sie diese und geben Sie mir Ihr Feedback.\n\nMit freundlichen Gr\u00fc\u00dfen,\n[Ihr Name]", - "htmlBody": "
E-Mail-Vorschau

Liebe Teammitglieder,

im Anhang finden Sie die Dateien documentProcessor.py und defAttributes.py. Bitte \u00fcberpr\u00fcfen Sie diese und geben Sie mir Ihr Feedback.

Mit freundlichen Gr\u00fc\u00dfen,
[Ihr Name]

Dies ist eine Vorschau der E-Mail und kann in verschiedenen E-Mail-Clients unterschiedlich angezeigt werden.
" -} \ No newline at end of file diff --git a/static/29_email_preview.html b/static/29_email_preview.html deleted file mode 100644 index f4e97f9e..00000000 --- a/static/29_email_preview.html +++ /dev/null @@ -1,42 +0,0 @@ - - - - - - Email Preview: Neuer Termin für unser Meeting - - - -
- - - -
- - - \ No newline at end of file diff --git a/static/2_LF-Details_Description.txt b/static/2_LF-Details_Description.txt deleted file mode 100644 index 74ea1ecd..00000000 --- a/static/2_LF-Details_Description.txt +++ /dev/null @@ -1,295 +0,0 @@ -Comprehensive Workflow and Team Roles in Product Development -============================================================ - -# Introduction to Comprehensive Workflow and Team Roles in Product Development - -## Purpose and Scope - -This guide, "Comprehensive Workflow and Team Roles in Product Development," is meticulously crafted to serve as an essential resource for professionals involved in the intricate process of product development. It aims to provide a detailed exploration of the workflows and team roles that are pivotal in transforming innovative ideas into successful products. By delving into the structured processes and collaborative dynamics that drive product development, this guide offers valuable insights into optimizing efficiency and effectiveness within technical teams. - -## Context and Background - -In today's fast-paced technological landscape, the development of a product from concept to market-ready status involves a complex interplay of various teams and tools. Each team, whether it be product management, engineering, quality assurance (QA), or operations, plays a critical role in ensuring that the product not only meets market demands but also adheres to high standards of quality and functionality. The integration of sophisticated tools for ticketing, roadmaps, and management dashboards further enhances the ability of these teams to coordinate and execute their tasks with precision. - -## Document Outline - -Readers of this guide will embark on a comprehensive journey through the product development lifecycle. The document is structured to provide: - -1. 
**An Overview of Product Development Workflow**: A detailed examination of the stages involved in product development, from initial concept through to deployment and maintenance. - -2. **Team Roles and Responsibilities**: Insight into the specific roles and responsibilities of the product, engineering, QA, and operations teams, highlighting how each contributes to the overall success of the product. - -3. **Tool Integration**: An analysis of the tools that facilitate seamless workflow management, including ticketing systems, roadmap planning tools, and management dashboards, and how they integrate into the daily operations of development teams. - -4. **Best Practices and Case Studies**: Practical examples and case studies that illustrate successful implementations of workflows and team collaborations in real-world scenarios. - -## Tone and Audience - -This guide is tailored for a technical audience, including product managers, engineers, QA specialists, and operations professionals. The tone is formal and professional, designed to engage readers who are seeking to deepen their understanding of product development processes and enhance their team's performance. By providing a structured and insightful examination of workflows and roles, this guide aims to empower technical teams to achieve greater synergy and success in their product development endeavors. - -Introduction ------------- - -# Introduction - -In the rapidly evolving landscape of product development, understanding the intricate workflow and the pivotal roles played by various teams is essential for achieving success. This guide, "Comprehensive Workflow and Team Roles in Product Development," aims to provide a detailed exploration of the processes and team dynamics that drive product innovation from conception to deployment. 
By delving into the workflow stages and the integration of essential tools, this guide serves as an invaluable resource for technical professionals seeking to optimize their product development strategies. - -## Overview of the Product Development Workflow - -The product development workflow is a structured sequence of stages that transforms initial ideas into market-ready products. This workflow is designed to ensure that each phase of development is meticulously planned and executed, minimizing risks and maximizing efficiency. The workflow typically begins with the **Input** stage, where ideas are sourced from customers, sales teams, and internal brainstorming sessions. These inputs serve as the foundation for the subsequent stages, guiding the product team in aligning development efforts with market needs and business objectives. - -The workflow progresses through several critical stages, each involving specific teams and processes: - -1. **Product Team:** - - **Discover:** - - **Collect:** The product team gathers a diverse array of ideas and inputs, ensuring a comprehensive understanding of potential opportunities. - - **Qualify:** Ideas are analyzed and matched against business goals, market trends, and feasibility, allowing the team to prioritize the most promising concepts. - -2. **Engineering Team:** - - **Design and Develop:** The engineering team translates qualified ideas into technical specifications and begins the development process, focusing on creating robust and scalable solutions. - -3. **Q&A Team:** - - **Test and Validate:** Quality assurance plays a crucial role in ensuring that the product meets the highest standards of quality and functionality. Rigorous testing and validation processes are employed to identify and rectify any issues before release. - -4. **Operations:** - - **Deploy and Monitor:** The operations team is responsible for deploying the product to the market and continuously monitoring its performance. 
This stage involves the integration of feedback loops to facilitate ongoing improvements and adaptations. - -## Importance of Team Roles and Tool Integration - -The success of the product development process hinges on the effective collaboration and coordination of various teams, each contributing their unique expertise and perspectives. The **Product Team** is tasked with strategic planning and market alignment, while the **Engineering Team** focuses on technical execution. The **Q&A Team** ensures quality assurance, and the **Operations Team** manages deployment and performance monitoring. - -In addition to clearly defined roles, the integration of specialized tools is crucial for streamlining workflows and enhancing productivity. Tools for **ticketing** facilitate efficient task management and communication across teams, ensuring that issues are promptly addressed and resolved. **Roadmaps** provide a visual representation of the product development timeline, helping teams stay aligned with project goals and deadlines. **Management dashboards** offer real-time insights into project progress and performance metrics, enabling informed decision-making and strategic adjustments. - -By leveraging these tools, teams can enhance collaboration, improve transparency, and maintain a cohesive approach to product development. This integration not only optimizes the workflow but also empowers teams to deliver high-quality products that meet customer expectations and drive business success. - -In conclusion, understanding the comprehensive workflow and the critical roles of each team in product development is essential for navigating the complexities of modern product innovation. This guide will delve deeper into each aspect, providing technical professionals with the knowledge and tools needed to excel in their roles and contribute to successful product outcomes. 
- -Teams Involved --------------- - -# Teams Involved - -In the complex landscape of product development, multiple teams collaborate to ensure the successful delivery of a product from conception to deployment. Each team plays a critical role in the workflow, contributing their expertise to different stages of the process. This section provides a detailed overview of the roles and responsibilities of the key teams involved: the Product Team, the Engineering Team, the QA Team, and the Operations Team. - -## Product Team - -The Product Team is at the forefront of the product development process, responsible for setting the vision and direction of the product. Their roles include: - -- **Discover:** - - **Collect:** The Product Team gathers ideas and inputs from various sources, including customers, sales teams, and internal stakeholders. This stage is crucial for understanding market needs and identifying potential opportunities. - - **Qualify:** Once ideas are collected, the team analyzes them to ensure alignment with business objectives and feasibility. This involves evaluating the potential impact and prioritizing ideas based on strategic goals. - -- **Define:** - - **Roadmap Creation:** The Product Team develops a product roadmap that outlines the strategic direction and key milestones. This roadmap serves as a guiding document for all teams involved in the development process. - - **Requirements Specification:** Detailed product requirements are documented, providing clear guidance for the Engineering Team. This includes user stories, acceptance criteria, and any necessary technical specifications. - -## Engineering Team - -The Engineering Team is responsible for transforming the product vision into a tangible, functional product. Their roles encompass: - -- **Design and Development:** - - **Architecture Design:** Engineers design the system architecture, ensuring scalability, reliability, and performance. 
This involves selecting appropriate technologies and frameworks. - - **Implementation:** The team writes code and develops features according to the specifications provided by the Product Team. They ensure that the product is built to meet the defined requirements. - -- **Integration:** - - **Tool Integration:** Engineers integrate various tools for ticketing, roadmaps, and management dashboards to streamline the development process and enhance collaboration across teams. - - **Continuous Integration/Continuous Deployment (CI/CD):** The team implements CI/CD pipelines to automate testing and deployment, ensuring rapid and reliable delivery of new features and updates. - -## QA Team - -The QA (Quality Assurance) Team plays a pivotal role in maintaining the quality and reliability of the product. Their responsibilities include: - -- **Testing:** - - **Test Planning:** The QA Team develops comprehensive test plans that cover all aspects of the product, including functionality, performance, and security. - - **Execution:** They conduct various types of testing, such as unit testing, integration testing, and user acceptance testing, to identify and resolve defects before the product reaches the end-users. - -- **Quality Control:** - - **Defect Management:** The team tracks and manages defects using ticketing systems, ensuring that issues are addressed promptly and effectively. - - **Continuous Improvement:** QA professionals analyze testing outcomes to identify areas for improvement, contributing to the enhancement of product quality over time. - -## Operations Team - -The Operations Team ensures that the product is deployed smoothly and operates efficiently in the production environment. Their roles include: - -- **Deployment:** - - **Release Management:** The Operations Team manages the release process, coordinating with other teams to ensure that deployments are executed without disruptions. 
- - **Environment Configuration:** They configure and maintain the production environment, ensuring that it meets the necessary requirements for optimal performance. - -- **Monitoring and Support:** - - **System Monitoring:** The team implements monitoring tools to track system performance and detect issues in real-time. This proactive approach helps in maintaining high availability and reliability. - - **Incident Response:** In the event of system failures or performance issues, the Operations Team is responsible for incident management and resolution, minimizing downtime and impact on users. - -Each team plays a vital role in the product development lifecycle, and their collaboration is essential for delivering high-quality products that meet customer needs and business objectives. By integrating tools and processes effectively, these teams ensure a seamless workflow from ideation to deployment. - -Workflow Stages ---------------- - -# Workflow Stages - -In the product development lifecycle, understanding the workflow stages is crucial for ensuring seamless collaboration among various teams and achieving successful product outcomes. This section provides a detailed overview of the workflow stages, focusing on the roles of the product, engineering, QA, and operations teams, and the integration of tools for ticketing, roadmaps, and management dashboards. - -## Input - -The initial stage of the workflow involves gathering inputs from various sources to fuel the product development process. These inputs are critical for identifying potential opportunities and challenges. - -- **Sources:** - - **Customers:** Feedback and suggestions from end-users provide valuable insights into product improvements and new features. - - **Sales:** Information from the sales team highlights market demands and competitive landscape, guiding product prioritization. 
- - **Internal Ideas:** Contributions from team members across the organization can lead to innovative solutions and enhancements. - -## Product Team Processes - -The product team plays a pivotal role in transforming raw inputs into actionable plans. This stage is divided into several key processes: - -- **Discover:** - - **Collect:** The product team gathers ideas and inputs from various sources, ensuring a comprehensive understanding of user needs and market trends. - - **Qualify:** Ideas are analyzed and matched against business objectives and feasibility to determine their potential impact and alignment with the company's vision. - -- **Define:** - - **Prioritize:** The team prioritizes ideas based on strategic importance, resource availability, and potential ROI. - - **Plan:** Detailed plans are developed, outlining the scope, objectives, and timelines for each initiative. - -- **Shape:** - - **Design:** The product team collaborates with designers to create wireframes and prototypes, ensuring the proposed solutions are user-friendly and effective. - - **Specification:** Detailed specifications are documented, providing clear guidance for the engineering team. - -## Engineering Team Processes - -Once the product team has defined and shaped the product, the engineering team takes over to assess and develop the technical aspects. - -- **Assessment:** - - **Feasibility Study:** Engineers evaluate the technical feasibility of the proposed solutions, identifying potential challenges and resource requirements. - - **Technical Planning:** A detailed technical plan is created, outlining the architecture, technologies, and tools to be used. - -- **Development:** - - **Implementation:** The engineering team begins coding and building the product, adhering to the specifications and timelines. - - **Integration:** New features and updates are integrated into the existing system, ensuring compatibility and performance. 
- -## QA Team Processes - -Quality assurance is a critical stage in the workflow, ensuring that the product meets the highest standards before deployment. - -- **Testing:** - - **Unit Testing:** Individual components are tested to ensure they function correctly in isolation. - - **Integration Testing:** The product is tested as a whole to verify that all components work together seamlessly. - -- **Validation:** - - **User Acceptance Testing (UAT):** The product is tested in real-world scenarios to validate its functionality and usability. - - **Bug Fixing:** Any issues identified during testing are addressed and resolved promptly. - -## Operations Team Processes - -The final stage involves deploying the product and monitoring its performance in the live environment. - -- **Deployment:** - - **Release Management:** The operations team manages the release process, ensuring a smooth transition from development to production. - - **Configuration:** The product is configured for optimal performance and security in the live environment. - -- **Monitoring:** - - **Performance Monitoring:** Continuous monitoring of the product's performance helps identify and address any issues proactively. - - **Feedback Loop:** Feedback from users and performance data are collected to inform future improvements and updates. - -In conclusion, each stage of the workflow is integral to the success of product development. By clearly defining roles and processes, and leveraging tools for ticketing, roadmaps, and management dashboards, teams can collaborate effectively to deliver high-quality products that meet user needs and business objectives. - -Tool Integration ----------------- - -Title: Tool Integration - -In the realm of product development, the integration of various tools is crucial to streamline processes, enhance collaboration, and ensure efficient workflow management. 
This section delves into the essential tools used in product development, focusing on ticketing systems, roadmap tools, and management dashboards. Each tool plays a pivotal role in facilitating communication and coordination among the product, engineering, QA, and operations teams. - -## Ticketing Systems - -Ticketing systems are the backbone of issue tracking and task management within product development. These systems enable teams to log, prioritize, and track the progress of tasks and issues throughout the development lifecycle. - -### Key Features: -- **Issue Tracking:** Allows teams to report bugs, feature requests, and other tasks, ensuring nothing falls through the cracks. -- **Prioritization:** Facilitates the organization of tasks based on urgency and importance, helping teams focus on high-impact work. -- **Collaboration:** Provides a platform for team members to discuss issues, share updates, and collaborate on solutions. -- **Integration:** Often integrates with other tools such as version control systems and CI/CD pipelines to provide a seamless workflow. - -### Examples: -- **Jira:** Widely used for its robust features and flexibility, Jira supports agile methodologies and offers extensive customization options. -- **Zendesk:** Known for its customer support capabilities, Zendesk also provides ticketing solutions that integrate customer feedback directly into the development process. - -## Roadmap Tools - -Roadmap tools are essential for strategic planning and communication of product vision and progress. They help align the product team’s efforts with business goals and provide a clear timeline for stakeholders. - -### Key Features: -- **Visualization:** Offers visual representations of product timelines, milestones, and dependencies, making it easier to communicate plans. -- **Collaboration:** Enables cross-functional teams to contribute to and update the roadmap, ensuring alignment across departments. 
-- **Flexibility:** Allows for adjustments as priorities shift, ensuring the roadmap remains relevant and actionable. - -### Examples: -- **Aha!:** A comprehensive tool that supports product strategy, planning, and roadmapping, with features for capturing ideas and aligning them with business objectives. -- **ProductPlan:** Known for its intuitive interface, ProductPlan allows teams to create and share roadmaps easily, facilitating stakeholder engagement. - -## Management Dashboards - -Management dashboards provide a high-level overview of project status, performance metrics, and key performance indicators (KPIs). They are crucial for decision-making and ensuring that projects stay on track. - -### Key Features: -- **Real-Time Data:** Offers up-to-date insights into project progress, resource allocation, and team performance. -- **Customization:** Allows managers to tailor dashboards to display the most relevant data for their specific needs. -- **Integration:** Connects with various data sources to provide a comprehensive view of the project landscape. - -### Examples: -- **Tableau:** A powerful analytics platform that enables the creation of interactive dashboards, providing deep insights into project data. -- **Power BI:** Microsoft's business analytics service that delivers robust data visualization and reporting capabilities, integrating seamlessly with other Microsoft tools. - -In conclusion, the integration of ticketing systems, roadmap tools, and management dashboards is vital for the efficient operation of product development teams. These tools not only enhance communication and collaboration but also provide the necessary infrastructure to manage complex workflows and align team efforts with strategic objectives. By leveraging these tools, teams can ensure a more organized, transparent, and effective product development process. 
- -Conclusion ----------- - -# Conclusion - -In this guide, we have explored the intricate workflow and team roles that are essential in the product development process. By understanding these components, teams can enhance their efficiency and effectiveness in bringing products to market. This conclusion will summarize the key aspects of the workflow and team roles, as well as highlight the benefits of integrating tools that facilitate seamless collaboration and management. - -## Summary of Workflow and Team Roles - -The product development process is a collaborative effort that involves multiple teams, each with distinct roles and responsibilities. The workflow begins with the **Product Team**, which is responsible for the discovery phase. This phase involves collecting inputs from various sources such as customers, sales, and internal ideas, and qualifying these inputs against business objectives and market needs. - -Once the product requirements are defined, the **Engineering Team** takes over to design and develop the product. This stage is critical as it transforms ideas into tangible solutions. The engineering team works closely with the product team to ensure that the technical specifications align with the product vision. - -The **Quality Assurance (QA) Team** plays a pivotal role in maintaining the integrity of the product. Through rigorous testing and validation, the QA team ensures that the product meets the required standards and functions as intended. Their feedback is crucial for identifying and rectifying defects before the product reaches the market. - -Finally, the **Operations Team** is responsible for deploying and maintaining the product. They ensure that the product is delivered efficiently and that any operational issues are promptly addressed. This team also monitors the product's performance and gathers data to inform future development cycles. 
- -## Benefits of Integrated Tools - -The integration of tools for ticketing, roadmaps, and management dashboards significantly enhances the product development process. These tools provide a centralized platform for tracking progress, managing tasks, and facilitating communication across teams. - -- **Ticketing Systems**: These systems streamline issue tracking and resolution, allowing teams to prioritize tasks and allocate resources effectively. By maintaining a clear record of issues and resolutions, teams can improve their response times and reduce downtime. - -- **Roadmaps**: Product roadmaps offer a strategic overview of the product's development trajectory. They help teams align their efforts with long-term goals and ensure that all stakeholders are informed of the product's progress and future direction. - -- **Management Dashboards**: Dashboards provide real-time insights into the development process, enabling managers to make informed decisions. They offer visibility into key performance indicators (KPIs) and facilitate the identification of bottlenecks or areas for improvement. - -In conclusion, a well-defined workflow and clear team roles are fundamental to successful product development. By leveraging integrated tools, teams can enhance their collaboration, streamline processes, and ultimately deliver high-quality products that meet market demands. As organizations continue to evolve, embracing these practices will be crucial for maintaining a competitive edge in the ever-changing landscape of product development. - -CONCLUSION ----------- - -Conclusion - -In this guide, "Comprehensive Workflow and Team Roles in Product Development," we have explored the intricate processes and collaborative efforts that drive successful product development. By dissecting the workflow, we have highlighted the critical roles played by various teams, including product management, engineering, quality assurance (QA), and operations. 
Each team's responsibilities are pivotal in ensuring that the product development lifecycle is efficient, effective, and aligned with organizational goals. - -Key Points Summary: -- **Product Development Workflow**: We detailed the sequential and iterative processes involved in product development, emphasizing the importance of clear communication and structured phases from ideation to deployment. -- **Team Roles**: The guide outlined the specific roles and responsibilities of the product, engineering, QA, and operations teams. Each team contributes uniquely to the development process, ensuring that products are not only built to specifications but also meet quality standards and operational requirements. -- **Tool Integration**: We discussed the integration of various tools that facilitate seamless workflow management. Tools for ticketing, roadmaps, and management dashboards play a crucial role in tracking progress, managing tasks, and ensuring transparency across teams. - -Closure and Recommendations: -Understanding the workflow and team roles in product development is essential for any organization aiming to enhance its product delivery capabilities. By implementing structured processes and fostering collaboration across teams, organizations can improve efficiency, reduce time-to-market, and increase product quality. It is recommended that teams continuously evaluate and refine their workflows and tool integrations to adapt to evolving project needs and technological advancements. - -Next Steps: -- Encourage cross-functional training to enhance team collaboration and understanding of each other's roles. -- Regularly review and update tool integrations to ensure they align with current project requirements and industry standards. -- Foster a culture of continuous improvement by soliciting feedback from all team members and stakeholders. 
- -Significance: -This guide serves as a foundational resource for technical teams and project managers involved in product development. By providing a comprehensive overview of workflows and team roles, it equips readers with the knowledge to optimize their development processes and achieve successful product outcomes. As the landscape of product development continues to evolve, staying informed and adaptable will be key to maintaining a competitive edge. - -Thank you for engaging with this guide. We hope it serves as a valuable asset in your product development endeavors. diff --git a/static/30_email_template.json b/static/30_email_template.json deleted file mode 100644 index b4745416..00000000 --- a/static/30_email_template.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "recipient": "peter.muster@domain.com", - "subject": "Neuer Termin f\u00fcr unser Meeting", - "plainBody": "Sehr geehrter Herr Muster,\n\nich hoffe, es geht Ihnen gut. Ich schreibe Ihnen, um unser geplantes Meeting von 10 Uhr auf Freitag zu verschieben. Bitte lassen Sie mich wissen, ob dieser neue Termin f\u00fcr Sie passt.\n\nVielen Dank f\u00fcr Ihr Verst\u00e4ndnis.\n\nMit freundlichen Gr\u00fc\u00dfen,\n\n[Ihr Name]", - "htmlBody": "

Sehr geehrter Herr Muster,

ich hoffe, es geht Ihnen gut. Ich schreibe Ihnen, um unser geplantes Meeting von 10 Uhr auf Freitag zu verschieben. Bitte lassen Sie mich wissen, ob dieser neue Termin f\u00fcr Sie passt.

Vielen Dank f\u00fcr Ihr Verst\u00e4ndnis.

Mit freundlichen Gr\u00fc\u00dfen,
[Ihr Name]

" -} \ No newline at end of file diff --git a/static/3_generated_code.py b/static/3_generated_code.py deleted file mode 100644 index 3b2db9ae..00000000 --- a/static/3_generated_code.py +++ /dev/null @@ -1,38 +0,0 @@ -inputFiles = [] # DO NOT CHANGE THIS LINE - -def is_prime(n): - if n <= 1: - return False - if n <= 3: - return True - if n % 2 == 0 or n % 3 == 0: - return False - i = 5 - while i * i <= n: - if n % i == 0 or n % (i + 2) == 0: - return False - i += 6 - return True - -def generate_primes(limit): - primes = [] - num = 2 - while len(primes) < limit: - if is_prime(num): - primes.append(num) - num += 1 - return primes - -primes = generate_primes(1000) -prime_numbers_content = "\n".join(map(str, primes)) - -result = { - "prime_numbers.txt": { - "content": prime_numbers_content, - "base64Encoded": False, - "contentType": "text/plain" - } -} - -import json -print(json.dumps(result)) \ No newline at end of file diff --git a/static/4_execution_history.json b/static/4_execution_history.json deleted file mode 100644 index 7ee8aa63..00000000 --- a/static/4_execution_history.json +++ /dev/null @@ -1,19 +0,0 @@ -[ - { - "attempt": 1, - "code": "inputFiles = [] # DO NOT CHANGE THIS LINE\n\ndef is_prime(n):\n if n <= 1:\n return False\n if n <= 3:\n return True\n if n % 2 == 0 or n % 3 == 0:\n return False\n i = 5\n while i * i <= n:\n if n % i == 0 or n % (i + 2) == 0:\n return False\n i += 6\n return True\n\ndef generate_primes(limit):\n primes = []\n num = 2\n while len(primes) < limit:\n if is_prime(num):\n primes.append(num)\n num += 1\n return primes\n\nprimes = generate_primes(1000)\nprime_numbers_content = \"\\n\".join(map(str, primes))\n\nresult = {\n \"prime_numbers.txt\": {\n \"content\": prime_numbers_content,\n \"base64Encoded\": False,\n \"contentType\": \"text/plain\"\n }\n}\n\nimport json\nprint(json.dumps(result))", - "result": { - "success": true, - "output": "{\"prime_numbers.txt\": {\"content\": 
\"2\\n3\\n5\\n7\\n11\\n13\\n17\\n19\\n23\\n29\\n31\\n37\\n41\\n43\\n47\\n53\\n59\\n61\\n67\\n71\\n73\\n79\\n83\\n89\\n97\\n101\\n103\\n107\\n109\\n113\\n127\\n131\\n137\\n139\\n149\\n151\\n157\\n163\\n167\\n173\\n179\\n181\\n191\\n193\\n197\\n199\\n211\\n223\\n227\\n229\\n233\\n239\\n241\\n251\\n257\\n263\\n269\\n271\\n277\\n281\\n283\\n293\\n307\\n311\\n313\\n317\\n331\\n337\\n347\\n349\\n353\\n359\\n367\\n373\\n379\\n383\\n389\\n397\\n401\\n409\\n419\\n421\\n431\\n433\\n439\\n443\\n449\\n457\\n461\\n463\\n467\\n479\\n487\\n491\\n499\\n503\\n509\\n521\\n523\\n541\\n547\\n557\\n563\\n569\\n571\\n577\\n587\\n593\\n599\\n601\\n607\\n613\\n617\\n619\\n631\\n641\\n643\\n647\\n653\\n659\\n661\\n673\\n677\\n683\\n691\\n701\\n709\\n719\\n727\\n733\\n739\\n743\\n751\\n757\\n761\\n769\\n773\\n787\\n797\\n809\\n811\\n821\\n823\\n827\\n829\\n839\\n853\\n857\\n859\\n863\\n877\\n881\\n883\\n887\\n907\\n911\\n919\\n929\\n937\\n941\\n947\\n953\\n967\\n971\\n977\\n983\\n991\\n997\\n1009\\n1013\\n1019\\n1021\\n1031\\n1033\\n1039\\n1049\\n1051\\n1061\\n1063\\n1069\\n1087\\n1091\\n1093\\n1097\\n1103\\n1109\\n1117\\n1123\\n1129\\n1151\\n1153\\n1163\\n1171\\n1181\\n1187\\n1193\\n1201\\n1213\\n1217\\n1223\\n1229\\n1231\\n1237\\n1249\\n1259\\n1277\\n1279\\n1283\\n1289\\n1291\\n1297\\n1301\\n1303\\n1307\\n1319\\n1321\\n1327\\n1361\\n1367\\n1373\\n1381\\n1399\\n1409\\n1423\\n1427\\n1429\\n1433\\n1439\\n1447\\n1451\\n1453\\n1459\\n1471\\n1481\\n1483\\n1487\\n1489\\n1493\\n1499\\n1511\\n1523\\n1531\\n1543\\n1549\\n1553\\n1559\\n1567\\n1571\\n1579\\n1583\\n1597\\n1601\\n1607\\n1609\\n1613\\n1619\\n1621\\n1627\\n1637\\n1657\\n1663\\n1667\\n1669\\n1693\\n1697\\n1699\\n1709\\n1721\\n1723\\n1733\\n1741\\n1747\\n1753\\n1759\\n1777\\n1783\\n1787\\n1789\\n1801\\n1811\\n1823\\n1831\\n1847\\n1861\\n1867\\n1871\\n1873\\n1877\\n1879\\n1889\\n1901\\n1907\\n1913\\n1931\\n1933\\n1949\\n1951\\n1973\\n1979\\n1987\\n1993\\n1997\\n1999\\n2003\\n2011\\n2017\\n2027\\n2029\\n2039\\n2053\\n2063\\n2069\\n2081\\n2083
\\n2087\\n2089\\n2099\\n2111\\n2113\\n2129\\n2131\\n2137\\n2141\\n2143\\n2153\\n2161\\n2179\\n2203\\n2207\\n2213\\n2221\\n2237\\n2239\\n2243\\n2251\\n2267\\n2269\\n2273\\n2281\\n2287\\n2293\\n2297\\n2309\\n2311\\n2333\\n2339\\n2341\\n2347\\n2351\\n2357\\n2371\\n2377\\n2381\\n2383\\n2389\\n2393\\n2399\\n2411\\n2417\\n2423\\n2437\\n2441\\n2447\\n2459\\n2467\\n2473\\n2477\\n2503\\n2521\\n2531\\n2539\\n2543\\n2549\\n2551\\n2557\\n2579\\n2591\\n2593\\n2609\\n2617\\n2621\\n2633\\n2647\\n2657\\n2659\\n2663\\n2671\\n2677\\n2683\\n2687\\n2689\\n2693\\n2699\\n2707\\n2711\\n2713\\n2719\\n2729\\n2731\\n2741\\n2749\\n2753\\n2767\\n2777\\n2789\\n2791\\n2797\\n2801\\n2803\\n2819\\n2833\\n2837\\n2843\\n2851\\n2857\\n2861\\n2879\\n2887\\n2897\\n2903\\n2909\\n2917\\n2927\\n2939\\n2953\\n2957\\n2963\\n2969\\n2971\\n2999\\n3001\\n3011\\n3019\\n3023\\n3037\\n3041\\n3049\\n3061\\n3067\\n3079\\n3083\\n3089\\n3109\\n3119\\n3121\\n3137\\n3163\\n3167\\n3169\\n3181\\n3187\\n3191\\n3203\\n3209\\n3217\\n3221\\n3229\\n3251\\n3253\\n3257\\n3259\\n3271\\n3299\\n3301\\n3307\\n3313\\n3319\\n3323\\n3329\\n3331\\n3343\\n3347\\n3359\\n3361\\n3371\\n3373\\n3389\\n3391\\n3407\\n3413\\n3433\\n3449\\n3457\\n3461\\n3463\\n3467\\n3469\\n3491\\n3499\\n3511\\n3517\\n3527\\n3529\\n3533\\n3539\\n3541\\n3547\\n3557\\n3559\\n3571\\n3581\\n3583\\n3593\\n3607\\n3613\\n3617\\n3623\\n3631\\n3637\\n3643\\n3659\\n3671\\n3673\\n3677\\n3691\\n3697\\n3701\\n3709\\n3719\\n3727\\n3733\\n3739\\n3761\\n3767\\n3769\\n3779\\n3793\\n3797\\n3803\\n3821\\n3823\\n3833\\n3847\\n3851\\n3853\\n3863\\n3877\\n3881\\n3889\\n3907\\n3911\\n3917\\n3919\\n3923\\n3929\\n3931\\n3943\\n3947\\n3967\\n3989\\n4001\\n4003\\n4007\\n4013\\n4019\\n4021\\n4027\\n4049\\n4051\\n4057\\n4073\\n4079\\n4091\\n4093\\n4099\\n4111\\n4127\\n4129\\n4133\\n4139\\n4153\\n4157\\n4159\\n4177\\n4201\\n4211\\n4217\\n4219\\n4229\\n4231\\n4241\\n4243\\n4253\\n4259\\n4261\\n4271\\n4273\\n4283\\n4289\\n4297\\n4327\\n4337\\n4339\\n4349\\n4357\\n4363\\n4373\\n4391\\n4397\\n44
09\\n4421\\n4423\\n4441\\n4447\\n4451\\n4457\\n4463\\n4481\\n4483\\n4493\\n4507\\n4513\\n4517\\n4519\\n4523\\n4547\\n4549\\n4561\\n4567\\n4583\\n4591\\n4597\\n4603\\n4621\\n4637\\n4639\\n4643\\n4649\\n4651\\n4657\\n4663\\n4673\\n4679\\n4691\\n4703\\n4721\\n4723\\n4729\\n4733\\n4751\\n4759\\n4783\\n4787\\n4789\\n4793\\n4799\\n4801\\n4813\\n4817\\n4831\\n4861\\n4871\\n4877\\n4889\\n4903\\n4909\\n4919\\n4931\\n4933\\n4937\\n4943\\n4951\\n4957\\n4967\\n4969\\n4973\\n4987\\n4993\\n4999\\n5003\\n5009\\n5011\\n5021\\n5023\\n5039\\n5051\\n5059\\n5077\\n5081\\n5087\\n5099\\n5101\\n5107\\n5113\\n5119\\n5147\\n5153\\n5167\\n5171\\n5179\\n5189\\n5197\\n5209\\n5227\\n5231\\n5233\\n5237\\n5261\\n5273\\n5279\\n5281\\n5297\\n5303\\n5309\\n5323\\n5333\\n5347\\n5351\\n5381\\n5387\\n5393\\n5399\\n5407\\n5413\\n5417\\n5419\\n5431\\n5437\\n5441\\n5443\\n5449\\n5471\\n5477\\n5479\\n5483\\n5501\\n5503\\n5507\\n5519\\n5521\\n5527\\n5531\\n5557\\n5563\\n5569\\n5573\\n5581\\n5591\\n5623\\n5639\\n5641\\n5647\\n5651\\n5653\\n5657\\n5659\\n5669\\n5683\\n5689\\n5693\\n5701\\n5711\\n5717\\n5737\\n5741\\n5743\\n5749\\n5779\\n5783\\n5791\\n5801\\n5807\\n5813\\n5821\\n5827\\n5839\\n5843\\n5849\\n5851\\n5857\\n5861\\n5867\\n5869\\n5879\\n5881\\n5897\\n5903\\n5923\\n5927\\n5939\\n5953\\n5981\\n5987\\n6007\\n6011\\n6029\\n6037\\n6043\\n6047\\n6053\\n6067\\n6073\\n6079\\n6089\\n6091\\n6101\\n6113\\n6121\\n6131\\n6133\\n6143\\n6151\\n6163\\n6173\\n6197\\n6199\\n6203\\n6211\\n6217\\n6221\\n6229\\n6247\\n6257\\n6263\\n6269\\n6271\\n6277\\n6287\\n6299\\n6301\\n6311\\n6317\\n6323\\n6329\\n6337\\n6343\\n6353\\n6359\\n6361\\n6367\\n6373\\n6379\\n6389\\n6397\\n6421\\n6427\\n6449\\n6451\\n6469\\n6473\\n6481\\n6491\\n6521\\n6529\\n6547\\n6551\\n6553\\n6563\\n6569\\n6571\\n6577\\n6581\\n6599\\n6607\\n6619\\n6637\\n6653\\n6659\\n6661\\n6673\\n6679\\n6689\\n6691\\n6701\\n6703\\n6709\\n6719\\n6733\\n6737\\n6761\\n6763\\n6779\\n6781\\n6791\\n6793\\n6803\\n6823\\n6827\\n6829\\n6833\\n6841\\n6857\\n6863\\n6869\\n6871\\n
6883\\n6899\\n6907\\n6911\\n6917\\n6947\\n6949\\n6959\\n6961\\n6967\\n6971\\n6977\\n6983\\n6991\\n6997\\n7001\\n7013\\n7019\\n7027\\n7039\\n7043\\n7057\\n7069\\n7079\\n7103\\n7109\\n7121\\n7127\\n7129\\n7151\\n7159\\n7177\\n7187\\n7193\\n7207\\n7211\\n7213\\n7219\\n7229\\n7237\\n7243\\n7247\\n7253\\n7283\\n7297\\n7307\\n7309\\n7321\\n7331\\n7333\\n7349\\n7351\\n7369\\n7393\\n7411\\n7417\\n7433\\n7451\\n7457\\n7459\\n7477\\n7481\\n7487\\n7489\\n7499\\n7507\\n7517\\n7523\\n7529\\n7537\\n7541\\n7547\\n7549\\n7559\\n7561\\n7573\\n7577\\n7583\\n7589\\n7591\\n7603\\n7607\\n7621\\n7639\\n7643\\n7649\\n7669\\n7673\\n7681\\n7687\\n7691\\n7699\\n7703\\n7717\\n7723\\n7727\\n7741\\n7753\\n7757\\n7759\\n7789\\n7793\\n7817\\n7823\\n7829\\n7841\\n7853\\n7867\\n7873\\n7877\\n7879\\n7883\\n7901\\n7907\\n7919\", \"base64Encoded\": false, \"contentType\": \"text/plain\"}}\n", - "error": "", - "result": { - "prime_numbers.txt": { - "content": "2\n3\n5\n7\n11\n13\n17\n19\n23\n29\n31\n37\n41\n43\n47\n53\n59\n61\n67\n71\n73\n79\n83\n89\n97\n101\n103\n107\n109\n113\n127\n131\n137\n139\n149\n151\n157\n163\n167\n173\n179\n181\n191\n193\n197\n199\n211\n223\n227\n229\n233\n239\n241\n251\n257\n263\n269\n271\n277\n281\n283\n293\n307\n311\n313\n317\n331\n337\n347\n349\n353\n359\n367\n373\n379\n383\n389\n397\n401\n409\n419\n421\n431\n433\n439\n443\n449\n457\n461\n463\n467\n479\n487\n491\n499\n503\n509\n521\n523\n541\n547\n557\n563\n569\n571\n577\n587\n593\n599\n601\n607\n613\n617\n619\n631\n641\n643\n647\n653\n659\n661\n673\n677\n683\n691\n701\n709\n719\n727\n733\n739\n743\n751\n757\n761\n769\n773\n787\n797\n809\n811\n821\n823\n827\n829\n839\n853\n857\n859\n863\n877\n881\n883\n887\n907\n911\n919\n929\n937\n941\n947\n953\n967\n971\n977\n983\n991\n997\n1009\n1013\n1019\n1021\n1031\n1033\n1039\n1049\n1051\n1061\n1063\n1069\n1087\n1091\n1093\n1097\n1103\n1109\n1117\n1123\n1129\n1151\n1153\n1163\n1171\n1181\n1187\n1193\n1201\n1213\n1217\n1223\n1229\n1231\n1237\n1249\n1259\n1277\n1279\n1283\n1289\n1291\
n1297\n1301\n1303\n1307\n1319\n1321\n1327\n1361\n1367\n1373\n1381\n1399\n1409\n1423\n1427\n1429\n1433\n1439\n1447\n1451\n1453\n1459\n1471\n1481\n1483\n1487\n1489\n1493\n1499\n1511\n1523\n1531\n1543\n1549\n1553\n1559\n1567\n1571\n1579\n1583\n1597\n1601\n1607\n1609\n1613\n1619\n1621\n1627\n1637\n1657\n1663\n1667\n1669\n1693\n1697\n1699\n1709\n1721\n1723\n1733\n1741\n1747\n1753\n1759\n1777\n1783\n1787\n1789\n1801\n1811\n1823\n1831\n1847\n1861\n1867\n1871\n1873\n1877\n1879\n1889\n1901\n1907\n1913\n1931\n1933\n1949\n1951\n1973\n1979\n1987\n1993\n1997\n1999\n2003\n2011\n2017\n2027\n2029\n2039\n2053\n2063\n2069\n2081\n2083\n2087\n2089\n2099\n2111\n2113\n2129\n2131\n2137\n2141\n2143\n2153\n2161\n2179\n2203\n2207\n2213\n2221\n2237\n2239\n2243\n2251\n2267\n2269\n2273\n2281\n2287\n2293\n2297\n2309\n2311\n2333\n2339\n2341\n2347\n2351\n2357\n2371\n2377\n2381\n2383\n2389\n2393\n2399\n2411\n2417\n2423\n2437\n2441\n2447\n2459\n2467\n2473\n2477\n2503\n2521\n2531\n2539\n2543\n2549\n2551\n2557\n2579\n2591\n2593\n2609\n2617\n2621\n2633\n2647\n2657\n2659\n2663\n2671\n2677\n2683\n2687\n2689\n2693\n2699\n2707\n2711\n2713\n2719\n2729\n2731\n2741\n2749\n2753\n2767\n2777\n2789\n2791\n2797\n2801\n2803\n2819\n2833\n2837\n2843\n2851\n2857\n2861\n2879\n2887\n2897\n2903\n2909\n2917\n2927\n2939\n2953\n2957\n2963\n2969\n2971\n2999\n3001\n3011\n3019\n3023\n3037\n3041\n3049\n3061\n3067\n3079\n3083\n3089\n3109\n3119\n3121\n3137\n3163\n3167\n3169\n3181\n3187\n3191\n3203\n3209\n3217\n3221\n3229\n3251\n3253\n3257\n3259\n3271\n3299\n3301\n3307\n3313\n3319\n3323\n3329\n3331\n3343\n3347\n3359\n3361\n3371\n3373\n3389\n3391\n3407\n3413\n3433\n3449\n3457\n3461\n3463\n3467\n3469\n3491\n3499\n3511\n3517\n3527\n3529\n3533\n3539\n3541\n3547\n3557\n3559\n3571\n3581\n3583\n3593\n3607\n3613\n3617\n3623\n3631\n3637\n3643\n3659\n3671\n3673\n3677\n3691\n3697\n3701\n3709\n3719\n3727\n3733\n3739\n3761\n3767\n3769\n3779\n3793\n3797\n3803\n3821\n3823\n3833\n3847\n3851\n3853\n3863\n3877\n3881\n3889\n3907\n3911\n3917\n3919\n3
923\n3929\n3931\n3943\n3947\n3967\n3989\n4001\n4003\n4007\n4013\n4019\n4021\n4027\n4049\n4051\n4057\n4073\n4079\n4091\n4093\n4099\n4111\n4127\n4129\n4133\n4139\n4153\n4157\n4159\n4177\n4201\n4211\n4217\n4219\n4229\n4231\n4241\n4243\n4253\n4259\n4261\n4271\n4273\n4283\n4289\n4297\n4327\n4337\n4339\n4349\n4357\n4363\n4373\n4391\n4397\n4409\n4421\n4423\n4441\n4447\n4451\n4457\n4463\n4481\n4483\n4493\n4507\n4513\n4517\n4519\n4523\n4547\n4549\n4561\n4567\n4583\n4591\n4597\n4603\n4621\n4637\n4639\n4643\n4649\n4651\n4657\n4663\n4673\n4679\n4691\n4703\n4721\n4723\n4729\n4733\n4751\n4759\n4783\n4787\n4789\n4793\n4799\n4801\n4813\n4817\n4831\n4861\n4871\n4877\n4889\n4903\n4909\n4919\n4931\n4933\n4937\n4943\n4951\n4957\n4967\n4969\n4973\n4987\n4993\n4999\n5003\n5009\n5011\n5021\n5023\n5039\n5051\n5059\n5077\n5081\n5087\n5099\n5101\n5107\n5113\n5119\n5147\n5153\n5167\n5171\n5179\n5189\n5197\n5209\n5227\n5231\n5233\n5237\n5261\n5273\n5279\n5281\n5297\n5303\n5309\n5323\n5333\n5347\n5351\n5381\n5387\n5393\n5399\n5407\n5413\n5417\n5419\n5431\n5437\n5441\n5443\n5449\n5471\n5477\n5479\n5483\n5501\n5503\n5507\n5519\n5521\n5527\n5531\n5557\n5563\n5569\n5573\n5581\n5591\n5623\n5639\n5641\n5647\n5651\n5653\n5657\n5659\n5669\n5683\n5689\n5693\n5701\n5711\n5717\n5737\n5741\n5743\n5749\n5779\n5783\n5791\n5801\n5807\n5813\n5821\n5827\n5839\n5843\n5849\n5851\n5857\n5861\n5867\n5869\n5879\n5881\n5897\n5903\n5923\n5927\n5939\n5953\n5981\n5987\n6007\n6011\n6029\n6037\n6043\n6047\n6053\n6067\n6073\n6079\n6089\n6091\n6101\n6113\n6121\n6131\n6133\n6143\n6151\n6163\n6173\n6197\n6199\n6203\n6211\n6217\n6221\n6229\n6247\n6257\n6263\n6269\n6271\n6277\n6287\n6299\n6301\n6311\n6317\n6323\n6329\n6337\n6343\n6353\n6359\n6361\n6367\n6373\n6379\n6389\n6397\n6421\n6427\n6449\n6451\n6469\n6473\n6481\n6491\n6521\n6529\n6547\n6551\n6553\n6563\n6569\n6571\n6577\n6581\n6599\n6607\n6619\n6637\n6653\n6659\n6661\n6673\n6679\n6689\n6691\n6701\n6703\n6709\n6719\n6733\n6737\n6761\n6763\n6779\n6781\n6791\n6793\n6803\n682
3\n6827\n6829\n6833\n6841\n6857\n6863\n6869\n6871\n6883\n6899\n6907\n6911\n6917\n6947\n6949\n6959\n6961\n6967\n6971\n6977\n6983\n6991\n6997\n7001\n7013\n7019\n7027\n7039\n7043\n7057\n7069\n7079\n7103\n7109\n7121\n7127\n7129\n7151\n7159\n7177\n7187\n7193\n7207\n7211\n7213\n7219\n7229\n7237\n7243\n7247\n7253\n7283\n7297\n7307\n7309\n7321\n7331\n7333\n7349\n7351\n7369\n7393\n7411\n7417\n7433\n7451\n7457\n7459\n7477\n7481\n7487\n7489\n7499\n7507\n7517\n7523\n7529\n7537\n7541\n7547\n7549\n7559\n7561\n7573\n7577\n7583\n7589\n7591\n7603\n7607\n7621\n7639\n7643\n7649\n7669\n7673\n7681\n7687\n7691\n7699\n7703\n7717\n7723\n7727\n7741\n7753\n7757\n7759\n7789\n7793\n7817\n7823\n7829\n7841\n7853\n7867\n7873\n7877\n7879\n7883\n7901\n7907\n7919", - "base64Encoded": false, - "contentType": "text/plain" - } - }, - "exitCode": 0 - } - } -] \ No newline at end of file diff --git a/static/5_prime_numbers.txt b/static/5_prime_numbers.txt deleted file mode 100644 index 4dbadc38..00000000 --- a/static/5_prime_numbers.txt +++ /dev/null @@ -1,1000 +0,0 @@ -2 -3 -5 -7 -11 -13 -17 -19 -23 -29 -31 -37 -41 -43 -47 -53 -59 -61 -67 -71 -73 -79 -83 -89 -97 -101 -103 -107 -109 -113 -127 -131 -137 -139 -149 -151 -157 -163 -167 -173 -179 -181 -191 -193 -197 -199 -211 -223 -227 -229 -233 -239 -241 -251 -257 -263 -269 -271 -277 -281 -283 -293 -307 -311 -313 -317 -331 -337 -347 -349 -353 -359 -367 -373 -379 -383 -389 -397 -401 -409 -419 -421 -431 -433 -439 -443 -449 -457 -461 -463 -467 -479 -487 -491 -499 -503 -509 -521 -523 -541 -547 -557 -563 -569 -571 -577 -587 -593 -599 -601 -607 -613 -617 -619 -631 -641 -643 -647 -653 -659 -661 -673 -677 -683 -691 -701 -709 -719 -727 -733 -739 -743 -751 -757 -761 -769 -773 -787 -797 -809 -811 -821 -823 -827 -829 -839 -853 -857 -859 -863 -877 -881 -883 -887 -907 -911 -919 -929 -937 -941 -947 -953 -967 -971 -977 -983 -991 -997 -1009 -1013 -1019 -1021 -1031 -1033 -1039 -1049 -1051 -1061 -1063 -1069 -1087 -1091 -1093 -1097 -1103 -1109 -1117 -1123 -1129 -1151 -1153 
-1163 -1171 -1181 -1187 -1193 -1201 -1213 -1217 -1223 -1229 -1231 -1237 -1249 -1259 -1277 -1279 -1283 -1289 -1291 -1297 -1301 -1303 -1307 -1319 -1321 -1327 -1361 -1367 -1373 -1381 -1399 -1409 -1423 -1427 -1429 -1433 -1439 -1447 -1451 -1453 -1459 -1471 -1481 -1483 -1487 -1489 -1493 -1499 -1511 -1523 -1531 -1543 -1549 -1553 -1559 -1567 -1571 -1579 -1583 -1597 -1601 -1607 -1609 -1613 -1619 -1621 -1627 -1637 -1657 -1663 -1667 -1669 -1693 -1697 -1699 -1709 -1721 -1723 -1733 -1741 -1747 -1753 -1759 -1777 -1783 -1787 -1789 -1801 -1811 -1823 -1831 -1847 -1861 -1867 -1871 -1873 -1877 -1879 -1889 -1901 -1907 -1913 -1931 -1933 -1949 -1951 -1973 -1979 -1987 -1993 -1997 -1999 -2003 -2011 -2017 -2027 -2029 -2039 -2053 -2063 -2069 -2081 -2083 -2087 -2089 -2099 -2111 -2113 -2129 -2131 -2137 -2141 -2143 -2153 -2161 -2179 -2203 -2207 -2213 -2221 -2237 -2239 -2243 -2251 -2267 -2269 -2273 -2281 -2287 -2293 -2297 -2309 -2311 -2333 -2339 -2341 -2347 -2351 -2357 -2371 -2377 -2381 -2383 -2389 -2393 -2399 -2411 -2417 -2423 -2437 -2441 -2447 -2459 -2467 -2473 -2477 -2503 -2521 -2531 -2539 -2543 -2549 -2551 -2557 -2579 -2591 -2593 -2609 -2617 -2621 -2633 -2647 -2657 -2659 -2663 -2671 -2677 -2683 -2687 -2689 -2693 -2699 -2707 -2711 -2713 -2719 -2729 -2731 -2741 -2749 -2753 -2767 -2777 -2789 -2791 -2797 -2801 -2803 -2819 -2833 -2837 -2843 -2851 -2857 -2861 -2879 -2887 -2897 -2903 -2909 -2917 -2927 -2939 -2953 -2957 -2963 -2969 -2971 -2999 -3001 -3011 -3019 -3023 -3037 -3041 -3049 -3061 -3067 -3079 -3083 -3089 -3109 -3119 -3121 -3137 -3163 -3167 -3169 -3181 -3187 -3191 -3203 -3209 -3217 -3221 -3229 -3251 -3253 -3257 -3259 -3271 -3299 -3301 -3307 -3313 -3319 -3323 -3329 -3331 -3343 -3347 -3359 -3361 -3371 -3373 -3389 -3391 -3407 -3413 -3433 -3449 -3457 -3461 -3463 -3467 -3469 -3491 -3499 -3511 -3517 -3527 -3529 -3533 -3539 -3541 -3547 -3557 -3559 -3571 -3581 -3583 -3593 -3607 -3613 -3617 -3623 -3631 -3637 -3643 -3659 -3671 -3673 -3677 -3691 -3697 -3701 -3709 -3719 -3727 -3733 -3739 -3761 -3767 
-3769 -3779 -3793 -3797 -3803 -3821 -3823 -3833 -3847 -3851 -3853 -3863 -3877 -3881 -3889 -3907 -3911 -3917 -3919 -3923 -3929 -3931 -3943 -3947 -3967 -3989 -4001 -4003 -4007 -4013 -4019 -4021 -4027 -4049 -4051 -4057 -4073 -4079 -4091 -4093 -4099 -4111 -4127 -4129 -4133 -4139 -4153 -4157 -4159 -4177 -4201 -4211 -4217 -4219 -4229 -4231 -4241 -4243 -4253 -4259 -4261 -4271 -4273 -4283 -4289 -4297 -4327 -4337 -4339 -4349 -4357 -4363 -4373 -4391 -4397 -4409 -4421 -4423 -4441 -4447 -4451 -4457 -4463 -4481 -4483 -4493 -4507 -4513 -4517 -4519 -4523 -4547 -4549 -4561 -4567 -4583 -4591 -4597 -4603 -4621 -4637 -4639 -4643 -4649 -4651 -4657 -4663 -4673 -4679 -4691 -4703 -4721 -4723 -4729 -4733 -4751 -4759 -4783 -4787 -4789 -4793 -4799 -4801 -4813 -4817 -4831 -4861 -4871 -4877 -4889 -4903 -4909 -4919 -4931 -4933 -4937 -4943 -4951 -4957 -4967 -4969 -4973 -4987 -4993 -4999 -5003 -5009 -5011 -5021 -5023 -5039 -5051 -5059 -5077 -5081 -5087 -5099 -5101 -5107 -5113 -5119 -5147 -5153 -5167 -5171 -5179 -5189 -5197 -5209 -5227 -5231 -5233 -5237 -5261 -5273 -5279 -5281 -5297 -5303 -5309 -5323 -5333 -5347 -5351 -5381 -5387 -5393 -5399 -5407 -5413 -5417 -5419 -5431 -5437 -5441 -5443 -5449 -5471 -5477 -5479 -5483 -5501 -5503 -5507 -5519 -5521 -5527 -5531 -5557 -5563 -5569 -5573 -5581 -5591 -5623 -5639 -5641 -5647 -5651 -5653 -5657 -5659 -5669 -5683 -5689 -5693 -5701 -5711 -5717 -5737 -5741 -5743 -5749 -5779 -5783 -5791 -5801 -5807 -5813 -5821 -5827 -5839 -5843 -5849 -5851 -5857 -5861 -5867 -5869 -5879 -5881 -5897 -5903 -5923 -5927 -5939 -5953 -5981 -5987 -6007 -6011 -6029 -6037 -6043 -6047 -6053 -6067 -6073 -6079 -6089 -6091 -6101 -6113 -6121 -6131 -6133 -6143 -6151 -6163 -6173 -6197 -6199 -6203 -6211 -6217 -6221 -6229 -6247 -6257 -6263 -6269 -6271 -6277 -6287 -6299 -6301 -6311 -6317 -6323 -6329 -6337 -6343 -6353 -6359 -6361 -6367 -6373 -6379 -6389 -6397 -6421 -6427 -6449 -6451 -6469 -6473 -6481 -6491 -6521 -6529 -6547 -6551 -6553 -6563 -6569 -6571 -6577 -6581 -6599 -6607 -6619 -6637 -6653 
-6659 -6661 -6673 -6679 -6689 -6691 -6701 -6703 -6709 -6719 -6733 -6737 -6761 -6763 -6779 -6781 -6791 -6793 -6803 -6823 -6827 -6829 -6833 -6841 -6857 -6863 -6869 -6871 -6883 -6899 -6907 -6911 -6917 -6947 -6949 -6959 -6961 -6967 -6971 -6977 -6983 -6991 -6997 -7001 -7013 -7019 -7027 -7039 -7043 -7057 -7069 -7079 -7103 -7109 -7121 -7127 -7129 -7151 -7159 -7177 -7187 -7193 -7207 -7211 -7213 -7219 -7229 -7237 -7243 -7247 -7253 -7283 -7297 -7307 -7309 -7321 -7331 -7333 -7349 -7351 -7369 -7393 -7411 -7417 -7433 -7451 -7457 -7459 -7477 -7481 -7487 -7489 -7499 -7507 -7517 -7523 -7529 -7537 -7541 -7547 -7549 -7559 -7561 -7573 -7577 -7583 -7589 -7591 -7603 -7607 -7621 -7639 -7643 -7649 -7669 -7673 -7681 -7687 -7691 -7699 -7703 -7717 -7723 -7727 -7741 -7753 -7757 -7759 -7789 -7793 -7817 -7823 -7829 -7841 -7853 -7867 -7873 -7877 -7879 -7883 -7901 -7907 -7919 \ No newline at end of file diff --git a/static/6_email_preview.html b/static/6_email_preview.html deleted file mode 100644 index 2f609af2..00000000 --- a/static/6_email_preview.html +++ /dev/null @@ -1,42 +0,0 @@ - - - - - - Email Preview: Verspätete Ankunft morgen - - - -
- - - -
- - - \ No newline at end of file diff --git a/static/7_email_template.json b/static/7_email_template.json deleted file mode 100644 index 9d50aa0d..00000000 --- a/static/7_email_template.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "recipient": "i.dittrich@valueon.ch", - "subject": "Versp\u00e4tete Ankunft morgen", - "plainBody": "Hallo Ida,\n\nich wollte dich nur kurz informieren, dass ich morgen etwas sp\u00e4ter ankommen werde. Ich hoffe, das ist in Ordnung.\n\nBis dann!\n\nViele Gr\u00fc\u00dfe", - "htmlBody": "

Hallo Ida,

ich wollte dich nur kurz informieren, dass ich morgen etwas sp\u00e4ter ankommen werde. Ich hoffe, das ist in Ordnung.

Bis dann!

Viele Gr\u00fc\u00dfe

" -} \ No newline at end of file diff --git a/static/8_email_preview.html b/static/8_email_preview.html deleted file mode 100644 index dd5c9ff8..00000000 --- a/static/8_email_preview.html +++ /dev/null @@ -1,74 +0,0 @@ - - - - - - Email Preview: Verspätete Ankunft morgen - - - -
- - - -
- - - \ No newline at end of file diff --git a/static/9_email_template.json b/static/9_email_template.json deleted file mode 100644 index 704bb247..00000000 --- a/static/9_email_template.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "recipient": "i.dittrich@valueon.ch", - "subject": "Versp\u00e4tete Ankunft morgen", - "plainBody": "Hallo Ida,\n\nich wollte dich nur kurz informieren, dass ich morgen etwas sp\u00e4ter ankommen werde. Ich hoffe, das ist in Ordnung.\n\nBis dann!\n\nViele Gr\u00fc\u00dfe", - "htmlBody": "\n\n\nEmail Preview: Versp\u00e4tete Ankunft morgen\n\n\n\n
\n
\n

Email Template Preview

\n
\n
\n

To: i.dittrich@valueon.ch

\n

Subject: Versp\u00e4tete Ankunft morgen

\n
\n

Hallo Ida,

\n

ich wollte dich nur kurz informieren, dass ich morgen etwas sp\u00e4ter ankommen werde. Ich hoffe, das ist in Ordnung.

\n

Bis dann!

\n

Viele Gr\u00fc\u00dfe

\n
\n
\n
\n

Dies ist eine Vorschau des E-Mail-Templates.

\n
\n
\n\n" -} \ No newline at end of file diff --git a/token_storage/7d08aab9-a170-4975-8898-bc7e0a95488e.json b/token_storage/7d08aab9-a170-4975-8898-bc7e0a95488e.json deleted file mode 100644 index 8a65bff5..00000000 --- a/token_storage/7d08aab9-a170-4975-8898-bc7e0a95488e.json +++ /dev/null @@ -1 +0,0 @@ -{"access_token": "eyJ0eXAiOiJKV1QiLCJub25jZSI6IlZXdlYzdDhtMTIyR19WNXF0ZU9sRGc0WjlBUkNBZkNCMHZER0hucmJWYlEiLCJhbGciOiJSUzI1NiIsIng1dCI6IkNOdjBPSTNSd3FsSEZFVm5hb01Bc2hDSDJYRSIsImtpZCI6IkNOdjBPSTNSd3FsSEZFVm5hb01Bc2hDSDJYRSJ9.eyJhdWQiOiIwMDAwMDAwMy0wMDAwLTAwMDAtYzAwMC0wMDAwMDAwMDAwMDAiLCJpc3MiOiJodHRwczovL3N0cy53aW5kb3dzLm5ldC82YTUxYWFlYi0yNDY3LTQxODYtOTUwNC0yYTA1YWVkYzU5MWYvIiwiaWF0IjoxNzQ2NTk5NTA5LCJuYmYiOjE3NDY1OTk1MDksImV4cCI6MTc0NjYwNTE1NywiYWNjdCI6MCwiYWNyIjoiMSIsImFjcnMiOlsicDEiXSwiYWlvIjoiQWFRQVcvOFpBQUFBa2NUUldzUVFTZVV3eVY0cS91Z0w0NDhndEQwb1prZ3paKzgxVDdaN1k0VWhDV1RwREF6OUwrdlYvN2V5SW9sMG5zVXQvY2N1U3NuMjhXenlMd2szWWpvZGM3ajZrb2dGeW5hU0owcE0vTTl1VXM1NDMrQ3k4cDBZRExhTzF4djFCcDRmRVdkMUd3bDRsZ0VROUtFNndjazFMY25raWRTQUM0djA3V3k1RUw3SDV4MUNKY3cyOXYrcU9Dc1VDdkNIYnN0a2JGbzdoZ3NvY0w3b0ZuTVh3Zz09IiwiYW1yIjpbInB3ZCIsInJzYSIsIm1mYSJdLCJhcHBfZGlzcGxheW5hbWUiOiJQTSBUZXN0IC0gRW1haWwgRHJhZnQiLCJhcHBpZCI6ImM3ZTcxMTJkLTYxZGMtNGYzYS04Y2QzLTA4Y2M0Y2Q3NTA0YyIsImFwcGlkYWNyIjoiMSIsImRldmljZWlkIjoiOWE0YTM2OWEtNjBhOS00NjdlLWFjNTktODdkZGQyMDUxZGU5IiwiZmFtaWx5X25hbWUiOiJNb3RzY2giLCJnaXZlbl9uYW1lIjoiUGF0cmljayIsImlkdHlwIjoidXNlciIsImlwYWRkciI6IjE3OC4xOTcuMjIyLjE0OCIsIm5hbWUiOiJQYXRyaWNrIE1vdHNjaCIsIm9pZCI6IjdkMDhhYWI5LWExNzAtNDk3NS04ODk4LWJjN2UwYTk1NDg4ZSIsInBsYXRmIjoiMyIsInB1aWQiOiIxMDAzN0ZGRThDREQ2QTgyIiwicmgiOiIxLkFRc0E2NnBSYW1ja2hrR1ZCQ29GcnR4Wkh3TUFBQUFBQUFBQXdBQUFBQUFBQUFDRUFEQUxBQS4iLCJzY3AiOiJNYWlsLlJlYWRXcml0ZSBvcGVuaWQgcHJvZmlsZSBVc2VyLlJlYWQgZW1haWwiLCJzaWQiOiIyOTI0ZTgxMS0xMTM1LTQ0ZTItOGUxYi1kMmU2YmVhZmI3ZTUiLCJzaWduaW5fc3RhdGUiOlsia21zaSJdLCJzdWIiOiJJZzBpcDN4YWRiTGl1S3piRmd3VmhOSU1fRHpHMHdweGlFRmIySll1Y240IiwidGVuYW50X3JlZ2lvbl9zY29wZSI6IkVVIiwidGlkIjoiNmE1MWFhZWItMjQ2Ny00MTg2
LTk1MDQtMmEwNWFlZGM1OTFmIiwidW5pcXVlX25hbWUiOiJwLm1vdHNjaEB2YWx1ZW9uLmNoIiwidXBuIjoicC5tb3RzY2hAdmFsdWVvbi5jaCIsInV0aSI6ImYzXy1ha2NKblVlQXhuM3o3NmdOQUEiLCJ2ZXIiOiIxLjAiLCJ3aWRzIjpbIjE1OGMwNDdhLWM5MDctNDU1Ni1iN2VmLTQ0NjU1MWE2YjVmNyIsIjliODk1ZDkyLTJjZDMtNDRjNy05ZDAyLWE2YWMyZDVlYTVjMyIsImNmMWMzOGU1LTM2MjEtNDAwNC1hN2NiLTg3OTYyNGRjZWQ3YyIsIjlmMDYyMDRkLTczYzEtNGQ0Yy04ODBhLTZlZGI5MDYwNmZkOCIsIjg5MmM1ODQyLWE5YTYtNDYzYS04MDQxLTcyYWEwOGNhM2NmNiIsImI3OWZiZjRkLTNlZjktNDY4OS04MTQzLTc2YjE5NGU4NTUwOSJdLCJ4bXNfZnRkIjoiMlN2YmlORzVSbGVucGhwdmM2SDdEU1R5WFF5UnpPTmJYOUtOQzFzZmRKSUJaWFZ5YjNCbGQyVnpkQzFrYzIxeiIsInhtc19pZHJlbCI6IjEgMTIiLCJ4bXNfc3QiOnsic3ViIjoiUjJ2RDBHMW1tYVlSQzdKWVdjSVNaVzJLRFBnTkJqQkxGbDZlTEFCX1BVTSJ9LCJ4bXNfdGNkdCI6MTQxODIxNDUwMSwieG1zX3RkYnIiOiJFVSJ9.dg3yuHyt--1GJh5mhnLy1mPkopsVhUTlPv3GpbRT9QcUcMgFnHqGqsU3Ht_hCG5XATy0fe1-cojzBTpYBuyIOBjEZtYpJb5fGcfd-lfuBxCKcYT-ApV5sfQgOEv-r5ki7OTI13MktZKrC4d63uTXmEAOOdoRsIG0UN-ZpM0iGwbWeRZdJV_2F-skZCCLpeOK63jItZkQ7spa8KH9VaU5070vSwDQEXVBuMmvDq70ql6Sw-oqlSzh-ea-pQAn0SoKVg23xMXWTvHgCFxjeveq6Q3vCsmThIXmWdQcQtWUIFYbRajW7ZMM_c1xsTOWyFMQEbEcOwmAqf93bAL_gF_DQw", "refresh_token": 
"1.AQsA66pRamckhkGVBCoFrtxZHy0R58fcYTpPjNMIzEzXUEyEADALAA.AgABAwEAAABVrSpeuWamRam2jAF1XRQEAwDs_wUA9P950c99CUJxojzQN3haIYdKnZObsofQW2RZsTO0E9apCt7LtcrCIp0xFEJkIYipSHIN1bAG6Jhhm4QiYb3XxIH7VtgmZSnZrZdf3QgZILRwjUKyFnFdjjmq0S0BO7InylLaZIJ2ZzOOPE8cY4xSXedyKEe3DC06Ejh7Zp9EhC6BlWdgGUCyyFNloDKv3xhUfqJ8GjQ91bo1OErWAFkH8N7CiD_f7XJQ55EV4dx7w7qHemN4aDDeG4uNjioMMuPDspIHcFZFZwzcgphHZO9uelRrlQMNEwQ8zNDNQk0f1Q_m09xGifHoMCszwoX2Z-ffaYtkcQjwGnEp4DsEQbCv_-03wHc5KlmJnTmOGgtCLSpfxl06qcyBqRVeA2cGCwhzmmF_Q81AkkAqM0MCFalT4z3b6dK3uxjs1Umu_wUa_lEKtEePKzQaeTDf1wCDWXo1UZ1oTeZESV6yGrnBnCiG6z4wRifqCdJpf6WWItYk_EyKV5Reh6kPMIret11Cacha2elopHxLTFFmEExvb2Mfu1z9NHZ_qBnXA-F05fDmvKickYspWu4CuQr2DhwJ74CD4IZ1dKFRiwHuYlw9HuTuBQjOdMy-FhAyGdhjTKHhkf4rh8GVjeza0DvCl5NJr04ubacnd2-_vGVoVNbsqUeqDWF9gKdT5Qnz5Aba9vFs4VYKjtnfrVEBEWZHZJsX5JzWVIzqfSVKmcE3ij2v9KGLw8kdcD16hfTN6wCCHYRdtMx5CVRhyBuj8KMBRRUtdEBFgG_jraeQTGoj6BnsdKPclM_TkPHhR8p-0KjfErJud-MFavGT9Y1cvsdr9TdsX_8o9y2LTcW8nXl3Vljnzq3RlZ6N4PoeQSzZNmri8MRpLuUFwJfAwxwuGemN_OIph7Npo3IQ1Tw9WeENGczplZWgbf2FdPITisdPylACrsWflH8mlfy1fEatstQb_2E2k-1vqCFYX8hYiSRbOS0kYWzYUBJ_yvRF2EUsu8yTbzw4SlJYjeNdAdmLcxjbdWXS0OS-aKv2QUvTi_htK0UKKm7V0Yj1I_bE", "user_info": {"name": "Patrick Motsch", "email": "p.motsch@valueon.ch"}, "timestamp": "2025-05-07T01:45:00.286453"} \ No newline at end of file diff --git a/tool_testBackendSingle.py b/tool_testBackendSingle.py deleted file mode 100644 index 371c6951..00000000 --- a/tool_testBackendSingle.py +++ /dev/null @@ -1,432 +0,0 @@ -#!/usr/bin/env python3 -""" -Simplified Test Runner for Workflow State Machine - -This script provides a clean and simple test runner for the workflow state machine -tests that properly handles async test methods. 
- -Usage: - python tool_testBackendSingle.py [test_name] - -Examples: - python tool_testBackendSingle.py # Run all tests - python tool_testBackendSingle.py test_state_1 # Run tests starting with test_state_1 -""" - -import os -import sys -import asyncio -import time -import traceback -import importlib -import inspect -from unittest.mock import patch, MagicMock, AsyncMock - -# Try to import colorama, install if not available -try: - from colorama import init, Fore, Back, Style - init() # Initialize colorama -except ImportError: - print("Installing required package: colorama") - import subprocess - subprocess.call([sys.executable, "-m", "pip", "install", "colorama"]) - from colorama import init, Fore, Back, Style - init() # Initialize colorama - - -class AsyncTestRunner: - """Simple test runner that supports async test methods""" - - def __init__(self): - """Initialize the test runner""" - self.success_count = 0 - self.failure_count = 0 - self.results = [] - self.total_time = 0 - - def print_header(self, test_case_name): - """Print a header for the test suite""" - print("\n" + "=" * 80) - print(f"{Fore.CYAN}{Style.BRIGHT}{test_case_name}{Style.RESET_ALL}") - print("=" * 80) - - def print_result(self, test_name, success, duration, error=None): - """Print a test result with appropriate formatting""" - clean_name = test_name.replace('test_', '').replace('_', ' ').title() - - if success: - status = f"{Fore.GREEN}[PASS]{Style.RESET_ALL}" - self.success_count += 1 - else: - status = f"{Fore.RED}[FAIL]{Style.RESET_ALL}" - self.failure_count += 1 - - # Print result line - print(f"{status} {clean_name} - {duration:.2f}s") - - # Print error if any - if error: - print(f" {Fore.RED}→ {error}{Style.RESET_ALL}") - if isinstance(error, Exception): - traceback.print_exception(type(error), error, error.__traceback__) - - # Store result - self.results.append({ - 'name': clean_name, - 'success': success, - 'duration': duration, - 'error': error - }) - - def print_summary(self): - 
"""Print a summary of test results""" - print("\n" + "=" * 80) - print(f"{Fore.CYAN}{Style.BRIGHT}TEST SUMMARY{Style.RESET_ALL}") - print("-" * 80) - - # Print timing - print(f"Total execution time: {self.total_time:.2f}s") - - # Print counts - total = self.success_count + self.failure_count - print(f"Tests: {total}, Passed: {Fore.GREEN}{self.success_count}{Style.RESET_ALL}, Failed: {Fore.RED}{self.failure_count}{Style.RESET_ALL}") - - # Print overall status - if self.failure_count == 0: - print(f"\n{Fore.GREEN}{Style.BRIGHT}✓ ALL TESTS PASSED{Style.RESET_ALL}") - else: - print(f"\n{Fore.RED}{Style.BRIGHT}✗ TESTS FAILED{Style.RESET_ALL}") - - # Print failures - print(f"\n{Fore.RED}Failed tests:{Style.RESET_ALL}") - for result in self.results: - if not result['success']: - print(f" - {result['name']}") - - print("=" * 80) - - async def run_test(self, test_instance, test_method): - """Run a single test method (sync or async)""" - # Prepare test - test_name = test_method.__name__ - clean_name = test_name.replace('test_', '').replace('_', ' ').title() - - # Print start - print(f"\n{Fore.BLUE}[RUNNING]{Style.RESET_ALL} {clean_name}...") - - # Run setUp - if hasattr(test_instance, 'setUp'): - await self.run_method_with_instance(test_instance, test_instance.setUp) - - # Time the test execution - start_time = time.time() - success = True - error = None - - try: - # Run the test - ensure bound method gets called with instance - if hasattr(test_method, '__self__') and test_method.__self__ is None: - # This is an unbound method, bind it to the instance - bound_method = getattr(test_instance, test_method.__name__) - await self.run_method_with_instance(test_instance, bound_method) - else: - # This is already a bound method - await self.run_method_with_instance(test_instance, test_method) - except Exception as e: - success = False - error = e - - # Calculate duration - duration = time.time() - start_time - - # Run tearDown - if hasattr(test_instance, 'tearDown'): - await 
self.run_method_with_instance(test_instance, test_instance.tearDown) - - # Record and print result - self.print_result(test_name, success, duration, error) - - return success - - - async def run_method_with_instance(self, instance, method): - """Run a method ensuring it has the correct instance""" - method_name = method.__name__ - bound_method = getattr(instance, method_name) - - if asyncio.iscoroutinefunction(bound_method): - return await bound_method() - else: - return bound_method() - - async def run_method(self, method): - """Run a method that might be async or regular""" - # Check if this is an unbound method that needs self - if hasattr(method, '__self__') and method.__self__ is None: - # This suggests it's an unbound method that needs an instance - raise TypeError(f"Method {method.__name__} appears to be unbound and needs 'self'") - - if asyncio.iscoroutinefunction(method): - return await method() - else: - return method() - - - def _reset_mocks(self): - """Reset all mocks for a fresh test""" - # Only reset if the objects have reset_mock method - if hasattr(self.mydom_mock, 'reset_mock'): - self.mydom_mock.reset_mock() - else: - # Recreate the mock objects - self._setup_mocks() - - if hasattr(self.registry_mock, 'reset_mock'): - self.registry_mock.reset_mock() - - if hasattr(self.getDocumentContents_mock, 'reset_mock'): - self.getDocumentContents_mock.reset_mock() - - - - async def run_test_case(self, test_case_class, filter_pattern=None): - """Run all test methods in a test case class""" - # Initialize timing - start_time = time.time() - - # Print header - self.print_header(test_case_class.__name__) - - # Get all test methods - test_methods = sorted([ - getattr(test_case_class, name) for name in dir(test_case_class) - if name.startswith('test_') and callable(getattr(test_case_class, name)) - ], key=lambda x: x.__name__) - - # Filter tests if pattern provided - if filter_pattern: - test_methods = [ - method for method in test_methods - if filter_pattern in 
method.__name__ - ] - - if not test_methods: - print(f"{Fore.YELLOW}No tests found{Style.RESET_ALL}") - return - - print(f"Running {len(test_methods)} tests...\n") - - # Run each test - for test_method in test_methods: - # Create a fresh instance for each test - test_instance = test_case_class() - await self.run_test(test_instance, test_method) - - # Record total time - self.total_time = time.time() - start_time - - # Print summary - self.print_summary() - - return self.failure_count == 0 - - -def setup_module_paths(): - """Set up module paths to make imports work""" - # Add current directory and parent directory to path - current_dir = os.path.dirname(os.path.abspath(__file__)) - parent_dir = os.path.dirname(current_dir) - - if current_dir not in sys.path: - sys.path.insert(0, current_dir) - if parent_dir not in sys.path: - sys.path.insert(0, parent_dir) - - # Also add any module directories that might exist - modules_dir = os.path.join(parent_dir, 'modules') - if os.path.exists(modules_dir) and modules_dir not in sys.path: - sys.path.insert(0, modules_dir) - - gateway_dir = os.path.join(parent_dir, 'gateway') - if os.path.exists(gateway_dir) and gateway_dir not in sys.path: - sys.path.insert(0, gateway_dir) - - print(f"{Fore.CYAN}Python path set to:{Style.RESET_ALL}") - for path in sys.path[:5]: # Print first 5 paths - print(f" - {path}") - - -def find_test_files(): - """Find test files in the current directory""" - # Look for test files in priority order - test_files = [] - - # First priority: test_workflow_state_machine.py - if os.path.exists('./test_workflow_state_machine.py'): - test_files.append('test_workflow_state_machine.py') - - # Second priority: any tool_test*.py files - tool_test_files = [f for f in os.listdir('.') if f.startswith('tool_test') and f.endswith('.py') and f != 'tool_testBackendSingle.py'] - test_files.extend(tool_test_files) - - # Last priority: any test_*.py files - other_test_files = [f for f in os.listdir('.') if f.startswith('test_') 
and f.endswith('.py') and f not in test_files] - test_files.extend(other_test_files) - - return test_files - - -async def run_tests(test_file=None, test_filter=None): - """Run all tests""" - # Set up paths - setup_module_paths() - - # Find test files if not specified - if not test_file: - test_files = find_test_files() - if not test_files: - print(f"{Fore.RED}No test files found{Style.RESET_ALL}") - return False - test_file = test_files[0] - print(f"{Fore.YELLOW}Found test files: {', '.join(test_files)}{Style.RESET_ALL}") - print(f"{Fore.YELLOW}Using: {test_file}{Style.RESET_ALL}") - - # Remove .py extension for import - module_name = test_file[:-3] if test_file.endswith('.py') else test_file - - try: - # First try a normal import - print(f"{Fore.YELLOW}Attempting to import module: {module_name}{Style.RESET_ALL}") - test_module = importlib.import_module(module_name) - print(f"{Fore.GREEN}Successfully imported test module: {module_name}{Style.RESET_ALL}") - except ImportError as e: - print(f"{Fore.RED}Error importing module {module_name}: {e}{Style.RESET_ALL}") - - # Try different import approaches - try: - # Try to load as a relative module - print(f"{Fore.YELLOW}Trying relative import...{Style.RESET_ALL}") - test_module = importlib.import_module('.' 
+ module_name, package=__package__) - print(f"{Fore.GREEN}Imported test module via relative import: {module_name}{Style.RESET_ALL}") - except ImportError as e: - print(f"{Fore.RED}Relative import failed: {e}{Style.RESET_ALL}") - - # Fall back to exec (not recommended but sometimes necessary) - print(f"{Fore.YELLOW}Attempting to load using exec: {test_file}{Style.RESET_ALL}") - try: - with open(test_file, 'r') as f: - module_content = f.read() - # Create a new module namespace - module_namespace = {} - # Execute the module code in the namespace - exec(module_content, module_namespace) - - # Create a mock module - class MockModule: - pass - - test_module = MockModule() - - # Copy the relevant attributes to the mock module - for key, value in module_namespace.items(): - setattr(test_module, key, value) - - print(f"{Fore.GREEN}Loaded test module using exec: {test_file}{Style.RESET_ALL}") - except Exception as e: - print(f"{Fore.RED}Failed to load module using exec: {e}{Style.RESET_ALL}") - traceback.print_exc() - return False - - # Find test case class - test_case_class = None - print(f"{Fore.YELLOW}Looking for test case class in module...{Style.RESET_ALL}") - for item_name in dir(test_module): - item = getattr(test_module, item_name) - if inspect.isclass(item) and (item_name.startswith('Test') or 'Test' in item_name): - print(f"{Fore.GREEN}Found test case class: {item_name}{Style.RESET_ALL}") - test_case_class = item - break - - if not test_case_class: - print(f"{Fore.RED}No test case class found in {test_file}{Style.RESET_ALL}") - return False - - # Try to check for required imports - try: - print(f"{Fore.YELLOW}Checking for agent registry...{Style.RESET_ALL}") - try: - # First try direct import - from modules.workflowAgentsRegistry import getAgentRegistry - print(f"{Fore.GREEN}Successfully imported getAgentRegistry{Style.RESET_ALL}") - except ImportError: - try: - # Try alternate path - from modules.workflowAgentsRegistry import getAgentRegistry - 
print(f"{Fore.GREEN}Successfully imported getAgentRegistry from modules{Style.RESET_ALL}") - except ImportError: - print(f"{Fore.YELLOW}Agent registry import not found - may cause issues{Style.RESET_ALL}") - except Exception as e: - print(f"{Fore.YELLOW}Error checking agent registry: {e}{Style.RESET_ALL}") - - # Run the tests - print(f"{Fore.CYAN}Starting test execution{Style.RESET_ALL}") - runner = AsyncTestRunner() - return await runner.run_test_case(test_case_class, test_filter) - - - -if __name__ == "__main__": - # Get test filter from command line - test_file = None - test_filter = None - - if len(sys.argv) > 1: - # Check if first arg is a file - if os.path.exists(sys.argv[1]) or sys.argv[1].endswith('.py'): - test_file = sys.argv[1] - if len(sys.argv) > 2: - test_filter = sys.argv[2] - else: - test_filter = sys.argv[1] - - # Run tests - asyncio.run(run_tests(test_file, test_filter)) - - -class MockDomInterface: - def __init__(self, *args, **kwargs): - self.getWorkflow = MagicMock(return_value=None) - self.loadWorkflowState = MagicMock(return_value=None) - self.createWorkflow = MagicMock() - self.updateWorkflow = MagicMock() - self.createWorkflowLog = MagicMock() - self.createWorkflowMessage = MagicMock() - self.getFile = MagicMock() - self.getFileData = MagicMock() - self.saveUploadedFile = MagicMock() - self.userLanguage = "en" - self.callAi = AsyncMock() - self.setUserLanguage = MagicMock() - - def reset_mock(self): - """Reset all mocks in this interface""" - for attr_name in dir(self): - attr = getattr(self, attr_name) - if hasattr(attr, 'reset_mock'): - attr.reset_mock() - - -class MockAgentRegistry: - def __init__(self): - self.getAgent = MagicMock() - self.getAgentInfos = MagicMock(return_value=[ - {"name": "test_agent", "description": "Test agent", "capabilities": ["text_processing"]} - ]) - self.setMydom = MagicMock() - - def reset_mock(self): - """Reset all mocks in this registry""" - for attr_name in dir(self): - attr = getattr(self, attr_name) - if 
hasattr(attr, 'reset_mock'): - attr.reset_mock() \ No newline at end of file diff --git a/tool_testData.py b/tool_testData.py deleted file mode 100644 index 9b5228a0..00000000 --- a/tool_testData.py +++ /dev/null @@ -1,1064 +0,0 @@ -""" -Test Module for Workflow State Machine - -This script tests each state of the workflow state machine implementation -from initialization to completion, including error scenarios. - -Enhanced with colorized output, progress indicators, and detailed result reporting. -""" - -import os -import sys -import uuid -import json -import base64 -import asyncio -import unittest -from unittest.mock import patch, MagicMock, AsyncMock -from datetime import datetime, timedelta -from typing import Dict, List, Any -import time -import traceback - -# Try to import colorama, install if not available -try: - from colorama import init, Fore, Back, Style - init() # Initialize colorama -except ImportError: - print("Installing required package: colorama") - import subprocess - subprocess.call([sys.executable, "-m", "pip", "install", "colorama"]) - from colorama import init, Fore, Back, Style - init() # Initialize colorama - -# Add parent directory to path for imports if needed -current_dir = os.path.dirname(os.path.abspath(__file__)) -parent_dir = os.path.dirname(current_dir) -if parent_dir not in sys.path: - sys.path.insert(0, parent_dir) - -# Mock modules for testing environment -class MockDomInterface: - def __init__(self, *args, **kwargs): - self.getWorkflow = MagicMock(return_value=None) - self.loadWorkflowState = MagicMock(return_value=None) - self.createWorkflow = MagicMock() - self.updateWorkflow = MagicMock() - self.createWorkflowLog = MagicMock() - self.createWorkflowMessage = MagicMock() - self.getFile = MagicMock() - self.getFileData = MagicMock() - self.saveUploadedFile = MagicMock() - self.userLanguage = "en" - self.callAi = AsyncMock() - self.setUserLanguage = MagicMock() - - def reset_mock(self): - """Reset all mocks in this interface""" - 
for attr_name in dir(self): - attr = getattr(self, attr_name) - if hasattr(attr, 'reset_mock'): - attr.reset_mock() - -class MockAgentRegistry: - def __init__(self): - self.getAgent = MagicMock() - self.getAgentInfos = MagicMock(return_value=[ - {"name": "test_agent", "description": "Test agent", "capabilities": ["text_processing"]} - ]) - self.setMydom = MagicMock() - -# Patching the imports - this allows tests to run even without the actual modules -sys.modules['modules.lucydomInterface'] = MagicMock() -sys.modules['modules.lucydomInterface'].getLucydomInterface = MagicMock(return_value=MockDomInterface()) -sys.modules['gateway.modules.workflowAgentsRegistry'] = MagicMock() -sys.modules['gateway.modules.workflowAgentsRegistry'].getAgentRegistry = MagicMock(return_value=MockAgentRegistry()) -sys.modules['modules.documentProcessor'] = MagicMock() -sys.modules['modules.documentProcessor'].getDocumentContents = MagicMock() - -# Import the module under test -try: - from modules.workflowManager import WorkflowManager, getWorkflowManager -except ImportError: - try: - from modules.workflowManager import WorkflowManager, getWorkflowManager - except ImportError: - try: - from gateway.modules.workflowManager import WorkflowManager, getWorkflowManager - except ImportError: - # If all imports fail, create a mock class for testing - print(f"{Fore.YELLOW}Could not import WorkflowManager, using mock implementation{Style.RESET_ALL}") - class WorkflowManager: - def __init__(self, mandateId, userId): - self.mandateId = mandateId - self.userId = userId - self.mydom = MockDomInterface(mandateId, userId) - self.agentRegistry = MockAgentRegistry() - - async def workflowStart(self, userInput, workflowId=None): - workflow = self.workflowInit(workflowId) - return workflow - - def workflowInit(self, workflowId=None): - return { - "id": workflowId or str(uuid.uuid4()), - "mandateId": self.mandateId, - "userId": self.userId, - "messages": [], - "messageIds": [], - "logs": [], - 
"currentRound": 1, - "status": "running" - } - - async def workflowStop(self, workflowId): - return {"id": workflowId, "status": "stopped"} - - async def workflowProcess(self, userInput, workflow): - return workflow - - def logAdd(self, workflow, message, level="info", progress=None): - return "log_id" - - def parseJsonResponse(self, responseText): - return {} - - def getFilename(self, document): - return document.get("name", "") + "." + document.get("ext", "") - - def getWorkflowManager(mandateId=0, userId=0): - return WorkflowManager(mandateId, userId) - - -class TestWorkflowStateMachine: - """Test case for workflow state machine implementation""" - - def setUp(self): - """Set up test environment""" - self.mandateId = 1 - self.userId = 1 - - # Create mocks - self._setup_mocks() - - # Create manager with mocked dependencies - self.manager = self._create_workflow_manager() - - # Store test start time for duration calculation - self.test_start_time = time.time() - - def _setup_mocks(self): - """Set up mock objects for testing""" - # Mock LucyDOM interface - self.mydom_mock = MockDomInterface() - self.mydom_mock.callAi = AsyncMock() - self.mydom_mock.loadWorkflowState.return_value = None # Default to no existing workflow - self.mydom_mock.getWorkflow.return_value = None # Default to no existing workflow - self.mydom_mock.userLanguage = "en" # Default language - - # Mock AgentRegistry - self.registry_mock = MockAgentRegistry() - test_agent = MagicMock() - test_agent.processTask = AsyncMock() - test_agent.mydom = self.mydom_mock - self.registry_mock.getAgent.return_value = test_agent - self.registry_mock.getAgentInfos.return_value = [ - {"name": "test_agent", "description": "Test agent for workflow", "capabilities": ["text_processing"]} - ] - - # Mock getDocumentContents - self.getDocumentContents_mock = MagicMock() - self.getDocumentContents_mock.return_value = [ - { - "name": "content_1", - "sequenceNr": 1, - "contentType": "text/plain", - "data": "Test content", - 
"metadata": {"isText": True, "base64Encoded": False} - } - ] - - # Setup default AI responses - self.ai_responses = { - "project_manager": json.dumps({ - "objFinalDocuments": ["result.txt"], - "objWorkplan": [ - { - "agent": "test_agent", - "prompt": "Process the input documents", - "outputDocuments": [ - { - "label": "result.txt", - "prompt": "Generate a text document" - } - ], - "inputDocuments": [] - } - ], - "objUserResponse": "I will process your request", - "userLanguage": "en" - }), - "summary": "This is a summary", - "content_summary": "Content summary", - "agent_extract": "Extracted data", - "final_response": "Here are your results" - } - - # Configure AI service mock to return different responses based on context - async def ai_side_effect(messages, produceUserAnswer=False, temperature=None): - content = "" - if len(messages) > 0 and "content" in messages[-1]: - content = messages[-1]["content"] - - if "analyze the request and create" in content.lower(): - return self.ai_responses["project_manager"] - elif "summarize this" in content.lower(): - return self.ai_responses["content_summary"] - elif "review the promised" in content.lower() or "final" in content.lower(): - return self.ai_responses["final_response"] - elif "extracting information" in content.lower(): - return self.ai_responses["agent_extract"] - else: - return self.ai_responses["summary"] - - self.mydom_mock.callAi.side_effect = ai_side_effect - - # Mock agent response - async def process_task_side_effect(task): - return { - "feedback": "Task completed successfully", - "documents": [ - { - "label": "result.txt", - "content": "Generated content" - } - ] - } - - test_agent.processTask.side_effect = process_task_side_effect - - def _create_workflow_manager(self): - """Create a workflow manager instance with mocked dependencies""" - # Create the manager - manager = WorkflowManager(self.mandateId, self.userId) - - # Inject mocks - manager.mydom = self.mydom_mock - manager.agentRegistry = 
self.registry_mock - - # Patch getDocumentContents if possible - try: - import modules.documentProcessor - modules.documentProcessor.getDocumentContents = self.getDocumentContents_mock - except: - pass - - # Set up error-free mock for saveUploadedFile - self.mydom_mock.saveUploadedFile.return_value = {"id": 1, "name": "test.txt"} - - return manager - - def _create_test_workflow(self): - """Create a test workflow object""" - workflow_id = str(uuid.uuid4()) - current_time = datetime.now().isoformat() - - return { - "id": workflow_id, - "mandateId": self.mandateId, - "userId": self.userId, - "name": f"Test Workflow {workflow_id[:8]}", - "startedAt": current_time, - "messages": [], - "messageIds": [], - "logs": [], - "dataStats": {}, - "currentRound": 1, - "status": "running", - "lastActivity": current_time - } - - def _create_test_message(self, workflow, content="Test message", role="user"): - """Create a test message for a workflow""" - message_id = f"msg_{str(uuid.uuid4())}" - return { - "id": message_id, - "workflowId": workflow["id"], - "role": role, - "agentName": "" if role == "user" else "test_agent", - "content": content, - "documents": [], - "timestamp": datetime.now().isoformat(), - "sequenceNo": len(workflow.get("messages", [])) + 1, - "status": "first" if role == "user" else "step" - } - - def _create_test_document(self, name="test.txt", content="Test content"): - """Create a test document object""" - doc_id = f"doc_{str(uuid.uuid4())}" - return { - "id": doc_id, - "fileId": 1, - "name": os.path.splitext(name)[0], - "ext": os.path.splitext(name)[1][1:] if os.path.splitext(name)[1] else "txt", - "data": base64.b64encode(content.encode('utf-8')).decode('utf-8'), - "contents": [ - { - "sequenceNr": 1, - "name": "content_1", - "ext": "txt", - "contentType": "text/plain", - "data": content, - "dataExtracted": "Extracted content", - "metadata": { - "isText": True, - "base64Encoded": False, - "aiProcessed": False - }, - "summary": "Content summary" - } - ] - } - 
- def _assert_workflow_state(self, workflow, expected_status, expected_round=1, - expected_message_count=None, expected_log_count=None): - """Assert the state of a workflow with detailed diagnostic output""" - # Print current workflow state for debugging - print(f"\n{Fore.CYAN}Workflow State Verification:{Style.RESET_ALL}") - print(f" Status: {Fore.YELLOW}{workflow.get('status', 'N/A')}{Style.RESET_ALL} (Expected: {Fore.GREEN}{expected_status}{Style.RESET_ALL})") - print(f" Round: {Fore.YELLOW}{workflow.get('currentRound', 'N/A')}{Style.RESET_ALL} (Expected: {Fore.GREEN}{expected_round}{Style.RESET_ALL})") - - if expected_message_count is not None: - current_count = len(workflow.get("messages", [])) - print(f" Messages: {Fore.YELLOW}{current_count}{Style.RESET_ALL} (Expected: {Fore.GREEN}{expected_message_count}{Style.RESET_ALL})") - - if expected_log_count is not None: - current_count = len(workflow.get("logs", [])) - print(f" Logs: {Fore.YELLOW}{current_count}{Style.RESET_ALL} (Expected: {Fore.GREEN}{expected_log_count}{Style.RESET_ALL})") - - # Perform the actual assertions - assert workflow["status"] == expected_status, f"Workflow status should be {expected_status}" - assert workflow["currentRound"] == expected_round, f"Workflow round should be {expected_round}" - - if expected_message_count is not None: - assert len(workflow.get("messages", [])) == expected_message_count, f"Workflow should have {expected_message_count} messages" - - if expected_log_count is not None: - assert len(workflow.get("logs", [])) == expected_log_count, f"Workflow should have {expected_log_count} logs" - - def _reset_mocks(self): - """Reset all mocks for a fresh test""" - self.mydom_mock.reset_mock() - self.registry_mock.reset_mock() - self.getDocumentContents_mock.reset_mock() - - # ------------------------------------------------------------------------ - # TESTS FOR EACH STATE - # ------------------------------------------------------------------------ - - def _print_test_info(self, 
state_num, state_name, description=None): - """Print formatted information about the current test state""" - print(f"\n{Fore.CYAN}{Style.BRIGHT}STATE {state_num}: {state_name}{Style.RESET_ALL}") - if description: - print(f"{Fore.WHITE}{description}{Style.RESET_ALL}") - print(f"{Fore.YELLOW}{'-' * 60}{Style.RESET_ALL}") - - async def test_state_1_workflow_initialization_new(self): - """Test State 1: Workflow Initialization (new workflow)""" - self._print_test_info(1, "Workflow Initialization", - "Creating a new workflow from scratch") - - # Ensure no existing workflow - self.mydom_mock.getWorkflow.return_value = None - self.mydom_mock.loadWorkflowState.return_value = None - - print(f"{Fore.YELLOW}➤ Initializing new workflow...{Style.RESET_ALL}") - # Initialize a new workflow - workflow = self.manager.workflowInit() - - # Print workflow details - print(f"{Fore.GREEN}✓ Workflow created:{Style.RESET_ALL}") - print(f" ID: {workflow.get('id', 'N/A')}") - print(f" Mandate: {workflow.get('mandateId', 'N/A')}") - print(f" User: {workflow.get('userId', 'N/A')}") - print(f" Round: {workflow.get('currentRound', 'N/A')}") - print(f" Status: {workflow.get('status', 'N/A')}") - - # Assert workflow state - print(f"{Fore.YELLOW}➤ Validating workflow state...{Style.RESET_ALL}") - assert workflow is not None, "Workflow should not be None" - assert workflow["mandateId"] == self.mandateId, f"Workflow mandate ID should be {self.mandateId}" - assert workflow["userId"] == self.userId, f"Workflow user ID should be {self.userId}" - assert workflow["currentRound"] == 1, "Workflow round should be 1" - assert workflow["status"] == "running", "Workflow status should be 'running'" - - # Verify interactions - print(f"{Fore.YELLOW}➤ Verifying database interactions...{Style.RESET_ALL}") - self.mydom_mock.createWorkflow.assert_called_once() - self.mydom_mock.getWorkflow.assert_called_once() - print(f"{Fore.GREEN}✓ Database correctly updated{Style.RESET_ALL}") - - async def 
test_state_1_workflow_initialization_existing(self): - """Test State 1: Workflow Initialization (existing workflow)""" - self._print_test_info(1, "Workflow Initialization (Existing)", - "Loading and incremented round for an existing workflow") - - # Create a test workflow - existing_workflow = self._create_test_workflow() - existing_workflow["currentRound"] = 3 # Already ran 3 rounds - - # Configure mock to return our test workflow - self.mydom_mock.getWorkflow.return_value = existing_workflow - - print(f"{Fore.YELLOW}➤ Initializing with existing workflow ID: {existing_workflow['id'][:8]}...{Style.RESET_ALL}") - # Initialize with existing workflowId - workflow = self.manager.workflowInit(existing_workflow["id"]) - - # Assert workflow state - print(f"{Fore.GREEN}✓ Workflow loaded:{Style.RESET_ALL}") - print(f" ID: {workflow.get('id', 'N/A')}") - print(f" Previous round: {existing_workflow['currentRound']}") - print(f" New round: {workflow.get('currentRound', 'N/A')}") - print(f" Status: {workflow.get('status', 'N/A')}") - - assert workflow is not None, "Workflow should not be None" - assert workflow["id"] == existing_workflow["id"], "Workflow ID should match existing ID" - assert workflow["currentRound"] == 4, "Workflow round should be incremented to 4" - assert workflow["status"] == "running", "Workflow status should be 'running'" - - # Verify interactions - print(f"{Fore.YELLOW}➤ Verifying database interactions...{Style.RESET_ALL}") - self.mydom_mock.updateWorkflow.assert_called_once() - print(f"{Fore.GREEN}✓ Database correctly updated with incremented round{Style.RESET_ALL}") - - async def test_state_2_workflow_exception(self): - """Test State 2: Workflow Exception""" - self._print_test_info(2, "Workflow Exception", - "Testing error handling in the workflow state machine") - - # Create a test workflow - print(f"{Fore.YELLOW}➤ Creating test workflow...{Style.RESET_ALL}") - workflow = self._create_test_workflow() - print(f"{Fore.GREEN}✓ Created workflow with ID: 
    async def test_state_3_user_message_processing(self):
        """Test State 3: User Message Processing.

        Feeds a prompt plus one attached file ID through
        chatMessageToWorkflow and verifies the resulting user message is
        built from the input and appended to the workflow.
        """
        self._print_test_info(3, "User Message Processing",
            "Processing user input into a workflow message with documents")

        # Create a test workflow
        workflow = self._create_test_workflow()

        # Create test user input
        user_input = {"prompt": "Please analyze this document", "listFileId": [1]}

        # Configure file processing mock
        # NOTE(review): test_document is never used below — either dead code
        # or _create_test_document has a side effect; confirm before removing.
        test_document = self._create_test_document()
        self.mydom_mock.getFile.return_value = {"name": "test.txt", "mandateId": self.mandateId}
        self.mydom_mock.getFileData.return_value = b"Test content"

        print(f"{Fore.YELLOW}➤ Processing user message...{Style.RESET_ALL}")
        # Process user message
        message = await self.manager.chatMessageToWorkflow("user", "", user_input, workflow)

        # Print message details
        print(f"{Fore.GREEN}✓ Message processed:{Style.RESET_ALL}")
        print(f" Role: {message.get('role', 'N/A')}")
        print(f" Content: {message.get('content', 'N/A')}")
        print(f" Status: {message.get('status', 'N/A')}")

        # Assert message processing
        assert message["role"] == "user", "Message role should be 'user'"
        assert message["content"] == "Please analyze this document", "Message content should match input"

        # Verify document processing
        print(f"{Fore.YELLOW}➤ Verifying document processing...{Style.RESET_ALL}")
        self.mydom_mock.getFile.assert_called()
        self.mydom_mock.getFileData.assert_called()

        # Verify the message was added to the workflow
        print(f"{Fore.YELLOW}➤ Verifying message added to workflow...{Style.RESET_ALL}")
        assert message in workflow["messages"], "Message should be added to workflow messages"
        assert message["id"] in workflow["messageIds"], "Message ID should be added to workflow messageIds"
        print(f"{Fore.GREEN}✓ Message successfully added to workflow{Style.RESET_ALL}")
    async def test_state_4_project_manager_analysis(self):
        """Test State 4: Project Manager Analysis.

        Verifies that projectManagerAnalysis calls the AI service once and
        returns the planning structure (final documents, workplan, user
        response, detected user language).
        """
        self._print_test_info(4, "Project Manager Analysis",
            "Analyzing user request and planning the workflow")

        # Create a test workflow
        workflow = self._create_test_workflow()

        # Create user message
        user_message = self._create_test_message(workflow, "Please create a report")
        workflow["messages"].append(user_message)

        print(f"{Fore.YELLOW}➤ Running project manager analysis...{Style.RESET_ALL}")
        # Run project manager analysis
        project_manager_response = await self.manager.projectManagerAnalysis(user_message, workflow)

        # Print response details
        print(f"{Fore.GREEN}✓ Project manager analysis completed:{Style.RESET_ALL}")
        print(f" Final docs: {project_manager_response.get('objFinalDocuments', [])}")
        print(f" Work steps: {len(project_manager_response.get('objWorkplan', []))}")
        print(f" User lang: {project_manager_response.get('userLanguage', 'N/A')}")

        # Assert project manager output: all four planning keys must be present.
        assert "objFinalDocuments" in project_manager_response, "Response should contain objFinalDocuments"
        assert "objWorkplan" in project_manager_response, "Response should contain objWorkplan"
        assert "objUserResponse" in project_manager_response, "Response should contain objUserResponse"
        assert "userLanguage" in project_manager_response, "Response should contain userLanguage"

        # Verify AI call
        print(f"{Fore.YELLOW}➤ Verifying AI service call...{Style.RESET_ALL}")
        self.mydom_mock.callAi.assert_called_once()
        print(f"{Fore.GREEN}✓ AI service correctly called for analysis{Style.RESET_ALL}")

    async def test_state_5_agent_execution(self):
        """Test State 5: Agent Execution.

        Runs agentProcessing with a single-output task and verifies the
        registry-provided agent is invoked once and its response is recorded
        as an assistant message on the workflow.
        """
        self._print_test_info(5, "Agent Execution",
            "Processing a task with an agent and storing results")

        # Create a test workflow with user message
        workflow = self._create_test_workflow()
        user_message = self._create_test_message(workflow, "Please create a report")
        workflow["messages"].append(user_message)

        # Create test task for the agent
        task = {
            "agent": "test_agent",
            "prompt": "Generate a test report",
            "outputDocuments": [
                {
                    "label": "report.txt",
                    "prompt": "Create a detailed report"
                }
            ],
            "inputDocuments": []
        }

        print(f"{Fore.YELLOW}➤ Executing agent task...{Style.RESET_ALL}")
        # Execute the agent
        results = await self.manager.agentProcessing(task, workflow)

        # Print results
        print(f"{Fore.GREEN}✓ Agent task executed:{Style.RESET_ALL}")
        print(f" Results: {len(results)} documents")

        # Assert agent was called correctly
        test_agent = self.registry_mock.getAgent.return_value
        test_agent.processTask.assert_called_once()

        # Verify agent message was added to workflow
        print(f"{Fore.YELLOW}➤ Verifying workflow state after agent execution...{Style.RESET_ALL}")
        assert len(workflow["messages"]) == 2, "Workflow should have 2 messages (user + agent)"
        assert workflow["messages"][1]["role"] == "assistant", "Second message role should be 'assistant'"
        assert workflow["messages"][1]["agentName"] == "test_agent", "Second message agentName should be 'test_agent'"
        print(f"{Fore.GREEN}✓ Agent message correctly added to workflow{Style.RESET_ALL}")
    async def test_state_6_final_response_generation(self):
        """Test State 6: Final Response Generation.

        Verifies generateFinalMessage produces an assistant message
        attributed to 'project_manager' and calls the AI service once.
        """
        self._print_test_info(6, "Final Response Generation",
            "Creating the final user-facing response message")

        # Create a test workflow
        # NOTE(review): workflow is not passed to generateFinalMessage below —
        # confirm whether this local is needed at all.
        workflow = self._create_test_workflow()

        # Set up test data
        obj_user_response = "I will process your request"
        obj_final_documents = ["report.txt"]

        # Create a test document result
        doc_result = self._create_test_document("report.txt", "Report content")
        obj_results = [doc_result]

        print(f"{Fore.YELLOW}➤ Generating final message...{Style.RESET_ALL}")
        # Generate final message
        final_message = await self.manager.generateFinalMessage(obj_user_response, obj_final_documents, obj_results)

        # Print message details
        print(f"{Fore.GREEN}✓ Final message generated:{Style.RESET_ALL}")
        print(f" Role: {final_message.get('role', 'N/A')}")
        print(f" Agent: {final_message.get('agentName', 'N/A')}")
        print(f" Content: {final_message.get('content', 'N/A')[:50]}...")

        # Assert final message
        assert final_message["role"] == "assistant", "Final message role should be 'assistant'"
        assert final_message["agentName"] == "project_manager", "Final message agentName should be 'project_manager'"
        assert final_message["content"] is not None, "Final message content should not be None"

        # Verify AI call for final response
        print(f"{Fore.YELLOW}➤ Verifying AI service call...{Style.RESET_ALL}")
        self.mydom_mock.callAi.assert_called_once()
        print(f"{Fore.GREEN}✓ AI service correctly called for final response{Style.RESET_ALL}")

    async def test_state_7_workflow_completion(self):
        """Test State 7: Workflow Completion.

        Verifies workflowFinish sets status to 'completed' and persists the
        workflow exactly once.
        """
        self._print_test_info(7, "Workflow Completion",
            "Finalizing workflow and setting status to completed")

        # Create a test workflow
        workflow = self._create_test_workflow()

        print(f"{Fore.YELLOW}➤ Finishing workflow...{Style.RESET_ALL}")
        # Finalize the workflow
        result = self.manager.workflowFinish(workflow)

        # Print result details (lastActivity sliced to the 19-char ISO prefix)
        print(f"{Fore.GREEN}✓ Workflow finished:{Style.RESET_ALL}")
        print(f" Status: {result.get('status', 'N/A')}")
        print(f" Last activity: {result.get('lastActivity', 'N/A')[:19]}")

        # Assert workflow state
        assert result["status"] == "completed", "Workflow status should be 'completed'"

        # Verify database update
        print(f"{Fore.YELLOW}➤ Verifying database update...{Style.RESET_ALL}")
        self.mydom_mock.updateWorkflow.assert_called_once()
        print(f"{Fore.GREEN}✓ Database correctly updated with completion status{Style.RESET_ALL}")

    async def test_state_8_workflow_stopped(self):
        """Test State 8: Workflow Stopped.

        Verifies workflowStop loads the workflow by ID, sets its status to
        'stopped', and persists the change once.
        """
        self._print_test_info(8, "Workflow Stopped",
            "Testing the workflow stop function")

        # Create a test workflow
        workflow = self._create_test_workflow()

        # Configure mock
        self.mydom_mock.loadWorkflowState.return_value = workflow

        print(f"{Fore.YELLOW}➤ Stopping workflow...{Style.RESET_ALL}")
        # Stop the workflow
        result = await self.manager.workflowStop(workflow["id"])

        # Print result details
        print(f"{Fore.GREEN}✓ Workflow stopped:{Style.RESET_ALL}")
        print(f" Status: {result.get('status', 'N/A')}")

        # Assert workflow state
        assert result["status"] == "stopped", "Workflow status should be 'stopped'"

        # Verify database update
        print(f"{Fore.YELLOW}➤ Verifying database update...{Style.RESET_ALL}")
        self.mydom_mock.updateWorkflow.assert_called_once()
        print(f"{Fore.GREEN}✓ Database correctly updated with stopped status{Style.RESET_ALL}")
# Stop the workflow - result = await self.manager.workflowStop(workflow["id"]) - - # Print result details - print(f"{Fore.GREEN}✓ Workflow stopped:{Style.RESET_ALL}") - print(f" Status: {result.get('status', 'N/A')}") - - # Assert workflow state - assert result["status"] == "stopped", "Workflow status should be 'stopped'" - - # Verify database update - print(f"{Fore.YELLOW}➤ Verifying database update...{Style.RESET_ALL}") - self.mydom_mock.updateWorkflow.assert_called_once() - print(f"{Fore.GREEN}✓ Database correctly updated with stopped status{Style.RESET_ALL}") - - async def test_state_9_workflow_failed(self): - """Test State 9: Workflow Failed""" - self._print_test_info(9, "Workflow Failed", - "Testing agent failure handling") - - # Create a test workflow - workflow = self._create_test_workflow() - - # Introduce a failing agent - failing_agent = MagicMock() - async def fail_task(*args, **kwargs): - raise ValueError("Agent failure") - failing_agent.processTask = AsyncMock(side_effect=fail_task) - failing_agent.mydom = self.mydom_mock - self.registry_mock.getAgent.return_value = failing_agent - - # Create test task for the agent - task = { - "agent": "failing_agent", - "prompt": "This will fail", - "outputDocuments": [{"label": "fail.txt", "prompt": ""}], - "inputDocuments": [] - } - - print(f"{Fore.YELLOW}➤ Executing failing agent task...{Style.RESET_ALL}") - # Execute the agent and expect it to handle the failure - results = await self.manager.agentProcessing(task, workflow) - - # Print results - print(f"{Fore.GREEN}✓ Agent failure handled correctly:{Style.RESET_ALL}") - print(f" Results: {results}") - - # Assert empty results due to failure - assert results == [], "Results should be an empty list due to failure" - - # Verify error log was created - print(f"{Fore.YELLOW}➤ Verifying error logging...{Style.RESET_ALL}") - self.mydom_mock.createWorkflowLog.assert_called() - print(f"{Fore.GREEN}✓ Error correctly logged{Style.RESET_ALL}") - - async def 
    async def test_state_10_workflow_resumption(self):
        """Test State 10: Workflow Resumption.

        A previously completed workflow re-initialized by ID must come back
        'running' with its round counter advanced.
        """
        self._print_test_info(10, "Workflow Resumption",
            "Continuing a previously completed workflow")

        # Create a test workflow that was previously completed
        existing_workflow = self._create_test_workflow()
        existing_workflow["status"] = "completed"
        existing_workflow["currentRound"] = 2

        # Add some previous messages
        existing_workflow["messages"].append(self._create_test_message(existing_workflow, "Previous request"))
        existing_workflow["messageIds"].append(existing_workflow["messages"][0]["id"])

        # Configure mock to return our test workflow
        self.mydom_mock.getWorkflow.return_value = existing_workflow

        print(f"{Fore.YELLOW}➤ Resuming workflow...{Style.RESET_ALL}")
        # Resume the workflow with a new message
        workflow = self.manager.workflowInit(existing_workflow["id"])

        # Print workflow details
        print(f"{Fore.GREEN}✓ Workflow resumed:{Style.RESET_ALL}")
        print(f" ID: {workflow.get('id', 'N/A')}")
        print(f" Status: {workflow.get('status', 'N/A')}")
        print(f" Round: {workflow.get('currentRound', 'N/A')}")

        # Assert workflow state
        assert workflow["id"] == existing_workflow["id"], "Workflow ID should match existing ID"
        assert workflow["status"] == "running", "Workflow status should be 'running'"
        assert workflow["currentRound"] == 3, "Workflow round should be incremented to 3"

        # Verify database update
        print(f"{Fore.YELLOW}➤ Verifying database update...{Style.RESET_ALL}")
        self.mydom_mock.updateWorkflow.assert_called_once()
        print(f"{Fore.GREEN}✓ Database correctly updated with new round{Style.RESET_ALL}")

    async def test_state_11_workflow_deletion(self):
        """Test State 11: Workflow Deletion (Not directly implemented in workflow manager but through API)"""
        self._print_test_info(11, "Workflow Deletion",
            "Testing deletion through mydom interface")

        # Create a test workflow
        workflow_id = str(uuid.uuid4())

        # Configure mock
        self.mydom_mock.getWorkflow.return_value = {"id": workflow_id, "userId": self.userId}
        self.mydom_mock.deleteWorkflow.return_value = True

        print(f"{Fore.YELLOW}➤ Deleting workflow...{Style.RESET_ALL}")
        # Delete the workflow through mydom
        # NOTE(review): this calls the mock directly, so the test only
        # asserts the mock's own configured behavior — no production code
        # is exercised here. Consider routing through the API layer instead.
        result = self.mydom_mock.deleteWorkflow(workflow_id)

        # Print result
        print(f"{Fore.GREEN}✓ Workflow deletion result: {result}{Style.RESET_ALL}")

        # Assert result
        assert result is True, "deleteWorkflow should return True"

        # Verify deletion call
        print(f"{Fore.YELLOW}➤ Verifying deletion call...{Style.RESET_ALL}")
        self.mydom_mock.deleteWorkflow.assert_called_once_with(workflow_id)
        print(f"{Fore.GREEN}✓ Workflow correctly deleted{Style.RESET_ALL}")
    # ------------------------------------------------------------------------
    # INTEGRATION TESTS
    # ------------------------------------------------------------------------

    async def test_full_workflow_cycle(self):
        """Test a complete workflow cycle from start to finish.

        Starts a workflow via workflowStart with a prompt plus one file and
        verifies the initial running state and the createWorkflow call.
        """
        print(f"\n{Fore.CYAN}{Style.BRIGHT}INTEGRATION TEST: Full Workflow Cycle{Style.RESET_ALL}")
        print(f"{Fore.WHITE}This test simulates a complete workflow from start to finish{Style.RESET_ALL}")
        print(f"{Fore.YELLOW}{'-' * 60}{Style.RESET_ALL}")

        # Configure mocks for a successful flow
        print(f"{Fore.YELLOW}➤ Setting up test environment...{Style.RESET_ALL}")
        self._reset_mocks()

        # Create user input with a document
        user_input = {
            "prompt": "Please analyze the attached document and create a report",
            "listFileId": [1]
        }
        print(f"{Fore.GREEN}✓ Created user input with document request{Style.RESET_ALL}")
        print(f" Prompt: {user_input['prompt']}")
        print(f" Files: {user_input['listFileId']}")

        # Configure file mock responses
        print(f"{Fore.YELLOW}➤ Configuring mock files...{Style.RESET_ALL}")
        self.mydom_mock.getFile.return_value = {"name": "source.txt", "mandateId": self.mandateId}
        self.mydom_mock.getFileData.return_value = b"Source content for analysis"
        self.mydom_mock.saveUploadedFile.return_value = {"id": 2, "name": "result.txt"}
        print(f"{Fore.GREEN}✓ Mock files configured{Style.RESET_ALL}")

        # Start a new workflow
        print(f"\n{Fore.YELLOW}➤ Starting workflow...{Style.RESET_ALL}")
        workflow = await self.manager.workflowStart(user_input)
        print(f"{Fore.GREEN}✓ Workflow started with ID: {workflow['id']}{Style.RESET_ALL}")

        # Wait for async processing to complete
        print(f"{Fore.YELLOW}➤ Waiting for async processing...{Style.RESET_ALL}")

        # Create a progress spinner
        # NOTE(review): this adds a fixed ~1s of wall-clock time to the test run.
        spinner = "|/-\\"
        for i in range(10):  # Show spinner for 1 second
            print(f"\r Processing {spinner[i % len(spinner)]}", end="")
            await asyncio.sleep(0.1)
        print("\r Processing complete! ")

        # Visualize the workflow state transitions
        # NOTE(review): this list is hard-coded for display only — it is not
        # derived from the actual workflow object and asserts nothing.
        print(f"\n{Fore.CYAN}Workflow State Transitions:{Style.RESET_ALL}")
        states = [
            {"state": "Initialization", "status": "running", "round": 1},
            {"state": "User Message Processing", "status": "running", "round": 1},
            {"state": "Project Manager Analysis", "status": "running", "round": 1},
            {"state": "Agent Execution", "status": "running", "round": 1},
            {"state": "Final Response Generation", "status": "running", "round": 1},
            {"state": "Workflow Completion", "status": "completed", "round": 1}
        ]

        for i, state in enumerate(states):
            status_color = Fore.GREEN if state["status"] == "completed" else Fore.YELLOW
            print(f" {i+1}. {state['state']}: {status_color}{state['status']}{Style.RESET_ALL}")

        # Verify start state
        print(f"\n{Fore.YELLOW}➤ Verifying workflow state...{Style.RESET_ALL}")
        print(f" Initial status: {Fore.YELLOW}{workflow['status']}{Style.RESET_ALL}")
        print(f" Current round: {workflow['currentRound']}")

        assert workflow["status"] == "running", "Workflow status should be 'running'"
        assert workflow["id"] is not None, "Workflow ID should not be None"
        assert workflow["currentRound"] == 1, "Workflow round should be 1"

        # Verify workflow was initialized correctly
        print(f"{Fore.YELLOW}➤ Verifying database interactions...{Style.RESET_ALL}")
        self.mydom_mock.createWorkflow.assert_called_once()
        print(f"{Fore.GREEN}✓ Workflow correctly initialized in database{Style.RESET_ALL}")
    async def test_workflow_with_exception(self):
        """Test workflow handling an exception during processing.

        Forces getFile to raise and verifies workflowProcess reports a
        'failed' workflow, logs the error, and persists the failure.
        """
        print(f"\n{Fore.CYAN}{Style.BRIGHT}INTEGRATION TEST: Workflow Exception Handling{Style.RESET_ALL}")
        print(f"{Fore.WHITE}This test simulates a workflow with an exception during processing{Style.RESET_ALL}")
        print(f"{Fore.YELLOW}{'-' * 60}{Style.RESET_ALL}")

        # Configure mocks for an exception scenario
        print(f"{Fore.YELLOW}➤ Setting up exception scenario...{Style.RESET_ALL}")
        self._reset_mocks()

        # Force an exception in getFile
        def raise_exception(*args, **kwargs):
            raise ValueError("Test exception in getFile")

        self.mydom_mock.getFile.side_effect = raise_exception

        # Create user input with a document
        user_input = {
            "prompt": "This will cause an exception",
            "listFileId": [1]
        }

        # Create workflow
        workflow = self._create_test_workflow()

        print(f"{Fore.YELLOW}➤ Processing workflow with exception...{Style.RESET_ALL}")
        # Process with exception
        result = await self.manager.workflowProcess(user_input, workflow)

        # Print workflow state
        print(f"{Fore.GREEN}✓ Exception handled:{Style.RESET_ALL}")
        print(f" Status: {result.get('status', 'N/A')}")

        # Verify failure state
        assert result["status"] == "failed", "Workflow status should be 'failed'"

        # Verify error log
        print(f"{Fore.YELLOW}➤ Verifying error logging...{Style.RESET_ALL}")
        self.mydom_mock.createWorkflowLog.assert_called()
        print(f"{Fore.GREEN}✓ Error correctly logged{Style.RESET_ALL}")

        # Verify database update
        print(f"{Fore.YELLOW}➤ Verifying database update...{Style.RESET_ALL}")
        self.mydom_mock.updateWorkflow.assert_called()
        print(f"{Fore.GREEN}✓ Database correctly updated with failure status{Style.RESET_ALL}")

    async def test_workflow_stop_during_processing(self):
        """Test stopping a workflow during processing.

        Verifies workflowStop on a running workflow sets status 'stopped'
        and persists the change once.
        """
        print(f"\n{Fore.CYAN}{Style.BRIGHT}INTEGRATION TEST: Workflow Stop During Processing{Style.RESET_ALL}")
        print(f"{Fore.WHITE}This test simulates stopping a workflow during processing{Style.RESET_ALL}")
        print(f"{Fore.YELLOW}{'-' * 60}{Style.RESET_ALL}")

        # Create a test workflow that is running
        workflow = self._create_test_workflow()

        # Configure mock
        self.mydom_mock.loadWorkflowState.return_value = workflow

        print(f"{Fore.YELLOW}➤ Stopping workflow during processing...{Style.RESET_ALL}")
        # Stop the workflow
        stopped_workflow = await self.manager.workflowStop(workflow["id"])

        # Print workflow state
        print(f"{Fore.GREEN}✓ Workflow stopped:{Style.RESET_ALL}")
        print(f" Status: {stopped_workflow.get('status', 'N/A')}")

        # Verify stopped state
        assert stopped_workflow["status"] == "stopped", "Workflow status should be 'stopped'"

        # Verify database update
        print(f"{Fore.YELLOW}➤ Verifying database update...{Style.RESET_ALL}")
        self.mydom_mock.updateWorkflow.assert_called_once()
        print(f"{Fore.GREEN}✓ Database correctly updated with stopped status{Style.RESET_ALL}")
test_parse_json_response(self): - """Test JSON response parsing""" - print(f"\n{Fore.CYAN}{Style.BRIGHT}UTILITY TEST: JSON Response Parsing{Style.RESET_ALL}") - print(f"{Fore.YELLOW}{'-' * 60}{Style.RESET_ALL}") - - # Test with clean JSON - print(f"{Fore.YELLOW}➤ Testing with clean JSON...{Style.RESET_ALL}") - clean_json = '{"key": "value", "number": 123}' - result = self.manager.parseJsonResponse(clean_json) - - print(f"{Fore.GREEN}✓ Clean JSON result:{Style.RESET_ALL}") - print(f" key: {result.get('key', 'N/A')}") - print(f" number: {result.get('number', 'N/A')}") - - assert result["key"] == "value", "Clean JSON parsing should extract key value" - assert result["number"] == 123, "Clean JSON parsing should extract number value" - - # Test with JSON embedded in text - print(f"\n{Fore.YELLOW}➤ Testing with JSON embedded in text...{Style.RESET_ALL}") - text_with_json = 'Some text before {"key": "value"} and after' - result = self.manager.parseJsonResponse(text_with_json) - - print(f"{Fore.GREEN}✓ Embedded JSON result:{Style.RESET_ALL}") - print(f" key: {result.get('key', 'N/A')}") - - assert result["key"] == "value", "Embedded JSON parsing should extract key value" - - # Test with invalid JSON - print(f"\n{Fore.YELLOW}➤ Testing with invalid JSON...{Style.RESET_ALL}") - invalid_json = 'Not a JSON at all' - result = self.manager.parseJsonResponse(invalid_json) - - print(f"{Fore.GREEN}✓ Invalid JSON fallback result:{Style.RESET_ALL}") - print(f" objWorkplan: {result.get('objWorkplan', 'N/A')}") - - # Should return a fallback structure - assert "objFinalDocuments" in result, "Invalid JSON should return fallback with objFinalDocuments" - assert "objWorkplan" in result, "Invalid JSON should return fallback with objWorkplan" - assert "objUserResponse" in result, "Invalid JSON should return fallback with objUserResponse" - print(f"{Fore.GREEN}✓ All JSON parsing scenarios handled correctly{Style.RESET_ALL}") - - def test_get_filename(self): - """Test filename extraction from 
    def test_get_available_documents(self):
        """Test getting available documents from workflow.

        Builds a workflow with one user and one assistant message (each
        carrying a document) and verifies getAvailableDocuments returns both,
        newest first, with the user's document tagged fileSource='user'.
        """
        print(f"\n{Fore.CYAN}{Style.BRIGHT}UTILITY TEST: Get Available Documents{Style.RESET_ALL}")
        print(f"{Fore.YELLOW}{'-' * 60}{Style.RESET_ALL}")

        # Create a test workflow with messages containing documents
        workflow = self._create_test_workflow()

        # Add user message with document
        print(f"{Fore.YELLOW}➤ Creating user message with document...{Style.RESET_ALL}")
        user_message = self._create_test_message(workflow, "Message with document")
        user_message["documents"] = [self._create_test_document("user_doc.txt")]
        workflow["messages"].append(user_message)

        # Add assistant message with document
        print(f"{Fore.YELLOW}➤ Creating assistant message with document...{Style.RESET_ALL}")
        assistant_message = self._create_test_message(workflow, "Response with document", "assistant")
        assistant_message["documents"] = [self._create_test_document("assistant_doc.txt")]
        workflow["messages"].append(assistant_message)

        print(f"{Fore.YELLOW}➤ Getting available documents...{Style.RESET_ALL}")
        # Get available documents
        available_docs = self.manager.getAvailableDocuments(workflow, user_message)

        # Print results
        print(f"{Fore.GREEN}✓ Available documents result:{Style.RESET_ALL}")
        print(f" Count: {len(available_docs)}")
        for i, doc in enumerate(available_docs):
            print(f" {i+1}. {doc.get('label', 'N/A')} ({doc.get('fileSource', 'N/A')})")

        # Verify result
        assert len(available_docs) == 2, "Available documents should have 2 entries"
        # Should be sorted newest first
        assert available_docs[0]["label"] == "assistant_doc.txt", "First document should be assistant_doc.txt"
        assert available_docs[1]["label"] == "user_doc.txt", "Second document should be user_doc.txt"
        # User's document should be marked as from user
        assert available_docs[1]["fileSource"] == "user", "User document should have fileSource='user'"
        print(f"{Fore.GREEN}✓ Available documents correctly identified and sorted{Style.RESET_ALL}")


# Simple test runner
if __name__ == "__main__":
    # Import the runner and execute tests
    try:
        sys.path.append(os.path.dirname(os.path.abspath(__file__)))
        from tool_testBackendSingle import run_tests
        asyncio.run(run_tests())
    except ImportError:
        print(f"{Fore.YELLOW}Please use tool_testBackendSingle.py to run the tests properly{Style.RESET_ALL}")
#!/usr/bin/env python3
"""
Direct Interface Workflow Test Script

This script bypasses the API layer and works directly with the interface classes
to simulate a user uploading two files and then sending a chat request with these files.

It follows the state machine as defined in the backend documentation.
"""

import os
import sys
import json
import asyncio
import uuid
from datetime import datetime

# Adjust import paths
current_dir = os.path.dirname(os.path.abspath(__file__))
parent_dir = os.path.dirname(current_dir)
if parent_dir not in sys.path:
    sys.path.insert(0, parent_dir)

# Try to import the required modules
try:
    from modules.workflowManager import getWorkflowManager
    from modules.lucydomInterface import getLucydomInterface
except ImportError:
    print("Error: Required modules not found. Attempting alternative imports...")
    try:
        from gateway.modules.workflowManager import getWorkflowManager
        from gateway.modules.lucydomInterface import getLucydomInterface
    except ImportError:
        print("Error: Could not import required modules. Make sure the script is run from the correct directory.")
        sys.exit(1)

# Constants
MANDATE_ID = 1
USER_ID = 1
#USER_PROMPT = "Please analyze these sales figures and the chart to identify key trends and opportunities."
#USER_PROMPT = "Please make me a svg file with forecast for Apr-Jun."
USER_PROMPT = "Please make me a jpg file with forecast for Apr-Jun."

# Sample files to upload
# NOTE(review): the SVG markup in this literal appears to have lost its tags in
# this copy of the file — only the text nodes survive. Restore the original
# <svg> element markup before relying on this fixture as a valid SVG.
SAMPLE_SVG = """
Sales Q1 Bar Chart
Sales ($)
Month
Jan $150K
Feb $165K
Mar $180K
"""

SAMPLE_DATA = """
# Sales Data - Q1 2023

Month,Revenue,Growth,Units Sold
January,150000,5.2%,1250
February,165000,10.0%,1380
March,180000,9.1%,1490

## Regional Breakdown
- North: 35% of total sales
- South: 25% of total sales
- East: 20% of total sales
- West: 20% of total sales

## Top Products
1. Product A: 40% of revenue
2. Product B: 30% of revenue
3. Product C: 20% of revenue
4. Others: 10% of revenue
"""
async def create_test_files(mydom):
    """Create two test files and return their IDs.

    Uploads the SVG chart and the markdown data sample through
    mydom.saveUploadedFile and returns the (chart_id, data_id) pair.
    """
    print("\n--- Uploading Test Files (State 0: File Upload) ---")

    # Create SVG chart file
    print("Uploading SVG chart file...")
    chart_meta = mydom.saveUploadedFile(SAMPLE_SVG.encode('utf-8'), "q1_sales_chart.svg")
    chart_id = chart_meta['id']
    print(f"Created SVG chart file with ID: {chart_id}")

    # Create data text file
    print("Uploading markdown data file...")
    data_meta = mydom.saveUploadedFile(SAMPLE_DATA.encode('utf-8'), "q1_sales_data.md")
    data_id = data_meta['id']
    print(f"Created markdown data file with ID: {data_id}")

    return chart_id, data_id


async def monitor_workflow(mydom, workflow_id, timeout: int = 300, interval: int = 2):
    """Monitor the workflow until it completes or times out.

    Polls mydom.loadWorkflowState every `interval` seconds, printing the
    latest log entry's progress. Returns the workflow dict once its status
    is terminal (completed/failed/stopped), the last loaded state on
    timeout, or None if the workflow cannot be found.
    """
    print("\n--- Monitoring Workflow ---")
    start_time = datetime.now()
    elapsed = 0

    while elapsed < timeout:
        # Get current workflow state
        workflow = mydom.loadWorkflowState(workflow_id)
        if not workflow:
            print("Error: Workflow not found")
            return None

        status = workflow.get("status", "unknown")

        # Show progress (based on the most recent log entry, if any)
        logs = workflow.get("logs", [])
        latest_log = logs[-1] if logs else None

        if latest_log:
            progress = latest_log.get("progress", 0)
            message = latest_log.get("message", "No message")
            print(f"Status: {status} | Progress: {progress}% | {message}")

        # Check if workflow is done
        if status in ["completed", "failed", "stopped"]:
            if status == "completed":
                print("\nWorkflow completed successfully!")
            elif status == "failed":
                print("\nWorkflow failed!")
            else:
                print("\nWorkflow was stopped!")
            return workflow

        # Wait before checking again
        await asyncio.sleep(interval)
        elapsed = (datetime.now() - start_time).total_seconds()

    print(f"Monitoring timed out after {timeout} seconds")
    return mydom.loadWorkflowState(workflow_id)
async def run_test():
    """Main test function that follows the state machine workflow.

    Uploads two fixture files (state 0), starts a workflow with them
    (state 1), monitors it through to a terminal state, then prints the
    final messages, documents and log. Returns the workflow ID.
    """
    print("\n=== Direct Interface Workflow Test ===\n")

    # Initialize the interfaces
    print("Initializing system...")
    mydom = getLucydomInterface(MANDATE_ID, USER_ID)
    manager = getWorkflowManager(MANDATE_ID, USER_ID)

    # Upload test files (State 0: File Upload)
    chart_id, data_id = await create_test_files(mydom)

    # Prepare the user input
    user_input = {
        "prompt": USER_PROMPT,
        "listFileId": [chart_id, data_id]
    }

    # Start workflow (State 1: Workflow Initialization)
    print(f"\n--- Starting Workflow (State 1: Workflow Initialization) ---")
    print(f"Sending user prompt: '{USER_PROMPT}'")
    print(f"With files: SVG chart (ID: {chart_id}) and sales data (ID: {data_id})")

    # Start the workflow with the user input
    workflow = await manager.workflowStart(user_input)
    workflow_id = workflow["id"]

    print(f"Workflow initiated with ID: {workflow_id}")
    print(f"Initial status: {workflow['status']}")

    # Monitor the workflow progress
    # This will monitor states 2-7 of the state machine
    await monitor_workflow(mydom, workflow_id, timeout=120)

    # Get final workflow state
    final_workflow = mydom.loadWorkflowState(workflow_id)

    # Print the results
    print("\n--- Final Workflow Results ---")
    if final_workflow:
        # Print status information
        print(f"Workflow Status: {final_workflow.get('status', 'unknown')}")
        print(f"Current Round: {final_workflow.get('currentRound', 0)}")

        # Print messages
        print("\n=== Messages ===")
        for msg in final_workflow.get("messages", []):
            role = msg.get("role", "unknown")
            agent = msg.get("agentName", "")

            # Get a preview of the content (truncate long messages)
            content = msg.get("content", "")
            if len(content) > 100:
                content_preview = content[:100] + "..."
            else:
                content_preview = content

            # Format based on role
            if role == "assistant" and agent:
                print(f"\n[{role} - {agent}]: {content_preview}")
            else:
                print(f"\n[{role}]: {content_preview}")

            # Print document info
            docs = msg.get("documents", [])
            if docs:
                print(f" Documents ({len(docs)}):")
                for doc in docs:
                    name = doc.get("name", "unnamed")
                    ext = doc.get("ext", "")
                    file_id = doc.get("fileId", "unknown")
                    print(f" - {name}.{ext} (ID: {file_id})")

        # Print the final log
        logs = final_workflow.get("logs", [])
        if logs:
            final_log = logs[-1]
            print(f"\nFinal Log: {final_log.get('message', 'No message')}")
    else:
        print("Error: Could not retrieve final workflow state")

    print("\n=== Test Complete ===")
    return workflow_id


if __name__ == "__main__":
    workflow_id = asyncio.run(run_test())
    print(f"Completed workflow ID: {workflow_id}")