diff --git a/app.py b/app.py
index 9ce30d13..bf34d3a4 100644
--- a/app.py
+++ b/app.py
@@ -114,7 +114,14 @@ app.add_middleware(
baseDir = pathlib.Path(__file__).parent
staticFolder = baseDir / "static"
os.makedirs(staticFolder, exist_ok=True)
-app.mount("/static", StaticFiles(directory=str(staticFolder)), name="static")
+
+# Mount the static directory; html=True serves index.html for directory requests
+app.mount("/static", StaticFiles(directory=str(staticFolder), html=True), name="static")
+
+# Add favicon route
+@app.get("/favicon.ico")
+async def favicon():
+ return FileResponse(str(staticFolder / "favicon.ico"), media_type="image/x-icon")
# General Elements
@app.get("/", tags=["General"])
diff --git a/connectors/BACKUP-connectorDbJson.py b/connectors/BACKUP-connectorDbJson.py
deleted file mode 100644
index f4bdea80..00000000
--- a/connectors/BACKUP-connectorDbJson.py
+++ /dev/null
@@ -1,569 +0,0 @@
-import json
-import os
-from typing import List, Dict, Any, Optional, Union
-import logging
-
-logger = logging.getLogger(__name__)
-
-class DatabaseConnector:
- """
- A connector for JSON-based data storage.
- Provides generic database operations with tenant and user context support.
- """
- def __init__(self, dbHost: str, dbDatabase: str, dbUser: str = None, dbPassword: str = None,
- mandateId: int = None, userId: int = None, skipInitialIdLookup: bool = False):
- """
- Initializes the JSON database connector.
-
- Args:
- dbHost: Directory for the JSON files
- dbDatabase: Database name
- dbUser: Username for authentication (optional)
- dbPassword: API key for authentication (optional)
- mandateId: Context parameter for the tenant
- userId: Context parameter for the user
- skipInitialIdLookup: When True, skips looking up initial IDs for mandateId and userId
- """
- # Store the input parameters
- self.dbHost = dbHost
- self.dbDatabase = dbDatabase
- self.dbUser = dbUser
- self.dbPassword = dbPassword
- self.skipInitialIdLookup = skipInitialIdLookup
-
- # Check if context parameters are set
- if mandateId is None or userId is None:
- raise ValueError("mandateId and userId must be set")
-
- # Ensure the database directory exists
- self.dbFolder = os.path.join(self.dbHost, self.dbDatabase)
- os.makedirs(self.dbFolder, exist_ok=True)
-
- # Cache for loaded data
- self._tablesCache = {}
-
- # Initialize system table
- self._systemTableName = "_system"
- self._initializeSystemTable()
-
- # Temporarily store mandateId and userId
- self._mandateId = mandateId
- self._userId = userId
-
- # If mandateId or userId are 0 and we're not skipping ID lookup, try to use the initial IDs
- if not skipInitialIdLookup:
- if mandateId == 0:
- initialMandateId = self.getInitialId("mandates")
- if initialMandateId is not None:
- self._mandateId = initialMandateId
- logger.info(f"Using initial mandateId: {initialMandateId} instead of 0")
-
- if userId == 0:
- initialUserId = self.getInitialId("users")
- if initialUserId is not None:
- self._userId = initialUserId
- logger.info(f"Using initial userId: {initialUserId} instead of 0")
-
- # Set the effective IDs as properties
- self.mandateId = self._mandateId
- self.userId = self._userId
-
- logger.info(f"DatabaseConnector initialized for directory: {self.dbFolder}")
- logger.debug(f"Context: mandateId={self.mandateId}, userId={self.userId}")
-
- def _initializeSystemTable(self):
- """Initializes the system table if it doesn't exist yet."""
- systemTablePath = self._getTablePath(self._systemTableName)
- if not os.path.exists(systemTablePath):
- emptySystemTable = {}
- self._saveSystemTable(emptySystemTable)
- logger.info(f"System table initialized in {systemTablePath}")
- else:
- # Load existing system table to ensure it's available
- self._loadSystemTable()
- logger.debug(f"Existing system table loaded from {systemTablePath}")
-
- def _loadSystemTable(self) -> Dict[str, int]:
- """Loads the system table with the initial IDs."""
- # Check if system table is in cache
- if f"_{self._systemTableName}" in self._tablesCache:
- return self._tablesCache[f"_{self._systemTableName}"]
-
- systemTablePath = self._getTablePath(self._systemTableName)
- try:
- if os.path.exists(systemTablePath):
- with open(systemTablePath, 'r', encoding='utf-8') as f:
- data = json.load(f)
- # Store in cache with special prefix to avoid collision with regular tables
- self._tablesCache[f"_{self._systemTableName}"] = data
- return data
- else:
- self._tablesCache[f"_{self._systemTableName}"] = {}
- return {}
- except Exception as e:
- logger.error(f"Error loading the system table: {e}")
- self._tablesCache[f"_{self._systemTableName}"] = {}
- return {}
-
- def _saveSystemTable(self, data: Dict[str, int]) -> bool:
- """Saves the system table with the initial IDs."""
- systemTablePath = self._getTablePath(self._systemTableName)
- try:
- with open(systemTablePath, 'w', encoding='utf-8') as f:
- json.dump(data, f, indent=2, ensure_ascii=False)
- # Update cache
- self._tablesCache[f"_{self._systemTableName}"] = data
- return True
- except Exception as e:
- logger.error(f"Error saving the system table: {e}")
- return False
-
- def _getTablePath(self, table: str) -> str:
- """Returns the full path to a table file"""
- return os.path.join(self.dbFolder, f"{table}.json")
-
- def _loadTable(self, table: str) -> List[Dict[str, Any]]:
- """Loads a table from the corresponding JSON file"""
- path = self._getTablePath(table)
-
- # If the table is the system table, load it directly
- if table == self._systemTableName:
- return [] # The system table is not treated like normal tables
-
- # If the table is already in the cache, use the cache
- if table in self._tablesCache:
- return self._tablesCache[table]
-
- # Otherwise load the file
- try:
- if os.path.exists(path):
- with open(path, 'r', encoding='utf-8') as f:
- data = json.load(f)
- self._tablesCache[table] = data
-
- # If data was loaded and no initial ID is registered yet,
- # register the ID of the first record (if available)
- if data and not self.hasInitialId(table):
- if "id" in data[0]:
- self._registerInitialId(table, data[0]["id"])
- logger.info(f"Initial ID {data[0]['id']} for table {table} retroactively registered")
-
- return data
- else:
- # If the file doesn't exist, create an empty table
- logger.info(f"New table {table}")
- self._tablesCache[table] = []
- self._saveTable(table, [])
- return []
- except Exception as e:
- logger.error(f"Error loading table {table}: {e}")
- return []
-
- def _saveTable(self, table: str, data: List[Dict[str, Any]]) -> bool:
- """Saves a table to the corresponding JSON file"""
- # The system table is handled specially
- if table == self._systemTableName:
- return False
-
- path = self._getTablePath(table)
- try:
- with open(path, 'w', encoding='utf-8') as f:
- json.dump(data, f, indent=2, ensure_ascii=False)
-
- # Update the cache
- self._tablesCache[table] = data
- return True
- except Exception as e:
- logger.error(f"Error saving table {table}: {e}")
- return False
-
- def _filterByContext(self, records: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
- """
- Filters records by tenant and user context,
- if these fields exist in the record.
- """
- filteredRecords = []
-
- for record in records:
- # Check if mandateId exists in the record and is not null
- hasMandate = "mandateId" in record and record["mandateId"] is not None and record["mandateId"] != ""
-
- # Check if userId exists in the record and is not null
- hasUser = "userId" in record and record["userId"] is not None and record["userId"] != ""
-
- # If both exist, filter accordingly
- if hasMandate and hasUser:
- if record["mandateId"] == self.mandateId:
- filteredRecords.append(record)
- # If only mandateId exists
- elif hasMandate and not hasUser:
- if record["mandateId"] == self.mandateId:
- filteredRecords.append(record)
- # If neither mandateId nor userId exist, add the record
- elif not hasMandate and not hasUser:
- filteredRecords.append(record)
-
- return filteredRecords
-
-
- def _applyRecordFilter(self, records: List[Dict[str, Any]], recordFilter: Dict[str, Any] = None) -> List[Dict[str, Any]]:
- """Applies a record filter to the records"""
- if not recordFilter:
- return records
-
- filteredRecords = []
-
- for record in records:
- match = True
-
- for field, value in recordFilter.items():
- # Check if the field exists
- if field not in record:
- match = False
- break
-
- # Handle type conversion for integer comparisons both ways
- if isinstance(value, int) and isinstance(record[field], str) and record[field].isdigit():
- # Filter value is int, record value is string
- if value != int(record[field]):
- match = False
- break
- elif isinstance(value, str) and value.isdigit() and isinstance(record[field], int):
- # Filter value is string, record value is int
- if record[field] != int(value):
- match = False
- break
- # Otherwise direct comparison
- elif record[field] != value:
- match = False
- break
-
- if match:
- filteredRecords.append(record)
-
- return filteredRecords
-
- def _registerInitialId(self, table: str, initialId: int) -> bool:
- """
- Registers the initial ID for a table.
-
- Args:
- table: Name of the table
- initialId: The initial ID
-
- Returns:
- True on success, False on error
- """
- try:
- # Load the current system table
- systemData = self._loadSystemTable()
-
- # Only register if not already present
- if table not in systemData:
- systemData[table] = initialId
- success = self._saveSystemTable(systemData)
- if success:
- logger.info(f"Initial ID {initialId} for table {table} registered")
- return success
- return True # If already present, this is not an error
- except Exception as e:
- logger.error(f"Error registering the initial ID for table {table}: {e}")
- return False
-
- def _removeInitialId(self, table: str) -> bool:
- """
- Removes the initial ID for a table from the system table.
-
- Args:
- table: Name of the table
-
- Returns:
- True on success, False on error
- """
- try:
- # Load the current system table
- systemData = self._loadSystemTable()
-
- # Remove the entry if it exists
- if table in systemData:
- del systemData[table]
- success = self._saveSystemTable(systemData)
- if success:
- logger.info(f"Initial ID for table {table} removed from system table")
- return success
- return True # If not present, this is not an error
- except Exception as e:
- logger.error(f"Error removing initial ID for table {table}: {e}")
- return False
-
- # Public API
-
- def getTables(self) -> List[str]:
- """
- Returns a list of all available tables.
-
- Returns:
- List of table names
- """
- tables = []
-
- try:
- for filename in os.listdir(self.dbFolder):
- if filename.endswith('.json') and not filename.startswith('_'):
- tableName = filename[:-5] # Remove the .json extension
- tables.append(tableName)
- except Exception as e:
- logger.error(f"Error reading the database directory: {e}")
-
- return tables
-
- def getFields(self, table: str) -> List[str]:
- """
- Returns a list of all fields in a table.
-
- Args:
- table: Name of the table
-
- Returns:
- List of field names
- """
- # Load the table data
- data = self._loadTable(table)
-
- if not data:
- return []
-
- # Take the first record as a reference for the fields
- fields = list(data[0].keys()) if data else []
-
- return fields
-
- def getSchema(self, table: str, language: str = None) -> Dict[str, Dict[str, Any]]:
- """
- Returns a schema object for a table with data types and labels.
-
- Args:
- table: Name of the table
- language: Language for the labels (optional)
-
- Returns:
- Schema object with fields, data types and labels
- """
- # Load the table data
- data = self._loadTable(table)
-
- schema = {}
-
- if not data:
- return schema
-
- # Take the first record as a reference for the fields and data types
- firstRecord = data[0]
-
- for field, value in firstRecord.items():
- # Determine the data type
- dataType = type(value).__name__
-
- # Create label (default is the field name)
- label = field
-
- schema[field] = {
- "type": dataType,
- "label": label
- }
-
- return schema
-
- def getRecordset(self, table: str, fieldFilter: List[str] = None, recordFilter: Dict[str, Any] = None) -> List[Dict[str, Any]]:
- """
- Returns a list of records from a table, filtered by criteria.
-
- Args:
- table: Name of the table
- fieldFilter: Filter for fields (which fields should be returned)
- recordFilter: Filter for records (which records should be returned)
-
- Returns:
- List of filtered records
- """
- # Load the table data
- data = self._loadTable(table)
- logger.debug(f"getRecordset: data volume of {len(data)} bytes")
-
- # Filter by tenant and user context
- filteredData = self._filterByContext(data)
-
- # Apply recordFilter if available
- if recordFilter:
- filteredData = self._applyRecordFilter(filteredData, recordFilter)
-
- # If fieldFilter is available, reduce the fields
- if fieldFilter and isinstance(fieldFilter, list):
- result = []
- for record in filteredData:
- filteredRecord = {}
- for field in fieldFilter:
- if field in record:
- filteredRecord[field] = record[field]
- result.append(filteredRecord)
- return result
-
- return filteredData
-
- def recordCreate(self, table: str, recordData: Dict[str, Any]) -> Dict[str, Any]:
- """
- Creates a new record in the table.
-
- Args:
- table: Name of the table
- recordData: Data for the new record
-
- Returns:
- The created record
- """
- # Load the table data
- data = self._loadTable(table)
-
- # Add mandateId and userId if not present or 0
- if "mandateId" not in recordData or recordData["mandateId"] == 0:
- recordData["mandateId"] = self.mandateId
-
- if "userId" not in recordData or recordData["userId"] == 0:
- recordData["userId"] = self.userId
-
- # Determine the next ID if not present
- if "id" not in recordData:
- nextId = 1
- if data:
- nextId = max(record["id"] for record in data if "id" in record) + 1
- recordData["id"] = nextId
-
- # If the table is empty and a system ID should be registered
- if not data:
- self._registerInitialId(table, recordData["id"])
- logger.info(f"Initial ID {recordData['id']} for table {table} has been registered")
-
- # Add the new record
- data.append(recordData)
-
- # Save the updated table
- if self._saveTable(table, data):
- return recordData
- else:
- raise ValueError(f"Error creating the record in table {table}")
-
- def recordDelete(self, table: str, recordId: Union[str, int]) -> bool:
- """
- Deletes a record from the table.
-
- Args:
- table: Name of the table
- recordId: ID of the record to delete
-
- Returns:
- True on success, False on error
- """
- # Load table data
- data = self._loadTable(table)
-
- # Search for the record
- for i, record in enumerate(data):
- if "id" in record and record["id"] == recordId:
- # Check if the record belongs to the current mandate
- if "mandateId" in record and record["mandateId"] != self.mandateId:
- raise ValueError("Not your mandate")
-
- # Check if it's an initial record
- initialId = self.getInitialId(table)
- if initialId is not None and initialId == recordId:
- # Remove this entry from the system table
- self._removeInitialId(table)
- logger.info(f"Initial ID {recordId} for table {table} has been removed from the system table")
-
- # Delete the record
- del data[i]
-
- # Save the updated table
- return self._saveTable(table, data)
-
- # Record not found
- return False
-
- def recordModify(self, table: str, recordId: Union[str, int], recordData: Dict[str, Any]) -> Dict[str, Any]:
- """
- Modifies a record in the table.
-
- Args:
- table: Name of the table
- recordId: ID of the record to modify
- recordData: New data for the record
-
- Returns:
- The updated record
- """
- # Load table data
- data = self._loadTable(table)
-
- # Search for the record
- for i, record in enumerate(data):
- if "id" in record and record["id"] == recordId:
- # Check if the record belongs to the current mandate
- if "mandateId" in record and record["mandateId"] != self.mandateId:
- raise ValueError("Not your mandate")
-
- # Prevent changing the ID
- if "id" in recordData and recordData["id"] != recordId:
- raise ValueError(f"The ID of a record in table {table} cannot be changed")
-
- # Update the record
- for key, value in recordData.items():
- data[i][key] = value
-
- # Save the updated table
- if self._saveTable(table, data):
- return data[i]
- else:
- raise ValueError(f"Error updating record in table {table}")
-
- # Record not found
- raise ValueError(f"Record with ID {recordId} not found in table {table}")
-
- def hasInitialId(self, table: str) -> bool:
- """
- Checks if an initial ID is registered for a table.
-
- Args:
- table: Name of the table
-
- Returns:
- True if an initial ID is registered, otherwise False
- """
- systemData = self._loadSystemTable()
- return table in systemData
-
- def getInitialId(self, table: str) -> Optional[int]:
- """
- Returns the initial ID for a table.
-
- Args:
- table: Name of the table
-
- Returns:
- The initial ID or None if not present
- """
- systemData = self._loadSystemTable()
- initialId = systemData.get(table)
- logger.debug(f"Database '{self.dbDatabase}': Initial ID for table '{table}' is {initialId}")
- if initialId is None:
- logger.debug(f"No initial ID found for table {table}")
- return initialId
-
- def getAllInitialIds(self) -> Dict[str, int]:
- """
- Returns all registered initial IDs.
-
- Returns:
- Dictionary with table names as keys and initial IDs as values
- """
- systemData = self._loadSystemTable()
- return systemData.copy() # Return a copy to protect the original
\ No newline at end of file
diff --git a/env_dev.env b/env_dev.env
index 71cd2dff..ad9fcbaa 100644
--- a/env_dev.env
+++ b/env_dev.env
@@ -3,7 +3,7 @@
# System Configuration
APP_ENV_TYPE = dev
APP_ENV_LABEL = Development Instance Patrick
-APP_API_URL = http://localhost:8080
+APP_API_URL = http://localhost:8000
# Database Configuration System
DB_SYSTEM_HOST=D:/Temp/_powerondb
diff --git a/modules/agentAnalyst.py b/modules/agentAnalyst.py
index 5e68c91e..93107371 100644
--- a/modules/agentAnalyst.py
+++ b/modules/agentAnalyst.py
@@ -38,11 +38,10 @@ class AgentAnalyst(AgentBase):
def setDependencies(self, mydom=None):
"""Set external dependencies for the agent."""
- self.mydom = mydom
-
+
async def processTask(self, task: Dict[str, Any]) -> Dict[str, Any]:
"""
- Process a task by focusing on required outputs and using AI to generate them.
+ Process a task by focusing on required outputs and using AI to guide the analysis process.
Args:
task: Task dictionary with prompt, inputDocuments, outputSpecifications
@@ -53,62 +52,49 @@ class AgentAnalyst(AgentBase):
try:
# Extract task information
prompt = task.get("prompt", "")
- inputDocuments = task.get("inputDocuments", [])
outputSpecs = task.get("outputSpecifications", [])
+ workflow = task.get("context", {}).get("workflow", {})
# Check AI service
if not self.mydom:
return {
- "feedback": "The Analyst agent requires an AI service to function.",
+ "feedback": "The Analyst agent requires an AI service to function effectively.",
"documents": []
}
- # Extract data from documents - focusing only on dataExtracted
- datasets, documentContext = self._extractData(inputDocuments)
+ # Create analysis plan
+ if workflow:
+            self.workflowManager.logAdd(workflow, "Creating analysis plan...", level="info", progress=35)
+ analysisPlan = await self._createAnalysisPlan(prompt)
- # Generate task analysis to understand what's needed
- analysisPlan = await self._analyzeTask(prompt, documentContext, datasets, outputSpecs)
+ # Check if this is truly an analysis task
+ if not analysisPlan.get("requiresAnalysis", True):
+ return {
+ "feedback": "This task doesn't appear to require analysis. Please try a different agent.",
+ "documents": []
+ }
- # Generate all required output documents
- documents = []
+ # Analyze data
+ if workflow:
+            self.workflowManager.logAdd(workflow, "Analyzing data...", level="info", progress=45)
+ analysisResults = await self._analyzeData(task, analysisPlan)
- # If no output specs provided, create default analysis outputs
- if not outputSpecs:
- outputSpecs = []
+ # Format results into requested output documents
+ totalSpecs = len(outputSpecs)
+ for i, spec in enumerate(outputSpecs):
+ progress = 50 + int((i / totalSpecs) * 40) # Progress from 50% to 90%
+ if self.workflowManager:
+ self.workflowManager.logAdd(workflow, f"Creating output {i+1}/{totalSpecs}...", level="info", progress=progress)
- # Process each output specification
- for spec in outputSpecs:
- outputLabel = spec.get("label", "")
- outputDescription = spec.get("description", "")
-
- # Determine type based on file extension
- outputType = outputLabel.split('.')[-1].lower() if '.' in outputLabel else "txt"
-
- # Generate appropriate content based on output type
- if outputType in ['png', 'jpg', 'jpeg', 'svg']:
- # Create visualization
- document = await self._createVisualization(
- datasets, prompt, outputLabel, analysisPlan, outputDescription
- )
- documents.append(document)
- elif outputType in ['csv', 'json', 'xlsx']:
- # Create data document
- document = await self._createDataDocument(
- datasets, prompt, outputLabel, analysisPlan, outputDescription
- )
- documents.append(document)
- else:
- # Create text document (report, analysis, etc.)
- document = await self._createTextDocument(
- datasets, documentContext, prompt, outputLabel,
- outputType, analysisPlan, outputDescription
- )
- documents.append(document)
+ documents = await self._createOutputDocuments(
+ prompt,
+ analysisResults,
+ outputSpecs,
+ analysisPlan
+ )
# Generate feedback
- feedback = f"{analysisPlan.get('analysisApproach')}"
- if analysisPlan.get("keyInsights"):
- feedback += f"\n\n{analysisPlan.get('keyInsights')}"
+ feedback = analysisPlan.get("feedback", f"I analyzed '{prompt[:50]}...' and generated {len(documents)} output documents.")
return {
"feedback": feedback,
@@ -116,7 +102,7 @@ class AgentAnalyst(AgentBase):
}
except Exception as e:
- logger.error(f"Error in analysis: {str(e)}", exc_info=True)
+ logger.error(f"Error during analysis: {str(e)}", exc_info=True)
return {
"feedback": f"Error during analysis: {str(e)}",
"documents": []
@@ -196,69 +182,74 @@ class AgentAnalyst(AgentBase):
return datasets, documentContext
- async def _analyzeTask(self, prompt: str, context: str, datasets: Dict, outputSpecs: List) -> Dict:
+ async def _analyzeTask(self, prompt: str, documentContext: str, datasets: Dict[str, Any], outputSpecs: List[Dict[str, Any]]) -> Dict[str, Any]:
"""
- Use AI to analyze the task and create a plan for analysis.
+ Analyze the task requirements using AI.
Args:
prompt: The task prompt
- context: Document context text
- datasets: Dictionary of extracted datasets
+ documentContext: Context from input documents
+ datasets: Available datasets
outputSpecs: Output specifications
Returns:
Analysis plan dictionary
"""
- # Prepare dataset information
- datasetInfo = {}
- for name, df in datasets.items():
- try:
- datasetInfo[name] = {
- "shape": df.shape,
- "columns": df.columns.tolist(),
- "dtypes": {col: str(df[col].dtype) for col in df.columns},
- "sample": df.head(3).to_dict(orient='records')
- }
- except:
- datasetInfo[name] = {"error": "Could not process dataset"}
-
+ # Create analysis prompt
analysisPrompt = f"""
- Analyze this data analysis task and create a plan.
+ Analyze this data analysis task and create a detailed plan:
TASK: {prompt}
- AVAILABLE DATA:
- {json.dumps(datasetInfo, indent=2)}
-
DOCUMENT CONTEXT:
- {context[:1000]}... (truncated)
+ {documentContext}
- OUTPUT REQUIREMENTS:
+ AVAILABLE DATASETS:
+ {json.dumps(datasets, indent=2)}
+
+ REQUIRED OUTPUTS:
{json.dumps(outputSpecs, indent=2)}
- Create a detailed analysis plan in JSON format with the following structure:
+ Create a detailed analysis plan in JSON format with:
{{
- "analysisType": "statistical|trend|comparative|predictive|cluster|general",
- "keyQuestions": ["question1", "question2"],
- "recommendedVisualizations": [{{
- "type": "chart_type",
- "dataSource": "dataset_name",
- "variables": ["col1", "col2"],
- "purpose": "explanation"
- }}],
- "keyInsights": "brief summary of initial insights",
- "analysisApproach": "brief description of recommended approach"
+ "analysisSteps": [
+ {{
+ "step": "step description",
+ "purpose": "why this step is needed",
+ "datasets": ["dataset1", "dataset2"],
+ "techniques": ["technique1", "technique2"],
+ "outputs": ["output1", "output2"]
+ }}
+ ],
+ "visualizations": [
+ {{
+ "type": "visualization type",
+ "purpose": "what it shows",
+ "datasets": ["dataset1"],
+ "settings": {{"key": "value"}}
+ }}
+ ],
+ "insights": [
+ {{
+ "type": "insight type",
+ "description": "what to look for",
+ "datasets": ["dataset1"]
+ }}
+ ],
+ "feedback": "explanation of the analysis approach"
}}
- Only return valid JSON. No preamble or explanations.
+ Respond with ONLY the JSON object, no additional text or explanations.
"""
+
try:
+ # Get analysis plan from AI
response = await self.mydom.callAi([
- {"role": "system", "content": "You are a data analysis expert. Respond with valid JSON only."},
+ {"role": "system", "content": "You are a data analysis expert. Create detailed analysis plans. Respond with valid JSON only."},
{"role": "user", "content": analysisPrompt}
- ], produceUserAnswer = True)
+ ], produceUserAnswer=True)
- # Extract JSON from response
+ # Extract JSON
jsonStart = response.find('{')
jsonEnd = response.rfind('}') + 1
@@ -266,154 +257,367 @@ class AgentAnalyst(AgentBase):
plan = json.loads(response[jsonStart:jsonEnd])
return plan
else:
- # Fallback if JSON not found
+ # Fallback plan
+                logger.warning("Unable to create analysis plan; generating fallback plan")
return {
- "analysisType": "general",
- "keyQuestions": ["What insights can be extracted from this data?"],
- "recommendedVisualizations": [],
- "keyInsights": "Analysis plan could not be created",
- "analysisApproach": "General exploratory analysis"
+ "analysisSteps": [
+ {
+ "step": "Basic data analysis",
+ "purpose": "Understand the data structure and content",
+ "datasets": list(datasets.keys()),
+ "techniques": ["summary statistics", "data visualization"],
+ "outputs": ["summary report", "basic visualizations"]
+ }
+ ],
+ "visualizations": [
+ {
+ "type": "basic charts",
+ "purpose": "Show data distribution and relationships",
+ "datasets": list(datasets.keys()),
+ "settings": {}
+ }
+ ],
+ "insights": [
+ {
+ "type": "basic insights",
+ "description": "Key findings from the data",
+ "datasets": list(datasets.keys())
+ }
+ ],
+ "feedback": f"I'll analyze the data and provide insights about {prompt}"
}
except Exception as e:
logger.warning(f"Error creating analysis plan: {str(e)}")
+ # Simple fallback plan
return {
- "analysisType": "general",
- "keyQuestions": ["What insights can be extracted from this data?"],
- "recommendedVisualizations": [],
- "keyInsights": "Analysis plan could not be created",
- "analysisApproach": "General exploratory analysis"
+ "analysisSteps": [
+ {
+ "step": "Basic data analysis",
+ "purpose": "Understand the data structure and content",
+ "datasets": list(datasets.keys()),
+ "techniques": ["summary statistics", "data visualization"],
+ "outputs": ["summary report", "basic visualizations"]
+ }
+ ],
+ "visualizations": [
+ {
+ "type": "basic charts",
+ "purpose": "Show data distribution and relationships",
+ "datasets": list(datasets.keys()),
+ "settings": {}
+ }
+ ],
+ "insights": [
+ {
+ "type": "basic insights",
+ "description": "Key findings from the data",
+ "datasets": list(datasets.keys())
+ }
+ ],
+ "feedback": f"I'll analyze the data and provide insights about {prompt}"
+ }
+
+ async def _createAnalysisPlan(self, prompt: str) -> Dict[str, Any]:
+ """
+ Create an analysis plan based on the task prompt.
+
+ Args:
+ prompt: The task prompt
+
+ Returns:
+ Analysis plan dictionary
+ """
+ try:
+ # Create analysis prompt
+ analysisPrompt = f"""
+ Analyze this data analysis task and create a detailed plan:
+
+ TASK: {prompt}
+
+ Create a detailed analysis plan in JSON format with:
+ {{
+ "requiresAnalysis": true/false,
+ "analysisSteps": [
+ {{
+ "step": "step description",
+ "purpose": "why this step is needed",
+ "techniques": ["technique1", "technique2"],
+ "outputs": ["output1", "output2"]
+ }}
+ ],
+ "visualizations": [
+ {{
+ "type": "visualization type",
+ "purpose": "what it shows",
+ "settings": {{"key": "value"}}
+ }}
+ ],
+ "insights": [
+ {{
+ "type": "insight type",
+ "description": "what to look for"
+ }}
+ ],
+ "feedback": "explanation of the analysis approach"
+ }}
+
+ Respond with ONLY the JSON object, no additional text or explanations.
+ """
+
+ # Get analysis plan from AI
+ response = await self.mydom.callAi([
+ {"role": "system", "content": "You are a data analysis expert. Create detailed analysis plans. Respond with valid JSON only."},
+ {"role": "user", "content": analysisPrompt}
+ ], produceUserAnswer=True)
+
+ # Extract JSON
+ jsonStart = response.find('{')
+ jsonEnd = response.rfind('}') + 1
+
+ if jsonStart >= 0 and jsonEnd > jsonStart:
+ plan = json.loads(response[jsonStart:jsonEnd])
+ return plan
+ else:
+ # Fallback plan
+                logger.warning("Unable to create analysis plan; generating fallback plan")
+ return {
+ "requiresAnalysis": True,
+ "analysisSteps": [
+ {
+ "step": "Basic data analysis",
+ "purpose": "Understand the data structure and content",
+ "techniques": ["summary statistics", "data visualization"],
+ "outputs": ["summary report", "basic visualizations"]
+ }
+ ],
+ "visualizations": [
+ {
+ "type": "basic charts",
+ "purpose": "Show data distribution and relationships",
+ "settings": {}
+ }
+ ],
+ "insights": [
+ {
+ "type": "basic insights",
+ "description": "Key findings from the data"
+ }
+ ],
+ "feedback": f"I'll analyze the data and provide insights about {prompt}"
+ }
+
+ except Exception as e:
+ logger.warning(f"Error creating analysis plan: {str(e)}")
+ # Simple fallback plan
+ return {
+ "requiresAnalysis": True,
+ "analysisSteps": [
+ {
+ "step": "Basic data analysis",
+ "purpose": "Understand the data structure and content",
+ "techniques": ["summary statistics", "data visualization"],
+ "outputs": ["summary report", "basic visualizations"]
+ }
+ ],
+ "visualizations": [
+ {
+ "type": "basic charts",
+ "purpose": "Show data distribution and relationships",
+ "settings": {}
+ }
+ ],
+ "insights": [
+ {
+ "type": "basic insights",
+ "description": "Key findings from the data"
+ }
+ ],
+ "feedback": f"I'll analyze the data and provide insights about {prompt}"
}
async def _createVisualization(self, datasets: Dict, prompt: str, outputLabel: str,
analysisPlan: Dict, description: str) -> Dict:
"""
- Create visualization document using AI guidance.
+ Create a visualization based on the analysis plan.
Args:
datasets: Dictionary of datasets
prompt: Original task prompt
- outputLabel: Output filename
- analysisPlan: Analysis plan from AI
+ outputLabel: Output file label
+ analysisPlan: Analysis plan
description: Output description
Returns:
- Visualization document
+ Document dictionary with visualization
"""
- # Determine format from filename
- formatType = outputLabel.split('.')[-1].lower()
- if formatType not in ['png', 'jpg', 'jpeg', 'svg']:
- formatType = 'png'
-
- # If no datasets available, create error message image
- if not datasets:
- plt.figure(figsize=(10, 6))
- plt.text(0.5, 0.5, "No data available for visualization",
- ha='center', va='center', fontsize=14)
- plt.tight_layout()
- imgData = self._getImageBase64(formatType)
- plt.close()
-
- return {
- "label": outputLabel,
- "content": imgData,
- "metadata": {
- "contentType": f"image/{formatType}"
- }
- }
-
- # Get recommended visualization from plan
- recommendedViz = analysisPlan.get("recommendedVisualizations", [])
-
- # Prepare dataset info for the first dataset if none specified
- if not recommendedViz and datasets:
- name, df = next(iter(datasets.items()))
- recommendedViz = [{
- "type": "auto",
- "dataSource": name,
- "variables": df.columns.tolist()[:5],
- "purpose": "general analysis"
- }]
-
- # Create visualization code prompt
- vizPrompt = f"""
- Generate Python matplotlib/seaborn code to create a visualization for:
-
- TASK: {prompt}
-
- VISUALIZATION REQUIREMENTS:
- - Output format: {formatType}
- - Filename: {outputLabel}
- - Description: {description}
-
- RECOMMENDED VISUALIZATION:
- {json.dumps(recommendedViz, indent=2)}
-
- AVAILABLE DATASETS:
- """
-
- # Add dataset info for recommended sources
- for viz in recommendedViz:
- dataSource = viz.get("dataSource")
- if dataSource in datasets:
- df = datasets[dataSource]
- vizPrompt += f"\nDataset '{dataSource}':\n"
- vizPrompt += f"- Shape: {df.shape}\n"
- vizPrompt += f"- Columns: {df.columns.tolist()}\n"
- vizPrompt += f"- Sample data: {df.head(3).to_dict(orient='records')}\n"
-
- vizPrompt += """
- Generate ONLY Python code that:
- 1. Uses matplotlib and/or seaborn to create a clear visualization
- 2. Sets figure size to (10, 6)
- 3. Includes appropriate titles, labels, and legend
- 4. Uses professional color schemes
- 5. Handles any missing data gracefully
-
- Return ONLY executable Python code, no explanations or markdown.
- """
-
try:
- # Get visualization code from AI
- vizCode = await self.mydom.callAi([
- {"role": "system", "content": "You are a data visualization expert. Provide only executable Python code."},
- {"role": "user", "content": vizPrompt}
- ], produceUserAnswer = True)
+ # Get visualization recommendations
+ vizRecommendations = analysisPlan.get("visualizations", [])
- # Clean code
- vizCode = vizCode.replace("```python", "").replace("```", "").strip()
-
- # Execute visualization code
- plt.figure(figsize=(10, 6))
-
- # Make local variables available to the code
- localVars = {
- "plt": plt,
- "sns": sns,
- "pd": pd,
- "np": __import__('numpy')
- }
-
- # Add datasets to local variables
- for name, df in datasets.items():
- # Create a sanitized variable name
- varName = ''.join(c if c.isalnum() else '_' for c in name)
- localVars[varName] = df
+ if not vizRecommendations:
+ # Generate visualization recommendations if none provided
+ self.mydom.logAdd(analysisPlan.get("workflowId"), "Generating visualization recommendations...", level="info", progress=50)
+ vizPrompt = f"""
+ Based on this data and task, recommend appropriate visualizations.
- # Also add with standard names for simpler code
- if "df" not in localVars:
- localVars["df"] = df
- elif "df2" not in localVars:
- localVars["df2"] = df
+ TASK: {prompt}
+ DESCRIPTION: {description}
+
+ DATASETS:
+ {json.dumps({name: {"shape": df.shape, "columns": df.columns.tolist()}
+ for name, df in datasets.items()}, indent=2)}
+
+ Recommend visualizations in JSON format:
+ {{
+ "visualizations": [
+ {{
+ "type": "chart_type",
+ "dataSource": "dataset_name",
+ "variables": ["col1", "col2"],
+ "purpose": "explanation"
+ }}
+ ]
+ }}
+ """
+
+ response = await self.mydom.callAi([
+ {"role": "system", "content": "You are a data visualization expert. Recommend appropriate visualizations based on the data and task."},
+ {"role": "user", "content": vizPrompt}
+ ])
+
+ # Extract JSON
+ jsonStart = response.find('{')
+ jsonEnd = response.rfind('}') + 1
+
+ if jsonStart >= 0 and jsonEnd > jsonStart:
+ vizData = json.loads(response[jsonStart:jsonEnd])
+ vizRecommendations = vizData.get("visualizations", [])
- # Execute the visualization code
- exec(vizCode, globals(), localVars)
+ # Determine format from filename
+ formatType = outputLabel.split('.')[-1].lower()
+ if formatType not in ['png', 'jpg', 'jpeg', 'svg']:
+ formatType = 'png'
- # Capture the image
- imgData = self._getImageBase64(formatType)
- plt.close()
+ # If no datasets available, create error message image
+ if not datasets:
+ plt.figure(figsize=(10, 6))
+ plt.text(0.5, 0.5, "No data available for visualization",
+ ha='center', va='center', fontsize=14)
+ plt.tight_layout()
+ imgData = self._getImageBase64(formatType)
+ plt.close()
+
+ return {
+ "label": outputLabel,
+ "content": imgData,
+ "metadata": {
+ "contentType": f"image/{formatType}"
+ }
+ }
- return self.formatAgentDocumentOutput(outputLabel, imgData, f"image/{formatType}")
+ # Prepare dataset info for the first dataset if none specified
+ if not vizRecommendations and datasets:
+ name, df = next(iter(datasets.items()))
+ vizRecommendations = [{
+ "type": "auto",
+ "dataSource": name,
+ "variables": df.columns.tolist()[:5],
+ "purpose": "general analysis"
+ }]
+
+ # Create visualization code prompt
+ vizPrompt = f"""
+ Generate Python matplotlib/seaborn code to create a visualization for:
+
+ TASK: {prompt}
+
+ VISUALIZATION REQUIREMENTS:
+ - Output format: {formatType}
+ - Filename: {outputLabel}
+ - Description: {description}
+
+ RECOMMENDED VISUALIZATION:
+ {json.dumps(vizRecommendations, indent=2)}
+
+ AVAILABLE DATASETS:
+ """
+
+ # Add dataset info for recommended sources
+ for viz in vizRecommendations:
+ dataSource = viz.get("dataSource")
+ if dataSource in datasets:
+ df = datasets[dataSource]
+ vizPrompt += f"\nDataset '{dataSource}':\n"
+ vizPrompt += f"- Shape: {df.shape}\n"
+ vizPrompt += f"- Columns: {df.columns.tolist()}\n"
+ vizPrompt += f"- Sample data: {df.head(3).to_dict(orient='records')}\n"
+
+ vizPrompt += """
+ Generate ONLY Python code that:
+ 1. Uses matplotlib and/or seaborn to create a clear visualization
+ 2. Sets figure size to (10, 6)
+ 3. Includes appropriate titles, labels, and legend
+ 4. Uses professional color schemes
+ 5. Handles any missing data gracefully
+
+ Return ONLY executable Python code, no explanations or markdown.
+ """
+
+ try:
+ # Get visualization code from AI
+ vizCode = await self.mydom.callAi([
+ {"role": "system", "content": "You are a data visualization expert. Provide only executable Python code."},
+ {"role": "user", "content": vizPrompt}
+ ], produceUserAnswer = True)
+
+ # Clean code
+ vizCode = vizCode.replace("```python", "").replace("```", "").strip()
+
+ # Execute visualization code
+ plt.figure(figsize=(10, 6))
+
+ # Make local variables available to the code
+ localVars = {
+ "plt": plt,
+ "sns": sns,
+ "pd": pd,
+ "np": __import__('numpy')
+ }
+
+ # Add datasets to local variables
+ for name, df in datasets.items():
+ # Create a sanitized variable name
+ varName = ''.join(c if c.isalnum() else '_' for c in name)
+ localVars[varName] = df
+
+ # Also add with standard names for simpler code
+ if "df" not in localVars:
+ localVars["df"] = df
+ elif "df2" not in localVars:
+ localVars["df2"] = df
+
+ # Execute the visualization code
+ exec(vizCode, globals(), localVars)
+
+ # Capture the image
+ imgData = self._getImageBase64(formatType)
+ plt.close()
+
+ return self.formatAgentDocumentOutput(outputLabel, imgData, f"image/{formatType}")
+
+ except Exception as e:
+ logger.error(f"Error creating visualization: {str(e)}", exc_info=True)
+
+ # Create error message image
+ plt.figure(figsize=(10, 6))
+ plt.text(0.5, 0.5, f"Visualization error: {str(e)}",
+ ha='center', va='center', fontsize=12)
+ plt.tight_layout()
+ imgData = self._getImageBase64(formatType)
+ plt.close()
+
+ return self.formatAgentDocumentOutput(outputLabel, imgData, f"image/{formatType}")
except Exception as e:
logger.error(f"Error creating visualization: {str(e)}", exc_info=True)
@@ -664,6 +868,102 @@ class AgentAnalyst(AgentBase):
# Convert to base64
return base64.b64encode(imageData).decode('utf-8')
+ async def _analyzeData(self, task: Dict[str, Any], analysisPlan: Dict[str, Any]) -> Dict[str, Any]:
+ """
+ Analyze data based on the analysis plan.
+
+ Args:
+ task: Task dictionary with input documents and specifications
+ analysisPlan: Analysis plan from _createAnalysisPlan
+
+ Returns:
+ Analysis results dictionary
+ """
+ try:
+ # Extract data from input documents
+ inputDocuments = task.get("inputDocuments", [])
+ datasets, documentContext = self._extractData(inputDocuments)
+
+ # Get task information
+ prompt = task.get("prompt", "")
+ outputSpecs = task.get("outputSpecifications", [])
+
+ # Analyze task requirements
+ analysisResults = await self._analyzeTask(prompt, documentContext, datasets, outputSpecs)
+
+ # Add datasets and context to results
+ analysisResults["datasets"] = datasets
+ analysisResults["documentContext"] = documentContext
+
+ return analysisResults
+
+ except Exception as e:
+ logger.error(f"Error analyzing data: {str(e)}", exc_info=True)
+ return {
+ "error": str(e),
+ "datasets": {},
+ "documentContext": ""
+ }
+
+ async def _createOutputDocuments(self, prompt: str, analysisResults: Dict[str, Any],
+ outputSpecs: List[Dict[str, Any]], analysisPlan: Dict[str, Any]) -> List[Dict[str, Any]]:
+ """
+ Create output documents based on analysis results.
+
+ Args:
+ prompt: Original task prompt
+ analysisResults: Results from data analysis
+ outputSpecs: List of output specifications
+ analysisPlan: Analysis plan from _createAnalysisPlan
+
+ Returns:
+ List of document objects
+ """
+ documents = []
+ datasets = analysisResults.get("datasets", {})
+ documentContext = analysisResults.get("documentContext", "")
+
+ # Process each output specification
+ for spec in outputSpecs:
+ outputLabel = spec.get("label", "")
+ outputDescription = spec.get("description", "")
+
+ # Determine format from filename
+ formatType = outputLabel.split('.')[-1].lower() if '.' in outputLabel else "txt"
+
+ try:
+ # Create appropriate document based on format
+ if formatType in ["png", "jpg", "jpeg", "svg"]:
+ # Visualization output
+ document = await self._createVisualization(
+ datasets, prompt, outputLabel, analysisPlan, outputDescription
+ )
+ elif formatType in ["csv", "json", "xlsx"]:
+ # Data document output
+ document = await self._createDataDocument(
+ datasets, prompt, outputLabel, analysisPlan, outputDescription
+ )
+ else:
+ # Text document output (markdown, html, text)
+ document = await self._createTextDocument(
+ datasets, documentContext, prompt, outputLabel, formatType,
+ analysisPlan, outputDescription
+ )
+
+ documents.append(document)
+
+ except Exception as e:
+ logger.error(f"Error creating output document {outputLabel}: {str(e)}", exc_info=True)
+ # Create error document
+ errorDoc = self.formatAgentDocumentOutput(
+ outputLabel,
+ f"Error creating document: {str(e)}",
+ "text/plain"
+ )
+ documents.append(errorDoc)
+
+ return documents
+
# Factory function for the Analyst agent
def getAgentAnalyst():
diff --git a/modules/agentCoach.py b/modules/agentCoach.py
index 6a0616bf..5995dff7 100644
--- a/modules/agentCoach.py
+++ b/modules/agentCoach.py
@@ -33,8 +33,7 @@ class AgentCoach(AgentBase):
def setDependencies(self, mydom=None):
"""Set external dependencies for the agent."""
- self.mydom = mydom
-
+
async def processTask(self, task: Dict[str, Any]) -> Dict[str, Any]:
"""
Process a task by directly using AI to provide answers or content based on extracted data.
diff --git a/modules/agentCoder.py b/modules/agentCoder.py
index 60dc0072..8950e6c7 100644
--- a/modules/agentCoder.py
+++ b/modules/agentCoder.py
@@ -41,8 +41,7 @@ class AgentCoder(AgentBase):
def setDependencies(self, mydom=None):
"""Set external dependencies for the agent."""
- self.mydom = mydom
-
+
async def processTask(self, task: Dict[str, Any]) -> Dict[str, Any]:
"""
Process a task and perform code development/execution.
diff --git a/modules/agentDocumentation.py b/modules/agentDocumentation.py
index 38b401d2..259795fe 100644
--- a/modules/agentDocumentation.py
+++ b/modules/agentDocumentation.py
@@ -30,8 +30,7 @@ class AgentDocumentation(AgentBase):
def setDependencies(self, mydom=None):
"""Set external dependencies for the agent."""
- self.mydom = mydom
-
+
async def processTask(self, task: Dict[str, Any]) -> Dict[str, Any]:
"""
Process a task by focusing on required outputs and using AI to generate them.
diff --git a/modules/agentEmail.py b/modules/agentEmail.py
index effd0591..116e58fc 100644
--- a/modules/agentEmail.py
+++ b/modules/agentEmail.py
@@ -7,8 +7,8 @@ import logging
import json
import base64
import os
-import msal
import requests
+import msal
from typing import Dict, Any, List, Optional
from modules.configuration import APP_CONFIG
@@ -41,15 +41,11 @@ class AgentEmail(AgentBase):
self.authority = None
self.scopes = ["Mail.ReadWrite", "User.Read"]
- # Token storage directory
- self.token_dir = './token_storage'
- if not os.path.exists(self.token_dir):
- os.makedirs(self.token_dir)
- logger.info(f"Created token storage directory: {self.token_dir}")
+ # API base URL for Microsoft authentication
+ self.api_base_url = APP_CONFIG.get("APP_API_URL", "(no-url)")
def setDependencies(self, mydom=None):
"""Set external dependencies for the agent."""
- self.mydom = mydom
self._loadConfiguration()
def _loadConfiguration(self):
@@ -84,6 +80,7 @@ class AgentEmail(AgentBase):
# Extract task information
prompt = task.get("prompt", "")
inputDocuments = task.get("inputDocuments", [])
+ outputSpecs = task.get("outputSpecifications", [])
# Check AI service
if not self.mydom:
@@ -131,22 +128,36 @@ class AgentEmail(AgentBase):
# Prepare output documents
documents = []
- # Add HTML preview document
- previewDoc = self.formatAgentDocumentOutput(
- "email_preview.html",
- htmlPreview,
- "text/html"
- )
- documents.append(previewDoc)
-
- # Add email template as JSON for reference
- templateJson = json.dumps(emailTemplate, indent=2)
- templateDoc = self.formatAgentDocumentOutput(
- "email_template.json",
- templateJson,
- "application/json"
- )
- documents.append(templateDoc)
+ # Process output specifications
+ for spec in outputSpecs:
+ label = spec.get("label", "")
+ description = spec.get("description", "")
+
+ if label.endswith(".html"):
+ # Create the HTML template file
+ templateDoc = self.formatAgentDocumentOutput(
+ label,
+ emailTemplate["htmlBody"], # Use the actual HTML body, not the preview
+ "text/html"
+ )
+ documents.append(templateDoc)
+ elif label.endswith(".json"):
+ # Create JSON template if requested
+ templateJson = json.dumps(emailTemplate, indent=2)
+ templateDoc = self.formatAgentDocumentOutput(
+ label,
+ templateJson,
+ "application/json"
+ )
+ documents.append(templateDoc)
+ else:
+ # Default to preview for other cases
+ previewDoc = self.formatAgentDocumentOutput(
+ label,
+ htmlPreview,
+ "text/html"
+ )
+ documents.append(previewDoc)
# Prepare feedback message
if draft_result:
@@ -233,28 +244,20 @@ class AgentEmail(AgentBase):
# Add document name to contents
documentContents.append(f"\n\n--- {docName} ---\n")
- # Process contents
- hasAttachment = False
- for content in doc.get("contents", []):
- # Add extracted text to document contents
- if content.get("dataExtracted"):
- documentContents.append(content.get("dataExtracted", ""))
-
- # Prepare attachment if it has content data
- if content.get("data"):
- # Check if this content should be an attachment
- # Typically files like PDFs, images, etc.
- contentType = content.get("contentType", "")
- if (not contentType.startswith("text/") or
- contentType in ["application/pdf", "application/msword"]):
- hasAttachment = True
-
- # If document has content to attach, add to attachments
- if hasAttachment:
+ # Process document data directly
+ if doc.get("data"):
+ # Add to attachments with proper metadata
attachments.append({
"name": docName,
- "document": doc
+ "document": {
+ "data": doc["data"],
+ "mimeType": doc.get("mimeType", "application/octet-stream"),
+ "base64Encoded": doc.get("base64Encoded", False)
+ }
})
+ documentContents.append(f"Document attached: {docName}")
+ else:
+ documentContents.append(f"Document referenced: {docName}")
return "\n".join(documentContents), attachments
@@ -294,7 +297,7 @@ class AgentEmail(AgentBase):
try:
response = await self.mydom.callAi([
- {"role": "system", "content": "You are an email template specialist. Respond with valid JSON only."},
+ {"role": "system", "content": "You are an email template specialist. Create professional emails. Respond with valid JSON only."},
{"role": "user", "content": emailPrompt}
], produceUserAnswer=True)
@@ -306,7 +309,8 @@ class AgentEmail(AgentBase):
template = json.loads(response[jsonStart:jsonEnd])
return template
else:
- # Fallback if JSON not found
+ # Fallback plan
+ logger.warning("Unable to create email template; generating fallback plan")
return {
"recipient": "recipient@example.com",
"subject": "Information Regarding Your Request",
@@ -377,125 +381,86 @@ class AgentEmail(AgentBase):
"""
return html
- def _getCurrentUserToken(self):
+ def _getCurrentUserToken(self) -> tuple:
"""
- Get the current user's token from the token store.
- Does not attempt to initiate authentication flow.
-
- Returns:
- Tuple of (user info, access token) or (None, None) if no valid token
+ Get the current user's Microsoft token using the current user context.
+ Returns tuple of (user_info, access_token) or (None, None) if not authenticated.
"""
try:
- # Check if we have any token files
- if not os.path.exists(self.token_dir) or not os.listdir(self.token_dir):
- logger.warning("No token files found. User needs to authenticate with Microsoft.")
+ if not self.mydom:
+ logger.error("No mydom interface available")
return None, None
- # Find the most recently modified token file
- token_files = [os.path.join(self.token_dir, f) for f in os.listdir(self.token_dir) if f.endswith('.json')]
- if not token_files:
+ # Get token data from database
+ token_data = self.mydom.getMsftToken()
+ if not token_data:
+ logger.info("No Microsoft token found for user")
return None, None
- most_recent = max(token_files, key=os.path.getmtime)
- user_id = os.path.basename(most_recent).split('.')[0]
+ # Verify token is still valid
+ if not self._verifyToken(token_data.get("access_token")):
+ logger.info("Token invalid, attempting refresh")
+ if not self._refreshToken(token_data):
+ logger.info("Token refresh failed")
+ return None, None
+ # Get updated token data after refresh
+ token_data = self.mydom.getMsftToken()
+
+ return token_data.get("user_info"), token_data.get("access_token")
- # Load the token
- token_data = self._loadTokenFromFile(user_id)
- if not token_data or not token_data.get("access_token"):
- logger.warning(f"No valid token data for user {user_id}")
- return None, None
-
- # Get user info from token
- user_info = self._getUserInfoFromToken(token_data["access_token"])
- if not user_info:
- # Try to refresh the token
- if self._refreshToken(user_id):
- # Load the refreshed token
- token_data = self._loadTokenFromFile(user_id)
- if token_data and token_data.get("access_token"):
- user_info = self._getUserInfoFromToken(token_data["access_token"])
- if user_info:
- return user_info, token_data["access_token"]
-
- logger.warning(f"Could not get user info for user {user_id}")
- return None, None
-
- return user_info, token_data["access_token"]
except Exception as e:
logger.error(f"Error getting current user token: {str(e)}")
return None, None
-
- def _loadTokenFromFile(self, user_id):
- """Load token data from a file"""
- filename = os.path.join(self.token_dir, f"{user_id}.json")
- if os.path.exists(filename):
- try:
- with open(filename, 'r') as f:
- return json.load(f)
- except Exception as e:
- logger.error(f"Error loading token file: {str(e)}")
- return None
- return None
-
- def _getUserInfoFromToken(self, access_token):
- """Get user information using the access token"""
- headers = {
- 'Authorization': f'Bearer {access_token}',
- 'Content-Type': 'application/json'
- }
-
+
+ def _verifyToken(self, token: str) -> bool:
+ """Verify the access token is valid"""
try:
+ headers = {
+ 'Authorization': f'Bearer {token}',
+ 'Content-Type': 'application/json'
+ }
+
response = requests.get('https://graph.microsoft.com/v1.0/me', headers=headers)
- if response.status_code == 200:
- user_data = response.json()
- return {
- "name": user_data.get("displayName", ""),
- "email": user_data.get("userPrincipalName", ""),
- "id": user_data.get("id", "")
- }
- else:
- logger.error(f"Error getting user info: {response.status_code} - {response.text}")
- return None
+ return response.status_code == 200
+
except Exception as e:
- logger.error(f"Exception getting user info: {str(e)}")
- return None
-
- def _refreshToken(self, user_id):
+ logger.error(f"Error verifying token: {str(e)}")
+ return False
+
+ def _refreshToken(self, token_data: Dict[str, Any]) -> bool:
"""Refresh the access token using the stored refresh token"""
- token_data = self._loadTokenFromFile(user_id)
- if not token_data or not token_data.get("refresh_token"):
- logger.warning("No refresh token available")
- return False
-
- msal_app = msal.ConfidentialClientApplication(
- self.client_id,
- authority=self.authority,
- client_credential=self.client_secret
- )
-
- result = msal_app.acquire_token_by_refresh_token(
- token_data["refresh_token"],
- scopes=self.scopes
- )
-
- if "error" in result:
- logger.error(f"Error refreshing token: {result.get('error')}")
- return False
-
- # Update tokens in storage
- token_data["access_token"] = result["access_token"]
- if "refresh_token" in result:
- token_data["refresh_token"] = result["refresh_token"]
-
- # Save the updated token
- filename = os.path.join(self.token_dir, f"{user_id}.json")
try:
- with open(filename, 'w') as f:
- json.dump(token_data, f)
- logger.info(f"Token saved for user: {user_id}")
+ if not token_data or not token_data.get("refresh_token"):
+ logger.warning("No refresh token available")
+ return False
+
+ msal_app = msal.ConfidentialClientApplication(
+ self.client_id,
+ authority=self.authority,
+ client_credential=self.client_secret
+ )
+
+ result = msal_app.acquire_token_by_refresh_token(
+ token_data["refresh_token"],
+ scopes=self.scopes
+ )
+
+ if "error" in result:
+ logger.error(f"Error refreshing token: {result.get('error')}")
+ return False
+
+ # Update token data
+ token_data["access_token"] = result["access_token"]
+ if "refresh_token" in result:
+ token_data["refresh_token"] = result["refresh_token"]
+
+ # Save updated token
+ self.mydom.saveMsftToken(token_data)
+ logger.info("Access token refreshed successfully")
return True
+
except Exception as e:
- logger.error(f"Error saving token file: {str(e)}")
+ logger.error(f"Error refreshing token: {str(e)}")
return False
def _createDraftEmail(self, recipient, subject, body, attachments=None):
@@ -522,8 +487,8 @@ class AgentEmail(AgentBase):
def _createGraphDraftEmail(self, access_token, recipient, subject, body, attachments=None):
"""
- Create a draft email using Microsoft Graph API with fixed attachment handling.
- Directly uses the document's data attribute for attachments.
+ Create a draft email using Microsoft Graph API.
+ Treats all files as binary attachments without content analysis.
Args:
access_token: Microsoft Graph access token
@@ -540,7 +505,7 @@ class AgentEmail(AgentBase):
'Content-Type': 'application/json'
}
- # Prepare email data
+ # Prepare email data with proper structure
email_data = {
'subject': subject,
'body': {
@@ -561,94 +526,84 @@ class AgentEmail(AgentBase):
email_data['attachments'] = []
for attachment in attachments:
- # Get the document object
doc = attachment.get('document', {})
file_name = attachment.get('name', 'attachment.file')
logger.info(f"Processing attachment: {file_name}")
- # Directly access the data attribute from the document
- if 'data' in doc:
- file_content = doc['data']
- is_base64 = doc.get('base64Encoded', False)
-
- # Determine content type
- content_type = "application/octet-stream"
- if 'mimeType' in doc:
- content_type = doc['mimeType']
- elif 'contentType' in doc:
- content_type = doc['contentType']
-
- # Check if we need to encode the content
- if not is_base64:
- logger.info(f"Base64 encoding content for {file_name}")
+ # Get the document data directly
+ file_content = doc.get('data')
+ if not file_content:
+ logger.warning(f"No data found for attachment: {file_name}")
+ continue
+
+ # Get content type from document metadata
+ mime_type = doc.get('mimeType', 'application/octet-stream')
+ is_base64 = doc.get('base64Encoded', False)
+
+ # Handle content encoding
+ try:
+ if is_base64:
+ # Content is already base64 encoded
+ content_bytes = file_content
+ else:
+ # Content needs to be base64 encoded
if isinstance(file_content, str):
- try:
- # Check if already valid base64
- base64.b64decode(file_content)
- logger.info("Content appears to be valid base64 already")
- except:
- # Not valid base64, encode it
- logger.info("Encoding string content to base64")
- file_content = base64.b64encode(file_content.encode('utf-8')).decode('utf-8')
+ # For text files, encode the string to bytes first
+ content_bytes = base64.b64encode(file_content.encode('utf-8')).decode('utf-8')
elif isinstance(file_content, bytes):
- logger.info("Encoding bytes content to base64")
- file_content = base64.b64encode(file_content).decode('utf-8')
+ # For binary files, encode directly
+ content_bytes = base64.b64encode(file_content).decode('utf-8')
+ else:
+ logger.warning(f"Unexpected content type for {file_name}")
+ continue
+
+ # Calculate size from decoded content
+ decoded_size = len(base64.b64decode(content_bytes))
# Add attachment to email data
- logger.info(f"Adding attachment: {file_name} ({content_type})")
+ logger.info(f"Adding attachment: {file_name} ({mime_type}, size: {decoded_size} bytes)")
attachment_data = {
'@odata.type': '#microsoft.graph.fileAttachment',
'name': file_name,
- 'contentType': content_type,
- 'contentBytes': file_content
+ 'contentType': mime_type,
+ 'contentBytes': content_bytes,
+ 'isInline': False,
+ 'size': decoded_size
}
email_data['attachments'].append(attachment_data)
logger.info(f"Successfully added attachment: {file_name}")
- else:
- logger.warning(f"Document does not contain 'data' attribute: {file_name}")
- # Try to find data in the fileId
- if 'fileId' in doc:
- logger.info(f"Found fileId: {doc['fileId']} - could implement fileId-based attachment lookup here")
- # Future enhancement: implement file lookup by fileId
+
+ except Exception as e:
+ logger.error(f"Error processing attachment {file_name}: {str(e)}")
+ continue
- # Try to create draft using drafts folder endpoint (Option 1)
+ # Try to create draft using drafts folder endpoint
try:
- logger.info("Attempting to create draft email using drafts folder endpoint")
+ logger.info("Attempting to create draft email using messages endpoint")
logger.info(f"Email data structure: subject={subject}, recipient={recipient}, " +
- f"has_attachments={bool(email_data.get('attachments'))}, " +
- f"attachment_count={len(email_data.get('attachments', []))}")
+ f"has_attachments={bool(email_data.get('attachments'))}, " +
+ f"attachment_count={len(email_data.get('attachments', []))}")
+ # Create the draft message
response = requests.post(
- 'https://graph.microsoft.com/v1.0/me/mailFolders/drafts/messages',
+ 'https://graph.microsoft.com/v1.0/me/messages',
headers=headers,
json=email_data
)
if response.status_code >= 200 and response.status_code < 300:
- logger.info("Successfully created draft email using drafts folder endpoint")
+ logger.info("Successfully created draft email using messages endpoint")
return response.json()
else:
- logger.error(f"Drafts folder method failed: {response.status_code} - {response.text}")
-
- # Try fallback method with messages endpoint (Option 2)
- logger.info("Trying fallback with messages endpoint")
- response = requests.post(
- 'https://graph.microsoft.com/v1.0/me/messages',
- headers=headers,
- json=email_data
- )
-
- if response.status_code >= 200 and response.status_code < 300:
- logger.info("Successfully created draft email using messages endpoint")
- return response.json()
- else:
- logger.error(f"Messages endpoint method also failed: {response.status_code} - {response.text}")
- return None
+ logger.error(f"Messages endpoint method failed: {response.status_code} - {response.text}")
+ logger.error(f"Request headers: {headers}")
+ logger.error(f"Request body: {json.dumps(email_data, indent=2)}")
+ return None
except Exception as e:
logger.error(f"Exception creating draft email: {str(e)}", exc_info=True)
- return None
+ return None
# Factory function for the Email agent
def getAgentEmail():
diff --git a/modules/agentWebcrawler.py b/modules/agentWebcrawler.py
index 7f5cad09..56cac5b1 100644
--- a/modules/agentWebcrawler.py
+++ b/modules/agentWebcrawler.py
@@ -52,7 +52,6 @@ class AgentWebcrawler(AgentBase):
def setDependencies(self, mydom=None):
"""Set external dependencies for the agent."""
- self.mydom = mydom
async def processTask(self, task: Dict[str, Any]) -> Dict[str, Any]:
"""
@@ -68,6 +67,7 @@ class AgentWebcrawler(AgentBase):
# Extract task information
prompt = task.get("prompt", "")
outputSpecs = task.get("outputSpecifications", [])
+ workflow = task.get("context", {}).get("workflow", {})
# Check AI service
if not self.mydom:
@@ -77,6 +77,8 @@ class AgentWebcrawler(AgentBase):
}
# Create research plan
+ if workflow:
+ self.workflowManager.logAdd(workflow, "Creating research plan...", level="info", progress=35)
researchPlan = await self._createResearchPlan(prompt)
# Check if this is truly a web research task
@@ -87,9 +89,13 @@ class AgentWebcrawler(AgentBase):
}
# Gather raw material through web research
- rawResults = await self._gatherResearchMaterial(researchPlan)
+ if workflow:
+ self.workflowManager.logAdd(workflow, "Gathering research material...", level="info", progress=45)
+ rawResults = await self._gatherResearchMaterial(researchPlan, workflow)
# Format results into requested output documents
+ if workflow:
+ self.workflowManager.logAdd(workflow, "Creating output documents...", level="info", progress=55)
documents = await self._createOutputDocuments(
prompt,
rawResults,
@@ -142,9 +148,9 @@ class AgentWebcrawler(AgentBase):
try:
# Get research plan from AI
response = await self.mydom.callAi([
- {"role": "system", "content": "You are a web research planning expert. Create precise research plans in JSON format only."},
+ {"role": "system", "content": "You are a web research planning expert. Create precise research plans. Respond with valid JSON only."},
{"role": "user", "content": researchPrompt}
- ])
+ ], produceUserAnswer=True)
# Extract JSON
jsonStart = response.find('{')
@@ -188,12 +194,13 @@ class AgentWebcrawler(AgentBase):
"feedback": f"I'll conduct web research on '{prompt}' and gather relevant information."
}
- async def _gatherResearchMaterial(self, researchPlan: Dict[str, Any]) -> List[Dict[str, Any]]:
+ async def _gatherResearchMaterial(self, researchPlan: Dict[str, Any], workflow: Dict[str, Any]) -> List[Dict[str, Any]]:
"""
Gather research material based on the research plan.
Args:
researchPlan: Research plan dictionary
+ workflow: Current workflow object
Returns:
List of research results
@@ -202,7 +209,10 @@ class AgentWebcrawler(AgentBase):
# Process direct URLs
directUrls = researchPlan.get("directUrls", [])[:self.maxUrl]
- for url in directUrls:
+ for i, url in enumerate(directUrls):
+ progress = 45 + int((i / len(directUrls)) * 5) # Progress from 45% to 50%
+ if hasattr(self, 'workflowManager') and self.workflowManager:
+ self.workflowManager.logAdd(workflow, f"Processing direct URL {i+1}/{len(directUrls)}...", level="info", progress=progress)
logger.info(f"Processing direct URL: {url}")
try:
# Fetch and extract content
@@ -226,7 +236,10 @@ class AgentWebcrawler(AgentBase):
# Process search terms
searchTerms = researchPlan.get("searchTerms", [])[:self.maxSearchTerms]
- for term in searchTerms:
+ for i, term in enumerate(searchTerms):
+ progress = 50 + int((i / len(searchTerms)) * 5) # Progress from 50% to 55%
+ if hasattr(self, 'workflowManager') and self.workflowManager:
+ self.workflowManager.logAdd(workflow, f"Searching term {i+1}/{len(searchTerms)}...", level="info", progress=progress)
logger.info(f"Searching for: {term}")
try:
# Perform search
@@ -255,7 +268,7 @@ class AgentWebcrawler(AgentBase):
if len(allResults) >= self.maxResults:
break
- # Create summaries in parallel for all results
+ # Create summaries for all results
allResults = await self._summarizeAllResults(allResults, researchPlan)
return allResults
@@ -302,19 +315,15 @@ class AgentWebcrawler(AgentBase):
Only include information actually found in the content. No fabrications or assumptions.
"""
- if self.mydom:
- summary = await self.mydom.callAi([
- {"role": "system", "content": "You summarize web content accurately and concisely, focusing only on what is actually in the content."},
- {"role": "user", "content": summaryPrompt}
- ])
-
- # Store the summary
- result["summary"] = summary
- else:
- # Fallback if no AI service
- logger.warning(f"Not able to summarize result, using fallback plan.")
- result["summary"] = f"Content from {result['url']} ({len(content)} characters)"
-
+ # Get summary from AI
+ summary = await self.mydom.callAi([
+ {"role": "system", "content": "You are a web content summarization expert. Create concise summaries."},
+ {"role": "user", "content": summaryPrompt}
+ ], produceUserAnswer=True)
+
+ # Add summary to result
+ result["summary"] = summary.strip()
+
except Exception as e:
logger.warning(f"Error summarizing result {i+1}: {str(e)}")
result["summary"] = f"Error creating summary: {str(e)}"
diff --git a/modules/documentProcessor.py b/modules/documentProcessor.py
index d3b637e1..ee48dc07 100644
--- a/modules/documentProcessor.py
+++ b/modules/documentProcessor.py
@@ -17,6 +17,10 @@ pdfExtractorLoaded = False
officeExtractorLoaded = False
imageProcessorLoaded = False
+class FileProcessingError(Exception):
+ """Custom exception for file processing errors."""
+ pass
+
def getDocumentContents(fileMetadata: Dict[str, Any], fileContent: bytes) -> List[Dict[str, Any]]:
"""
Main function for extracting content from a file based on its MIME type.
@@ -38,8 +42,50 @@ def getDocumentContents(fileMetadata: Dict[str, Any], fileContent: bytes) -> Lis
# Extract content based on MIME type
contents = []
+ # Try to detect actual file type from content for unknown MIME types
+ if mimeType == "application/octet-stream":
+ # Check file extension first
+ ext = os.path.splitext(fileName)[1].lower()
+ if ext:
+ # Map common extensions to MIME types
+ ext_to_mime = {
+ '.txt': 'text/plain',
+ '.md': 'text/markdown',
+ '.csv': 'text/csv',
+ '.json': 'application/json',
+ '.xml': 'application/xml',
+ '.js': 'application/javascript',
+ '.py': 'application/x-python',
+ '.svg': 'image/svg+xml',
+ '.jpg': 'image/jpeg',
+ '.jpeg': 'image/jpeg',
+ '.png': 'image/png',
+ '.gif': 'image/gif',
+ '.pdf': 'application/pdf',
+ '.docx': 'application/vnd.openxmlformats-officedocument.wordprocessingml.document',
+ '.doc': 'application/msword',
+ '.xlsx': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
+ '.xls': 'application/vnd.ms-excel',
+ '.pptx': 'application/vnd.openxmlformats-officedocument.presentationml.presentation',
+ '.ppt': 'application/vnd.ms-powerpoint'
+ }
+ if ext in ext_to_mime:
+ mimeType = ext_to_mime[ext]
+ logger.info(f"Detected MIME type {mimeType} from extension {ext}")
+ else:
+ logger.warning(f"Unknown file extension {ext} for file {fileName}")
+
+    if mimeType == "application/octet-stream":  # still unidentified: try text detection; remapped types fall through to the elif chain
+ try:
+ text_content = fileContent.decode('utf-8')
+ logger.info(f"Successfully decoded file {fileName} as text")
+ contents.extend(extractTextContent(fileName, fileContent, "text/plain"))
+ except UnicodeDecodeError:
+ logger.info(f"File {fileName} is not text, treating as binary")
+ contents.extend(extractBinaryContent(fileName, fileContent, mimeType))
+
# Text-based formats (excluding CSV which has its own handler)
- if mimeType == "text/csv":
+ elif mimeType == "text/csv":
contents.extend(extractCsvContent(fileName, fileContent))
# Then handle other text-based formats
@@ -86,6 +132,7 @@ def getDocumentContents(fileMetadata: Dict[str, Any], fileContent: bytes) -> Lis
# Binary data as fallback for unknown formats
else:
+ logger.warning(f"Unknown MIME type {mimeType} for file {fileName}, treating as binary")
contents.extend(extractBinaryContent(fileName, fileContent, mimeType))
# Fallback when no content could be extracted
@@ -99,7 +146,7 @@ def getDocumentContents(fileMetadata: Dict[str, Any], fileContent: bytes) -> Lis
"sequenceNr": 1,
"name": '1_undefined',
"ext": os.path.splitext(fileName)[1][1:] if os.path.splitext(fileName)[1] else "bin",
- "contentType": mimeType,
+ "mimeType": mimeType,
"data": encoded_data,
"base64Encoded": True,
"metadata": {
@@ -130,13 +177,13 @@ def getDocumentContents(fileMetadata: Dict[str, Any], fileContent: bytes) -> Lis
return contents
except Exception as e:
- logger.error(f"Error during content extraction: {str(e)}")
+ logger.error(f"Error during content extraction for file {fileMetadata.get('name', 'unknown')}: {str(e)}", exc_info=True)
# Fallback on error - return original data
return [{
"sequenceNr": 1,
"name": fileMetadata.get("name", "unknown"),
"ext": os.path.splitext(fileMetadata.get("name", ""))[1][1:] if os.path.splitext(fileMetadata.get("name", ""))[1] else "bin",
- "contentType": fileMetadata.get("mimeType", "application/octet-stream"),
+ "mimeType": fileMetadata.get("mimeType", "application/octet-stream"),
"data": base64.b64encode(fileContent).decode('utf-8'),
"base64Encoded": True,
"metadata": {
@@ -206,7 +253,7 @@ def extractTextContent(fileName: str, fileContent: bytes, mimeType: str) -> List
"sequenceNr": 1,
"name": "1_text", # Simplified naming
"ext": fileExtension,
- "contentType": "text/plain",
+ "mimeType": "text/plain",
"data": textContent,
"base64Encoded": False,
"metadata": {
@@ -225,7 +272,7 @@ def extractTextContent(fileName: str, fileContent: bytes, mimeType: str) -> List
"sequenceNr": 1,
"name": "1_text", # Simplified naming
"ext": fileExtension,
- "contentType": "text/plain",
+ "mimeType": "text/plain",
"data": textContent,
"base64Encoded": False,
"metadata": {
@@ -242,7 +289,7 @@ def extractTextContent(fileName: str, fileContent: bytes, mimeType: str) -> List
"sequenceNr": 1,
"name": "1_binary", # Simplified naming
"ext": fileExtension,
- "contentType": mimeType,
+ "mimeType": mimeType,
"data": base64.b64encode(fileContent).decode('utf-8'),
"base64Encoded": True,
"metadata": {
@@ -256,7 +303,7 @@ def extractTextContent(fileName: str, fileContent: bytes, mimeType: str) -> List
"sequenceNr": 1,
"name": "1_binary", # Simplified naming
"ext": fileExtension,
- "contentType": mimeType,
+ "mimeType": mimeType,
"data": base64.b64encode(fileContent).decode('utf-8'),
"base64Encoded": True,
"metadata": {
@@ -282,7 +329,7 @@ def extractCsvContent(fileName: str, fileContent: bytes) -> List[Dict[str, Any]]
"sequenceNr": 1,
"name": "1_csv", # Simplified naming
"ext": "csv",
- "contentType": "text/csv",
+ "mimeType": "text/csv",
"data": csvContent,
"base64Encoded": False,
"metadata": {
@@ -302,7 +349,7 @@ def extractCsvContent(fileName: str, fileContent: bytes) -> List[Dict[str, Any]]
"sequenceNr": 1,
"name": "1_csv", # Simplified naming
"ext": "csv",
- "contentType": "text/csv",
+ "mimeType": "text/csv",
"data": csvContent,
"base64Encoded": False,
"metadata": {
@@ -319,7 +366,7 @@ def extractCsvContent(fileName: str, fileContent: bytes) -> List[Dict[str, Any]]
"sequenceNr": 1,
"name": "1_binary", # Simplified naming
"ext": "csv",
- "contentType": "text/csv",
+ "mimeType": "text/csv",
"data": base64.b64encode(fileContent).decode('utf-8'),
"base64Encoded": True,
"metadata": {
@@ -332,7 +379,7 @@ def extractCsvContent(fileName: str, fileContent: bytes) -> List[Dict[str, Any]]
"sequenceNr": 1,
"name": "1_binary", # Simplified naming
"ext": "csv",
- "contentType": "text/csv",
+ "mimeType": "text/csv",
"data": base64.b64encode(fileContent).decode('utf-8'),
"base64Encoded": True,
"metadata": {
@@ -364,7 +411,7 @@ def extractSvgContent(fileName: str, fileContent: bytes) -> List[Dict[str, Any]]
"sequenceNr": 1,
"name": "1_svg", # Simplified naming
"ext": "svg",
- "contentType": "image/svg+xml",
+ "mimeType": "image/svg+xml",
"data": svgText,
"base64Encoded": False,
"metadata": {
@@ -380,7 +427,7 @@ def extractSvgContent(fileName: str, fileContent: bytes) -> List[Dict[str, Any]]
"sequenceNr": 1,
"name": "1_text",
"ext": "svg",
- "contentType": "text/plain",
+ "mimeType": "text/plain",
"data": svgText,
"base64Encoded": False,
"metadata": {
@@ -401,7 +448,7 @@ def extractSvgContent(fileName: str, fileContent: bytes) -> List[Dict[str, Any]]
"sequenceNr": 1,
"name": "1_svg", # Simplified naming
"ext": "svg",
- "contentType": "image/svg+xml",
+ "mimeType": "image/svg+xml",
"data": svgText,
"base64Encoded": False,
"metadata": {
@@ -422,7 +469,7 @@ def extractSvgContent(fileName: str, fileContent: bytes) -> List[Dict[str, Any]]
"sequenceNr": 1,
"name": "1_binary", # Simplified naming
"ext": "svg",
- "contentType": "image/svg+xml",
+ "mimeType": "image/svg+xml",
"data": base64.b64encode(fileContent).decode('utf-8'),
"base64Encoded": True,
"metadata": {
@@ -438,7 +485,7 @@ def extractSvgContent(fileName: str, fileContent: bytes) -> List[Dict[str, Any]]
"sequenceNr": 1,
"name": "1_binary", # Simplified naming
"ext": "svg",
- "contentType": "image/svg+xml",
+ "mimeType": "image/svg+xml",
"data": base64.b64encode(fileContent).decode('utf-8'),
"base64Encoded": True,
"metadata": {
@@ -519,7 +566,7 @@ def extractImageContent(fileName: str, fileContent: bytes, mimeType: str) -> Lis
"sequenceNr": 1,
"name": "1_image", # Simplified naming
"ext": fileExtension,
- "contentType": mimeType,
+ "mimeType": mimeType,
"data": encoded_data,
"base64Encoded": True,
"metadata": imageMetadata
@@ -531,7 +578,7 @@ def extractImageContent(fileName: str, fileContent: bytes, mimeType: str) -> Lis
"sequenceNr": 2,
"name": "2_text_image_info", # Simplified naming with label
"ext": "txt",
- "contentType": "text/plain",
+ "mimeType": "text/plain",
"data": imageDescription,
"base64Encoded": False,
"metadata": {
@@ -566,7 +613,7 @@ def extractPdfContent(fileName: str, fileContent: bytes) -> List[Dict[str, Any]]
"sequenceNr": 1,
"name": "1_pdf", # Simplified naming
"ext": "pdf",
- "contentType": "application/pdf",
+ "mimeType": "application/pdf",
"data": base64.b64encode(fileContent).decode('utf-8'),
"base64Encoded": True,
"metadata": {
@@ -604,7 +651,7 @@ def extractPdfContent(fileName: str, fileContent: bytes) -> List[Dict[str, Any]]
"sequenceNr": len(contents) + 1,
"name": f"{len(contents) + 1}_text", # Simplified naming
"ext": "txt",
- "contentType": "text/plain",
+ "mimeType": "text/plain",
"data": extractedText,
"base64Encoded": False,
"metadata": {
@@ -639,7 +686,7 @@ def extractPdfContent(fileName: str, fileContent: bytes) -> List[Dict[str, Any]]
"sequenceNr": len(contents) + 1,
"name": f"{len(contents) + 1}_image_page{pageNum+1}_{imgIndex+1}", # Simplified naming with label
"ext": imageExt,
- "contentType": f"image/{imageExt}",
+ "mimeType": f"image/{imageExt}",
"data": base64.b64encode(imageBytes).decode('utf-8'),
"base64Encoded": True,
"metadata": {
@@ -667,7 +714,7 @@ def extractPdfContent(fileName: str, fileContent: bytes) -> List[Dict[str, Any]]
"sequenceNr": 1,
"name": "1_pdf", # Simplified naming
"ext": "pdf",
- "contentType": "application/pdf",
+ "mimeType": "application/pdf",
"data": base64.b64encode(fileContent).decode('utf-8'),
"base64Encoded": True,
"metadata": {
@@ -706,7 +753,7 @@ def extractWordContent(fileName: str, fileContent: bytes, mimeType: str) -> List
"sequenceNr": 1,
"name": "1_word", # Simplified naming
"ext": fileExtension,
- "contentType": mimeType,
+ "mimeType": mimeType,
"data": base64.b64encode(fileContent).decode('utf-8'),
"base64Encoded": True,
"metadata": {
@@ -743,7 +790,7 @@ def extractWordContent(fileName: str, fileContent: bytes, mimeType: str) -> List
"sequenceNr": 1,
"name": "1_text", # Simplified naming
"ext": "txt",
- "contentType": "text/plain",
+ "mimeType": "text/plain",
"data": extractedText,
"base64Encoded": False,
"metadata": {
@@ -765,7 +812,7 @@ def extractWordContent(fileName: str, fileContent: bytes, mimeType: str) -> List
"sequenceNr": 1,
"name": "1_word", # Simplified naming
"ext": fileExtension,
- "contentType": mimeType,
+ "mimeType": mimeType,
"data": base64.b64encode(fileContent).decode('utf-8'),
"base64Encoded": True,
"metadata": {
@@ -804,7 +851,7 @@ def extractExcelContent(fileName: str, fileContent: bytes, mimeType: str) -> Lis
"sequenceNr": 1,
"name": "1_excel", # Simplified naming
"ext": fileExtension,
- "contentType": mimeType,
+ "mimeType": mimeType,
"data": base64.b64encode(fileContent).decode('utf-8'),
"base64Encoded": True,
"metadata": {
@@ -845,7 +892,7 @@ def extractExcelContent(fileName: str, fileContent: bytes, mimeType: str) -> Lis
"sequenceNr": len(contents) + 1,
"name": f"{len(contents) + 1}_csv_{sheetSafeName}", # Simplified naming with sheet label
"ext": "csv",
- "contentType": "text/csv",
+ "mimeType": "text/csv",
"data": csvContent,
"base64Encoded": False,
"metadata": {
@@ -867,7 +914,7 @@ def extractExcelContent(fileName: str, fileContent: bytes, mimeType: str) -> Lis
"sequenceNr": 1,
"name": "1_excel", # Simplified naming
"ext": fileExtension,
- "contentType": mimeType,
+ "mimeType": mimeType,
"data": base64.b64encode(fileContent).decode('utf-8'),
"base64Encoded": True,
"metadata": {
@@ -897,7 +944,7 @@ def extractPowerpointContent(fileName: str, fileContent: bytes, mimeType: str) -
"sequenceNr": 1,
"name": "1_powerpoint", # Simplified naming
"ext": fileExtension,
- "contentType": mimeType,
+ "mimeType": mimeType,
"data": base64.b64encode(fileContent).decode('utf-8'),
"base64Encoded": True,
"metadata": {
@@ -923,11 +970,165 @@ def extractBinaryContent(fileName: str, fileContent: bytes, mimeType: str) -> Li
"sequenceNr": 1,
"name": "1_binary", # Simplified naming
"ext": fileExtension,
- "contentType": mimeType,
+ "mimeType": mimeType,
"data": base64.b64encode(fileContent).decode('utf-8'),
"base64Encoded": True,
"metadata": {
"isText": False,
"format": "binary"
}
- }]
\ No newline at end of file
+ }]
+
+def processFile(fileContent: bytes, fileName: str, fileMetadata: Dict[str, Any] = None) -> List[Dict[str, Any]]:
+    """
+    Process a file and return its contents as a list of documents.
+
+    Args:
+        fileContent: Binary content of the file
+        fileName: Name of the file
+        fileMetadata: Optional metadata about the file
+
+    Returns:
+        List of document dictionaries
+    """
+    try:
+        import mimetypes  # stdlib fallback: module-level function has no self.mydom
+        fileExtension = os.path.splitext(fileName)[1].lower()[1:]
+        mimeType = (fileMetadata or {}).get("mimeType") or mimetypes.guess_type(fileName)[0] or "application/octet-stream"
+
+        # Process based on file type
+        if mimeType.startswith("image/"):
+            return _processImageFile(fileContent, fileName, fileExtension, mimeType, fileMetadata)
+        elif mimeType == "application/pdf":
+            return _processPdfFile(fileContent, fileName, fileMetadata)
+        elif mimeType == "text/csv":
+            return _processCsvFile(fileContent, fileName, fileMetadata)
+        elif mimeType == "text/plain":
+            return _processTextFile(fileContent, fileName, fileMetadata)
+        else:
+            # Default binary file handling
+            return [{
+                "name": fileName,
+                "ext": fileExtension,
+                "mimeType": mimeType,
+                "data": base64.b64encode(fileContent).decode('utf-8'),
+                "base64Encoded": True,
+                "metadata": {
+                    "isText": False
+                }
+            }]
+
+    except Exception as e:
+        logger.error(f"Error processing file {fileName}: {str(e)}")
+        raise FileProcessingError(f"Error processing file: {str(e)}")
+
+def _processImageFile(fileContent: bytes, fileName: str, fileExtension: str, mimeType: str, fileMetadata: Dict[str, Any] = None) -> List[Dict[str, Any]]:
+    """Process an image file."""
+    try:
+        # Create image document
+        imageDoc = {
+            "name": fileName,
+            "ext": fileExtension,
+            "mimeType": mimeType,
+            "data": base64.b64encode(fileContent).decode('utf-8'),
+            "base64Encoded": True,
+            "metadata": {
+                "isText": False,
+                "isImage": True,
+                "format": fileExtension
+            }
+        }
+
+        # Add image description if available
+        if fileMetadata and "description" in fileMetadata:
+            imageDoc["metadata"]["description"] = fileMetadata["description"]
+
+        return [imageDoc]
+
+    except Exception as e:
+        logger.error(f"Error processing image file {fileName}: {str(e)}")
+        raise FileProcessingError(f"Error processing image file: {str(e)}")
+
+def _processPdfFile(fileContent: bytes, fileName: str, fileMetadata: Dict[str, Any] = None) -> List[Dict[str, Any]]:
+    """Process a PDF file."""
+    try:
+        # Create PDF document
+        pdfDoc = {
+            "name": fileName,
+            "ext": "pdf",
+            "mimeType": "application/pdf",
+            "data": base64.b64encode(fileContent).decode('utf-8'),
+            "base64Encoded": True,
+            "metadata": {
+                "isText": False,
+                "isPdf": True
+            }
+        }
+
+        return [pdfDoc]
+
+    except Exception as e:
+        logger.error(f"Error processing PDF file {fileName}: {str(e)}")
+        raise FileProcessingError(f"Error processing PDF file: {str(e)}")
+
+def _processCsvFile(fileContent: bytes, fileName: str, fileMetadata: Dict[str, Any] = None) -> List[Dict[str, Any]]:
+    """Process a CSV file."""
+    try:
+        # Try to decode as text first
+        try:
+            csvContent = fileContent.decode('utf-8')
+            base64Encoded = False
+        except UnicodeDecodeError:
+            # If not valid UTF-8, encode as base64
+            csvContent = base64.b64encode(fileContent).decode('utf-8')
+            base64Encoded = True
+
+        # Create CSV document
+        csvDoc = {
+            "name": fileName,
+            "ext": "csv",
+            "mimeType": "text/csv",
+            "data": csvContent,
+            "base64Encoded": base64Encoded,
+            "metadata": {
+                "isText": True,
+                "isCsv": True,
+                "format": "csv"
+            }
+        }
+
+        return [csvDoc]
+
+    except Exception as e:
+        logger.error(f"Error processing CSV file {fileName}: {str(e)}")
+        raise FileProcessingError(f"Error processing CSV file: {str(e)}")
+
+def _processTextFile(fileContent: bytes, fileName: str, fileMetadata: Dict[str, Any] = None) -> List[Dict[str, Any]]:
+    """Process a text file."""
+    try:
+        # Try to decode as text
+        try:
+            textContent = fileContent.decode('utf-8')
+            base64Encoded = False
+        except UnicodeDecodeError:
+            # If not valid UTF-8, encode as base64
+            textContent = base64.b64encode(fileContent).decode('utf-8')
+            base64Encoded = True
+
+        # Create text document
+        textDoc = {
+            "name": fileName,
+            "ext": "txt",
+            "mimeType": "text/plain",
+            "data": textContent,
+            "base64Encoded": base64Encoded,
+            "metadata": {
+                "isText": True
+            }
+        }
+
+        return [textDoc]
+
+    except Exception as e:
+        logger.error(f"Error processing text file {fileName}: {str(e)}")
+        raise FileProcessingError(f"Error processing text file: {str(e)}")
\ No newline at end of file
diff --git a/modules/gatewayInterface.py b/modules/gatewayInterface.py
index 94359949..8008e1f9 100644
--- a/modules/gatewayInterface.py
+++ b/modules/gatewayInterface.py
@@ -123,27 +123,50 @@ class GatewayInterface:
def _uam(self, table: str, recordset: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
"""
- Unified user access management function that filters data based on user privileges.
+ Unified user access management function that filters data based on user privileges
+ and adds access control attributes.
Args:
table: Name of the table
recordset: Recordset to filter based on access rules
Returns:
- Filtered recordset based on user privilege level
+ Filtered recordset with access control attributes
"""
userPrivilege = self.currentUser.get("privilege", "user")
+ filtered_records = []
# Apply filtering based on privilege
if userPrivilege == "sysadmin":
- return recordset # System admins see all records
+ filtered_records = recordset # System admins see all records
elif userPrivilege == "admin":
# Admins see records in their mandate
- return [r for r in recordset if r.get("mandateId") == self.mandateId]
+ filtered_records = [r for r in recordset if r.get("mandateId") == self.mandateId]
else: # Regular users
# Users only see records they own within their mandate
- return [r for r in recordset
+ filtered_records = [r for r in recordset
if r.get("mandateId") == self.mandateId and r.get("userId") == self.userId]
+
+        filtered_records = [dict(r) for r in filtered_records]  # shallow-copy rows so _hide* flags do not mutate cached records
+        for record in filtered_records:
+ record_id = record.get("id")
+
+ # Set access control flags based on user permissions
+ if table == "mandates":
+ record["_hideView"] = False # Everyone can view
+ record["_hideEdit"] = not self._canModify("mandates", record_id)
+ record["_hideDelete"] = not self._canModify("mandates", record_id)
+ elif table == "users":
+ record["_hideView"] = False # Everyone can view
+ record["_hideEdit"] = not self._canModify("users", record_id)
+ record["_hideDelete"] = not self._canModify("users", record_id)
+ else:
+ # Default access control for other tables
+ record["_hideView"] = False
+ record["_hideEdit"] = not self._canModify(table, record_id)
+ record["_hideDelete"] = not self._canModify(table, record_id)
+
+ return filtered_records
def _canModify(self, table: str, recordId: Optional[int] = None) -> bool:
"""
@@ -393,7 +416,11 @@ class GatewayInterface:
def authenticateUser(self, username: str, password: str) -> Optional[Dict[str, Any]]:
"""Authenticates a user by username and password."""
- # Instead of using UAM filtering, directly get user from database
+ # Clear the users table from cache and reload it
+ if "users" in self.db._tablesCache:
+ del self.db._tablesCache["users"]
+
+ # Get fresh user data
users = self.db.getRecordset("users")
user = next((u for u in users if u.get("username") == username), None)
diff --git a/modules/lucydomInterface.py b/modules/lucydomInterface.py
index b1fc5dcb..91d7769a 100644
--- a/modules/lucydomInterface.py
+++ b/modules/lucydomInterface.py
@@ -11,6 +11,7 @@ from typing import Dict, Any, List, Optional, Union
import importlib
import hashlib
+import json
from modules.mimeUtils import isTextMimeType, determineContentEncoding
@@ -161,35 +162,72 @@ class LucyDOMInterface:
def _uam(self, table: str, recordset: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
"""
- Unified user access management function that filters data based on user privileges.
+ Unified user access management function that filters data based on user privileges
+ and adds access control attributes.
Args:
table: Name of the table
recordset: Recordset to filter based on access rules
Returns:
- Filtered recordset based on user privilege level
+ Filtered recordset with access control attributes
"""
userPrivilege = self.currentUser.get("privilege", "user")
+ filtered_records = []
# Apply filtering based on privilege
if userPrivilege == "sysadmin":
- return recordset # System admins see all records
+ filtered_records = recordset # System admins see all records
elif userPrivilege == "admin":
# Admins see records in their mandate
- return [r for r in recordset if r.get("mandateId") == self.mandateId]
+ filtered_records = [r for r in recordset if r.get("mandateId") == self.mandateId]
else: # Regular users
# To see all prompts from mandate 0 and own
if table == "prompts":
- return [r for r in recordset if
+ filtered_records = [r for r in recordset if
(r.get("mandateId") == self.mandateId and r.get("userId") == self.userId)
or
(r.get("mandateId") == 0)
]
- # Users see only their records
- return [r for r in recordset
+ else:
+ # Users see only their records
+ filtered_records = [r for r in recordset
if r.get("mandateId") == self.mandateId and r.get("userId") == self.userId]
-
+
+        filtered_records = [dict(r) for r in filtered_records]  # shallow-copy rows so _hide* flags do not mutate cached records
+        for record in filtered_records:
+ record_id = record.get("id")
+
+ # Set access control flags based on user permissions
+ if table == "prompts":
+ record["_hideView"] = False # Everyone can view
+ record["_hideEdit"] = not self._canModify("prompts", record_id)
+ record["_hideDelete"] = not self._canModify("prompts", record_id)
+ elif table == "files":
+ record["_hideView"] = False # Everyone can view
+ record["_hideEdit"] = not self._canModify("files", record_id)
+ record["_hideDelete"] = not self._canModify("files", record_id)
+ record["_hideDownload"] = not self._canModify("files", record_id)
+ elif table == "workflows":
+ record["_hideView"] = False # Everyone can view
+ record["_hideEdit"] = not self._canModify("workflows", record_id)
+ record["_hideDelete"] = not self._canModify("workflows", record_id)
+ elif table == "workflowMessages":
+ record["_hideView"] = False # Everyone can view
+ record["_hideEdit"] = not self._canModify("workflows", record.get("workflowId"))
+ record["_hideDelete"] = not self._canModify("workflows", record.get("workflowId"))
+ elif table == "workflowLogs":
+ record["_hideView"] = False # Everyone can view
+ record["_hideEdit"] = not self._canModify("workflows", record.get("workflowId"))
+ record["_hideDelete"] = not self._canModify("workflows", record.get("workflowId"))
+ else:
+ # Default access control for other tables
+ record["_hideView"] = False
+ record["_hideEdit"] = not self._canModify(table, record_id)
+ record["_hideDelete"] = not self._canModify(table, record_id)
+
+ return filtered_records
+
def _canModify(self, table: str, recordId: Optional[int] = None) -> bool:
"""
Checks if the current user can modify (create/update/delete) records in a table.
@@ -357,11 +395,14 @@ class LucyDOMInterface:
return hashlib.sha256(fileContent).hexdigest()
def checkForDuplicateFile(self, fileHash: str) -> Optional[Dict[str, Any]]:
- """Checks if a file with the same hash already exists."""
- files = self.db.getRecordset("files", recordFilter={"fileHash": fileHash})
- filteredFiles = self._uam("files", files)
- if filteredFiles:
- return filteredFiles[0]
+ """Checks if a file with the same hash already exists for the current user and mandate."""
+ files = self.db.getRecordset("files", recordFilter={
+ "fileHash": fileHash,
+ "mandateId": self.mandateId,
+ "userId": self.userId
+ })
+ if files:
+ return files[0]
return None
def getMimeType(self, filename: str) -> str:
@@ -669,7 +710,7 @@ class LucyDOMInterface:
fileHash = self.calculateFileHash(fileContent)
logger.debug(f"Calculated file hash: {fileHash}")
- # Check for duplicate
+ # Check for duplicate within same user/mandate
existingFile = self.checkForDuplicateFile(fileHash)
if existingFile:
logger.info(f"Duplicate found for {fileName}: {existingFile['id']}")
@@ -691,9 +732,6 @@ class LucyDOMInterface:
# Save binary data
logger.info(f"Saving file content to database for file: {fileName}")
self.createFileData(dbFile["id"], fileContent)
-
- # Debug: Export file to static folder
- self._exportFileToStatic(fileContent, dbFile["id"], fileName)
logger.info(f"File upload process completed for: {fileName}")
return dbFile
@@ -730,12 +768,6 @@ class LucyDOMInterface:
logger.error(f"Error downloading file {fileId}: {str(e)}")
raise FileError(f"Error downloading file: {str(e)}")
- def _exportFileToStatic(self, fileContent: bytes, fileId: int, fileName: str):
- """Debug helper to export files to static folder."""
- debugFilename = f"{fileId}_{fileName}"
- with open(f"./static/{debugFilename}", 'wb') as f:
- f.write(fileContent)
-
# Workflow methods
def getAllWorkflows(self) -> List[Dict[str, Any]]:
@@ -1286,7 +1318,65 @@ class LucyDOMInterface:
except Exception as e:
logger.error(f"Error loading workflow state: {str(e)}")
return None
+
+ # Microsoft Login
+
+ def getMsftToken(self) -> Optional[Dict[str, Any]]:
+ """Get Microsoft token data for the current user from database"""
+ try:
+ # Get token from database using current user's mandateId and userId
+ tokens = self.db.getRecordset("msftTokens", recordFilter={
+ "mandateId": self.mandateId,
+ "userId": self.userId
+ })
+ if tokens and len(tokens) > 0:
+ token_data = json.loads(tokens[0]["token_data"])
+ logger.info(f"Retrieved Microsoft token for user {self.userId}")
+ return token_data
+ else:
+ logger.info(f"No Microsoft token found for user {self.userId}")
+ return None
+
+ except Exception as e:
+ logger.error(f"Error retrieving Microsoft token: {str(e)}")
+ return None
+
+ def saveMsftToken(self, token_data: Dict[str, Any]) -> bool:
+ """Save Microsoft token data for the current user to database"""
+ try:
+            from datetime import datetime  # local import: created_at/updated_at below need it; check if token already exists
+ tokens = self.db.getRecordset("msftTokens", recordFilter={
+ "mandateId": self.mandateId,
+ "userId": self.userId
+ })
+
+ if tokens and len(tokens) > 0:
+ # Update existing token
+ token_id = tokens[0]["id"]
+ updated_data = {
+ "token_data": json.dumps(token_data),
+ "updated_at": datetime.now().isoformat()
+ }
+ self.db.recordModify("msftTokens", token_id, updated_data)
+ logger.info(f"Updated Microsoft token for user {self.userId}")
+ else:
+ # Create new token
+ new_token = {
+ "mandateId": self.mandateId,
+ "userId": self.userId,
+ "token_data": json.dumps(token_data),
+ "created_at": datetime.now().isoformat(),
+ "updated_at": datetime.now().isoformat()
+ }
+ self.db.recordCreate("msftTokens", new_token)
+ logger.info(f"Saved new Microsoft token for user {self.userId}")
+
+ return True
+
+ except Exception as e:
+ logger.error(f"Error saving Microsoft token: {str(e)}")
+ return False
# Singleton factory for LucyDOMInterface instances per context
_lucydomInterfaces = {}
diff --git a/modules/lucydomModel.py b/modules/lucydomModel.py
index 68939580..782d0b0e 100644
--- a/modules/lucydomModel.py
+++ b/modules/lucydomModel.py
@@ -78,6 +78,31 @@ class FileData(BaseModel):
base64Encoded: bool = Field(description="Flag indicating whether the data is base64 encoded")
+class MsftToken(BaseModel):
+ """Data model for Microsoft authentication tokens"""
+ id: int = Field(description="Unique ID of the token")
+ mandateId: int = Field(description="ID of the associated mandate")
+ userId: int = Field(description="ID of the user")
+ token_data: str = Field(description="JSON string containing the token data")
+ created_at: str = Field(description="Timestamp when the token was created")
+ updated_at: str = Field(description="Timestamp when the token was last updated")
+
+ label: Label = Field(
+ default=Label(default="Microsoft Token", translations={"en": "Microsoft Token", "fr": "Jeton Microsoft"}),
+ description="Label for the class"
+ )
+
+ # Labels for attributes
+ fieldLabels: Dict[str, Label] = {
+ "id": Label(default="ID", translations={}),
+ "mandateId": Label(default="Mandate ID", translations={"en": "Mandate ID", "fr": "ID de mandat"}),
+ "userId": Label(default="User ID", translations={"en": "User ID", "fr": "ID d'utilisateur"}),
+ "token_data": Label(default="Token Data", translations={"en": "Token Data", "fr": "Données du jeton"}),
+ "created_at": Label(default="Created At", translations={"en": "Created At", "fr": "Créé le"}),
+ "updated_at": Label(default="Updated At", translations={"en": "Updated At", "fr": "Mis à jour le"})
+ }
+
+
# Workflow model classes
class DocumentContent(BaseModel):
@@ -85,7 +110,7 @@ class DocumentContent(BaseModel):
sequenceNr: int = Field(1, description="Sequence number of the content in the source document")
name: str = Field(description="Designation")
ext: str = Field(description="Content extension for export: txt, csv, json, jpg, png")
- contentType: str = Field(description="MIME type")
+ mimeType: str = Field(description="MIME type")
summary: str = Field(description="Summary of the file content")
data: str = Field(description="Actual content, text or base64 encoded based on base64Encoded flag")
base64Encoded: bool = Field(description="Flag indicating whether the data is base64 encoded")
@@ -97,6 +122,7 @@ class Document(BaseModel):
name: str = Field(description="Name of the data object")
ext: str = Field(description="Extension of the data object")
fileId: int = Field(description="ID of the referenced file in the database")
+ mimeType: str = Field(description="MIME type")
data: str = Field(description="Content of the data as text or base64 encoded based on base64Encoded flag")
base64Encoded: bool = Field(description="Flag indicating whether the data is base64 encoded")
contents: List[DocumentContent] = Field(description="Document contents")
diff --git a/modules/workflowAgentsRegistry.py b/modules/workflowAgentsRegistry.py
index 25d8d2ff..0d3e03b9 100644
--- a/modules/workflowAgentsRegistry.py
+++ b/modules/workflowAgentsRegistry.py
@@ -32,6 +32,7 @@ class AgentBase:
self.description = "Basic agent functionality"
self.capabilities = []
self.mydom = None
+ self.workflowManager = None # Will be set by workflow manager
def setDependencies(self, mydom=None):
"""Set external dependencies for the agent."""
@@ -58,11 +59,16 @@ class AgentBase:
Args:
task: A dictionary containing:
- taskId: Unique ID for this task
- - workflowId: ID of the parent workflow (optional)
+ - workflowId: ID of the parent workflow
- prompt: The main instruction for the agent
- inputDocuments: List of document objects to process
- outputSpecifications: List of required output documents
- - context: Additional contextual information
+ - context: Additional contextual information including:
+ - workflow: The complete workflow object
+ - workflowRound: Current workflow round
+ - agentType: Type of agent
+ - timestamp: Task timestamp
+ - language: User language
Returns:
A dictionary containing:
@@ -85,51 +91,45 @@ class AgentBase:
"""Wrapper for the utility function"""
return isTextMimeType(mimeType)
- def formatAgentDocumentOutput(self, label: str, content: Any, contentType: str = None) -> Dict[str, Any]:
+ def formatAgentDocumentOutput(self, label: str, content: Any, mimeType: str = None) -> Dict[str, Any]:
"""
- Helper method to properly format a document output with base64Encoded flag and metadata.
+ Format agent output as a document.
Args:
- label: Name of the document
+ label: Label for the document
content: Content of the document
- contentType: Optional content type for the document
-
- Returns:
- Properly formatted document dictionary
+ mimeType: Optional MIME type for the document
"""
- import base64
-
- # Determine if content should be base64 encoded
- should_base64_encode = self.determineBase64EncodingFlag(label, content)
-
- # Process content based on type and encoding flag
- formatted_content = content
-
- if should_base64_encode:
- if isinstance(content, bytes):
- # Convert binary to base64
- formatted_content = base64.b64encode(content).decode('utf-8')
- elif isinstance(content, str):
- try:
- # Check if it's already base64 encoded
- base64.b64decode(content)
- # If we get here, it appears to be valid base64
- formatted_content = content
- except:
- # Not valid base64, so encode it
- formatted_content = base64.b64encode(content.encode('utf-8')).decode('utf-8')
-
- # Create document with metadata
+ # Create document structure
doc = {
- "label": label,
- "content": formatted_content,
- "base64Encoded": should_base64_encode,
- "metadata": {}
+ "id": str(uuid.uuid4()),
+ "name": label,
+ "ext": "txt", # Default extension
+ "data": content,
+ "base64Encoded": False,
+ "metadata": {
+ "isText": True
+ }
}
- # Add content type if provided
- if contentType:
- doc["metadata"]["contentType"] = contentType
+ # Set MIME type if provided
+ if mimeType:
+ doc["mimeType"] = mimeType
+ # Update extension based on MIME type
+ if mimeType == "text/markdown":
+ doc["ext"] = "md"
+ elif mimeType == "text/html":
+ doc["ext"] = "html"
+ elif mimeType == "text/csv":
+ doc["ext"] = "csv"
+ elif mimeType == "application/json":
+ doc["ext"] = "json"
+ elif mimeType.startswith("image/"):
+ doc["ext"] = mimeType.split("/")[1]
+ doc["metadata"]["isText"] = False
+ elif mimeType == "application/pdf":
+ doc["ext"] = "pdf"
+ doc["metadata"]["isText"] = False
return doc
@@ -214,6 +214,11 @@ class AgentRegistry:
self.mydom = mydom
self.updateAgentDependencies()
+ def setWorkflowManager(self, workflowManager):
+ """Set the workflow manager reference for all agents."""
+ for agent in self.agents.values():
+ agent.workflowManager = workflowManager
+
def updateAgentDependencies(self):
"""Update dependencies for all registered agents."""
for agentId, agent in self.agents.items():
@@ -245,8 +250,8 @@ class AgentRegistry:
if agentIdentifier in self.agents:
agent = self.agents[agentIdentifier]
# Ensure the agent has the AI service
- if hasattr(agent, 'setDependencies') and self.mydom:
- agent.setDependencies(mydom=self.mydom)
+ if self.mydom:
+ agent.mydom = self.mydom
return agent
logger.error(f"Agent with identifier '{agentIdentifier}' not found")
return None
diff --git a/modules/workflowManager.py b/modules/workflowManager.py
index 5d89d311..99237e02 100644
--- a/modules/workflowManager.py
+++ b/modules/workflowManager.py
@@ -10,8 +10,9 @@ import json
import re
import uuid
import base64
-from datetime import datetime
+from datetime import datetime, timedelta
from typing import Dict, Any, List, Optional, Union, Tuple
+import time
from modules.mimeUtils import isTextMimeType, determineContentEncoding
@@ -58,6 +59,7 @@ class WorkflowManager:
self.mydom = domInterface(mandateId, userId)
self.agentRegistry = getAgentRegistry()
self.agentRegistry.setMydom(self.mydom)
+ self.agentRegistry.setWorkflowManager(self) # Set self as workflow manager for all agents
### Workflow State Machine Implementation
@@ -132,6 +134,7 @@ class WorkflowManager:
Returns:
Updated workflow with processing results
"""
+ startTime = time.time()
try:
# State 3: User Message Processing
self.checkExitCriteria(workflow)
@@ -161,8 +164,42 @@ class WorkflowManager:
}
self.messageAdd(workflow, responseMessage)
- self.logAdd(workflow, f"Planned outputs: {len(objFinalDocuments)} documents", level="info", progress=20)
- self.logAdd(workflow, f"Work plan created with {len(objWorkplan)} steps", level="info", progress=25)
+ # Add detailed log entry about the task plan
+ taskPlanLog = "Input: "
+ if objFinalDocuments:
+ taskPlanLog += ", ".join(objFinalDocuments) + "
"
+ else:
+ taskPlanLog += "No input files
"
+
+ # Work Plan Steps
+ for i, task in enumerate(objWorkplan, 1):
+ agentName = task.get("agent", "unknown")
+ taskPlanLog += f"{i}. Agent {agentName}
"
+
+ # Input Documents
+ inputDocs = task.get("inputDocuments", [])
+ if inputDocs:
+ inputLabels = [doc.get("label", "unknown") for doc in inputDocs]
+ taskPlanLog += f"- Input: {', '.join(inputLabels)}
"
+
+ # Task Prompt
+ prompt = task.get('prompt', 'No prompt')
+ taskPlanLog += f"- Task: {prompt}
"
+
+ # Output Documents
+ outputDocs = task.get("outputDocuments", [])
+ if outputDocs:
+ outputLabels = [doc.get("label", "unknown") for doc in outputDocs]
+ taskPlanLog += f"- Output: {', '.join(outputLabels)}
"
+
+ # Final Results
+ taskPlanLog += "Result: "
+ if objFinalDocuments:
+ taskPlanLog += ", ".join(objFinalDocuments)
+ else:
+ taskPlanLog += "No result files"
+
+ self.logAdd(workflow, taskPlanLog, level="info", progress=25)
# State 5: Agent Execution
objResults = []
@@ -199,6 +236,10 @@ class WorkflowManager:
self.checkExitCriteria(workflow)
self.workflowFinish(workflow)
+ # Update processing time
+ endTime = time.time()
+ workflow["dataStats"]["processingTime"] = endTime - startTime
+
return workflow
except Exception as e:
@@ -207,10 +248,15 @@ class WorkflowManager:
workflow["status"] = "failed"
workflow["lastActivity"] = datetime.now().isoformat()
+ # Update processing time even on error
+ endTime = time.time()
+ workflow["dataStats"]["processingTime"] = endTime - startTime
+
# Update in database
self.mydom.updateWorkflow(workflow["id"], {
"status": "failed",
- "lastActivity": workflow["lastActivity"]
+ "lastActivity": workflow["lastActivity"],
+ "dataStats": workflow["dataStats"]
})
self.logAdd(workflow, f"Workflow failed: {str(e)}", level="error", progress=100)
@@ -241,7 +287,12 @@ class WorkflowManager:
"messages": [], # Empty list - will be filled with references
"messageIds": [], # Initialize empty messageIds list
"logs": [],
- "dataStats": {},
+ "dataStats": {
+ "bytesSent": 0,
+ "bytesReceived": 0,
+ "tokensUsed": 0,
+ "processingTime": 0.0
+ },
"currentRound": 1,
"status": "running",
"lastActivity": currentTime,
@@ -287,11 +338,24 @@ class WorkflowManager:
else:
workflow["currentRound"] = 1
+ # Ensure dataStats exists with correct field names
+ if "dataStats" not in workflow:
+ workflow["dataStats"] = {
+ "bytesSent": 0,
+ "bytesReceived": 0,
+ "tokensUsed": 0,
+ "processingTime": 0.0
+ }
+ elif "tokenCount" in workflow["dataStats"]:
+ # Convert old tokenCount to tokensUsed if needed
+ workflow["dataStats"]["tokensUsed"] = workflow["dataStats"].pop("tokenCount", 0)
+
# Update in database - only the relevant workflow fields
workflowUpdate = {
"status": workflow["status"],
"lastActivity": workflow["lastActivity"],
- "currentRound": workflow["currentRound"]
+ "currentRound": workflow["currentRound"],
+ "dataStats": workflow["dataStats"] # Include updated dataStats
}
self.mydom.updateWorkflow(workflowId, workflowUpdate)
@@ -382,6 +446,7 @@ Please analyze the request and create:
3. Do not define document inputs that don't exist or haven't been generated beforehand.
4. Create a logical sequence - earlier agents can create documents that are later used as inputs.
5. If the user has provided documents but hasn't clearly stated what they want, try to act according to the context.
+6. ALL documents provided by the user (where fileSource is "user") MUST be included in the work plan, even if they don't have content summaries or if content extraction failed.
Your answer must be strictly in the JSON_OUTPUT format, with no additions before or after the JSON object.
@@ -415,6 +480,7 @@ JSON_OUTPUT = {{
## RULES for inputDocuments:
1. The user request refers to documents where "fileSource" in available documents is "user". Those documents are in the focus for input
2. In case of redundant label in available documents, use document with highest sequenceNr if not specified differently
+3. ALL documents provided by the user MUST be included in the work plan, even if they don't have content summaries or if content extraction failed
## STRICT RULES FOR document "label":
1. Every document label MUST include a proper file extension that matches the content type.
@@ -472,6 +538,9 @@ JSON_OUTPUT = {{
return []
agentLabel = agent.label
+ # Set workflow manager reference on the agent
+ agent.workflowManager = self
+
# Log the current step
outputLabels = []
for doc in task.get("outputDocuments", []):
@@ -496,7 +565,7 @@ JSON_OUTPUT = {{
# Prepare input documents for the agent
inputDocuments = await self.prepareAgentInputDocuments(task.get('inputDocuments', []), workflow)
-
+
# Create a standardized task object for the agent as per state machine spec
agentTask = {
"taskId": str(uuid.uuid4()),
@@ -505,20 +574,61 @@ JSON_OUTPUT = {{
"inputDocuments": inputDocuments,
"outputSpecifications": outputSpecs,
"context": {
+ "workflow": workflow, # Add the complete workflow object
"workflowRound": workflow.get("currentRound", 1),
"agentType": agentName,
"timestamp": datetime.now().isoformat(),
"language": self.mydom.userLanguage # Pass language to agent
}
}
-
+
# Execute the agent with the standardized task
try:
# Process the task using the agent's standardized interface
logger.debug("TASK: "+self.parseJson2text(agentTask))
logger.debug(f"Agent '{agentName}' AI service available: {agent.mydom is not None}")
+ # Calculate bytes sent before processing
+ bytesSent = len(json.dumps(agentTask).encode('utf-8'))
+ for doc in inputDocuments:
+ if doc.get('data'):
+ bytesSent += len(doc['data'].encode('utf-8'))
+ for content in doc.get('contents', []):
+ if content.get('data'):
+ bytesSent += len(content['data'].encode('utf-8'))
+
+ # Process the task
+ startTime = time.time()
agentResults = await agent.processTask(agentTask)
+ endTime = time.time()
+
+ # Calculate bytes received
+ bytesReceived = len(json.dumps(agentResults).encode('utf-8'))
+ for doc in agentResults.get('documents', []):
+ if doc.get('content'):
+ bytesReceived += len(doc['content'].encode('utf-8'))
+
+ # Calculate tokens used (now using bytes)
+ tokensUsed = bytesSent + bytesReceived
+
+ # Update workflow statistics
+ if 'dataStats' not in workflow:
+ workflow['dataStats'] = {
+ 'bytesSent': 0,
+ 'bytesReceived': 0,
+ 'tokensUsed': 0,
+ 'processingTime': 0
+ }
+
+ workflow['dataStats']['bytesSent'] += bytesSent
+ workflow['dataStats']['bytesReceived'] += bytesReceived
+ workflow['dataStats']['tokensUsed'] += tokensUsed
+ workflow['dataStats']['processingTime'] += (endTime - startTime)
+
+ # Update in database
+ self.mydom.updateWorkflow(workflow["id"], {
+ "dataStats": workflow['dataStats']
+ })
logger.debug(f"Agent '{agentName}' completed task. RESULT: {self.parseJson2text(agentResults)}")
@@ -710,6 +820,38 @@ filesDelivered = {self.parseJson2text(matchingDocuments)}
messageObject = self.messageAdd(workflow, messageObject)
logger.debug(f"message_user = {self.parseJson2text(messageObject)}.")
+
+ # Update statistics for user input
+ if role == "user":
+ # Calculate bytes sent
+ bytesSent = len(messageContent.encode('utf-8'))
+ for doc in additionalFiles:
+ if doc.get('data'):
+ bytesSent += len(doc['data'].encode('utf-8'))
+ for content in doc.get('contents', []):
+ if content.get('data'):
+ bytesSent += len(content['data'].encode('utf-8'))
+
+ # Calculate tokens used (now using bytes)
+ tokensUsed = bytesSent
+
+ # Update workflow statistics
+ if 'dataStats' not in workflow:
+ workflow['dataStats'] = {
+ 'bytesSent': 0,
+ 'bytesReceived': 0,
+ 'tokensUsed': 0,
+ 'processingTime': 0
+ }
+
+ workflow['dataStats']['bytesSent'] += bytesSent
+ workflow['dataStats']['tokensUsed'] += tokensUsed
+
+ # Update in database
+ self.mydom.updateWorkflow(workflow["id"], {
+ "dataStats": workflow['dataStats']
+ })
+
return messageObject
async def processFileIds(self, fileIds: List[int]) -> List[Dict[str, Any]]:
@@ -789,8 +931,13 @@ filesDelivered = {self.parseJson2text(matchingDocuments)}
"fileId": fileId,
"name": os.path.splitext(fileNameExt)[0] if os.path.splitext(fileNameExt)[0] else "noname",
"ext": os.path.splitext(fileNameExt)[1][1:] if os.path.splitext(fileNameExt)[1] else "bin",
+ "mimeType": mimeType,
"data": encodedData,
"base64Encoded": base64Encoded,
+ "metadata": {
+ "isText": isTextFormat,
+ "base64Encoded": base64Encoded # For backward compatibility
+ },
"contents": []
}
@@ -799,7 +946,7 @@ filesDelivered = {self.parseJson2text(matchingDocuments)}
# Add summaries to each content item
for content in contents:
- content["summary"] = await self.messageSummarizeContent(content)
+ content["summary"] = await self.getContentExtraction(content)
# Ensure base64Encoded flag is set
if "base64Encoded" not in content:
@@ -861,97 +1008,93 @@ filesDelivered = {self.parseJson2text(matchingDocuments)}
return preparedInputs
-
- async def messageSummarizeContent(self, content: Dict[str, Any]) -> str:
- return await self.getContentExtraction(
- content,
- "Create a very concise summary (1-2 sentences, maximum 200 characters) about this content."
- )
-
async def processDocumentForAgent(self, document: Dict[str, Any], docSpec: Dict[str, Any]) -> Dict[str, Any]:
- """
- Processes a document for an agent based on the document specification.
- Uses AI to extract relevant content from the document based on the specification.
-
- Args:
- document: The document to process
- docSpec: The document specification from the project manager
+ """
+ Processes a document for an agent based on the document specification.
+ Uses AI to extract relevant content from the document based on the specification.
- Returns:
- Processed document with AI-extracted content
- """
- processedDoc = document.copy()
- partSpec = docSpec.get("contentPart", "")
-
- # Process each content item in the document
- if "contents" in processedDoc:
- processedContents = []
+ Args:
+ document: The document to process
+ docSpec: The document specification from the project manager
+
+ Returns:
+ Processed document with AI-extracted content
+ """
+ processedDoc = document.copy()
+ partSpec = docSpec.get("contentPart", "")
- for content in processedDoc["contents"]:
- # Check if part required
- if partSpec != "" and partSpec != content.get("name"):
- continue
+ # Process each content item in the document
+ if "contents" in processedDoc:
+ processedContents = []
+
+ for content in processedDoc["contents"]:
+ # Check if part required
+ if partSpec != "" and partSpec != content.get("name"):
+ continue
- # Get the prompt from the document specification
- summary = docSpec.get("prompt", "Extract the relevant information from this document")
+ # Get the prompt from the document specification
+ summary = docSpec.get("prompt", "Extract the relevant information from this document")
+
+ # Process content using the shared helper function
+ processedContent = content.copy()
+ processedContent["dataExtracted"] = await self.getContentExtraction(content, summary)
+ processedContent["metadata"]["aiProcessed"] = True
+
+ processedContents.append(processedContent)
- # Process content using the shared helper function
- processedContent = content.copy()
- processedContent["dataExtracted"] = await self.getContentExtraction(content, summary)
- processedContent["metadata"]["aiProcessed"] = True
-
- processedContents.append(processedContent)
+ processedDoc["contents"] = processedContents
- processedDoc["contents"] = processedContents
-
- return processedDoc
+ return processedDoc
async def getContentExtraction(self, content: Dict[str, Any], prompt: str = None) -> str:
"""
- Helper function that extracts or summarizes content based on its type (text/image/binary).
+ Helper function that extracts or summarizes content based on its encoding.
+ For base64 encoded content, uses callAi4Image. For non-base64 content, uses callAi.
Args:
content: Content item to analyze
- prompt: Optional custom prompt for extraction (default prompts used if not provided)
+ prompt: Custom prompt for extraction (default prompts used if not provided)
Returns:
Extracted or summarized content as text
"""
- # Extract relevant information
- data = content.get("data", "")
- contentType = content.get("contentType", "text/plain")
- base64Encoded = content.get("base64Encoded", False)
-
- # Default prompts if none provided
- if prompt is None:
- text_prompt = "Create a very concise summary (1-2 sentences, maximum 200 characters) about this content."
- image_prompt = "Create a very concise summary (1-2 sentences, maximum 200 characters) about this image."
- else:
- text_prompt = prompt
- image_prompt = prompt
-
try:
- # For image content, use the specialized image analysis
- if base64Encoded:
- return await self.mydom.callAi4Image(data, contentType, image_prompt)
-
- # For text data, use the regular AI processing
- else:
- return await self.mydom.callAi([
- {"role": "system", "content": "You are a content analyzer. Process the provided content as instructed."},
- {"role": "user", "content": f"{text_prompt}\n\n{data}"}
- ])
+ # Get content data and encoding status
+ data = content.get("data", "")
+ isBase64 = content.get("base64Encoded", False)
+ # Default prompts if none provided
+ if prompt is None:
+ textPrompt = "Create a very concise summary (1-2 sentences, maximum 200 characters) about this content."
+ imagePrompt = "Create a very concise summary (1-2 sentences, maximum 200 characters) about this image."
+ else:
+ textPrompt = prompt
+ imagePrompt = prompt
+
+ # Handle base64 encoded content
+ if isBase64:
+ try:
+ # Pass base64 encoded data directly to callAi4Image
+ return await self.mydom.callAi4Image(data, content.get("mimeType", "application/octet-stream"), imagePrompt)
+ except Exception as e:
+ logger.error(f"Error processing base64 content: {str(e)}")
+ return f"Error processing content: {str(e)}"
+ else:
+ # For non-base64 content, use callAi
+ return await self.mydom.callAi([
+ {"role": "system", "content": "You are a content analyzer. Extract relevant information from the provided content."},
+ {"role": "user", "content": f"{textPrompt}\n\nContent:\n{data}"}
+ ], produceUserAnswer=True)
+
except Exception as e:
logger.error(f"Error processing content: {str(e)}")
- return f"Content of type {contentType} (processing failed)"
-
-
+ return f"Error processing content: {str(e)}"
def messageAdd(self, workflow: Dict[str, Any], message: Dict[str, Any]) -> Dict[str, Any]:
"""
Adds a message to the workflow and updates lastActivity.
Saves the message in the database and updates the workflow with references.
+ Also updates statistics for the message.
Args:
workflow: Workflow object
@@ -989,6 +1132,35 @@ filesDelivered = {self.parseJson2text(matchingDocuments)}
# Set status if not present
if "status" not in message:
message["status"] = "step"
+
+ # Calculate statistics for the message
+ bytesSent = len(message.get("content", "").encode('utf-8'))
+ for doc in message.get("documents", []):
+ if doc.get("data"):
+ bytesSent += len(doc["data"].encode('utf-8'))
+ for content in doc.get("contents", []):
+ if content.get("data"):
+ bytesSent += len(content["data"].encode('utf-8'))
+
+ # Calculate tokens used (now using bytes)
+ tokensUsed = bytesSent
+
+ # Update workflow statistics
+ if "dataStats" not in workflow:
+ workflow["dataStats"] = {
+ "bytesSent": 0,
+ "bytesReceived": 0,
+ "tokensUsed": 0,
+ "processingTime": 0
+ }
+
+ # Update statistics based on message role
+ if message["role"] == "user":
+ workflow["dataStats"]["bytesSent"] += bytesSent
+ workflow["dataStats"]["tokensUsed"] += tokensUsed
+ else: # assistant messages
+ workflow["dataStats"]["bytesReceived"] += bytesSent
+ workflow["dataStats"]["tokensUsed"] += tokensUsed
# Add message to workflow
workflow["messages"].append(message)
@@ -1006,15 +1178,39 @@ filesDelivered = {self.parseJson2text(matchingDocuments)}
# Save to database - first the message itself
self.mydom.createWorkflowMessage(message)
- # Then save the workflow with updated references
+ # Then save the workflow with updated references and statistics
workflowUpdate = {
"lastActivity": currentTime,
- "messageIds": workflow["messageIds"] # Update the messageIds field
+ "messageIds": workflow["messageIds"],
+ "dataStats": workflow["dataStats"] # Include updated statistics
}
self.mydom.updateWorkflow(workflow["id"], workflowUpdate)
return message
+ def _trimDataInJson(self, jsonObj: Any) -> Any:
+ """
+ Trims the data attribute in JSON objects while preserving other content.
+
+ Args:
+ jsonObj: JSON object to process
+
+ Returns:
+ Processed JSON object with trimmed data attribute
+ """
+ if isinstance(jsonObj, dict):
+ # Create a copy to avoid modifying the original
+ result = jsonObj.copy()
+ if 'data' in result:
+ # Trim data attribute if it's a string
+ if isinstance(result['data'], str):
+ result['data'] = result['data'][:100] + '...'
+ # If it's a dict or list, convert to string and trim
+ else:
+ result['data'] = str(result['data'])[:100] + '...'
+ return result
+ return jsonObj
+
def logAdd(self, workflow: Dict[str, Any], message: str, level: str = "info",
progress: Optional[int] = None) -> str:
"""
@@ -1043,11 +1239,24 @@ filesDelivered = {self.parseJson2text(matchingDocuments)}
# Set agentName from global settings
agentName = GLOBAL_WORKFLOW_LABELS.get("systemName", "unknown")
+ # Process message if it contains JSON
+ processedMessage = message
+ try:
+ if isinstance(message, str) and ("{" in message or "[" in message):
+ # Try to parse as JSON
+ jsonObj = json.loads(message)
+ # Trim data attribute if present
+ processedJson = self._trimDataInJson(jsonObj)
+ processedMessage = json.dumps(processedJson)
+ except json.JSONDecodeError:
+ # If parsing fails, use original message
+ pass
+
# Create log entry
logEntry = {
"id": logId,
"workflowId": workflow["id"],
- "message": message,
+ "message": processedMessage,
"type": level,
"timestamp": datetime.now().isoformat(),
"agentName": agentName,
@@ -1066,11 +1275,11 @@ filesDelivered = {self.parseJson2text(matchingDocuments)}
# Also log in logger
if level == "info":
- logger.info(f"Workflow {workflow['id']}: {message}")
+ logger.info(f"Workflow {workflow['id']}: {processedMessage}")
elif level == "warning":
- logger.warning(f"Workflow {workflow['id']}: {message}")
+ logger.warning(f"Workflow {workflow['id']}: {processedMessage}")
elif level == "error":
- logger.error(f"Workflow {workflow['id']}: {message}")
+ logger.error(f"Workflow {workflow['id']}: {processedMessage}")
return logId
@@ -1086,56 +1295,69 @@ filesDelivered = {self.parseJson2text(matchingDocuments)}
List of file IDs for the saved documents
"""
fileIds = []
+ used_names = set() # Track used names to prevent duplicates
# Extract documents from agent results
documents = agentResults.get("documents", [])
for doc in documents:
try:
- # Extract label (filename) and content
- label = doc.get("label", "unnamed_file.txt")
- content = doc.get("content", "")
+ # Extract document data according to LucyDOM model
+ name = doc.get("name", "")
+ ext = doc.get("ext", "")
+ data = doc.get("data", "")
base64Encoded = doc.get("base64Encoded", False)
- # Split label into name and extension
- name, ext = os.path.splitext(label)
- if ext.startswith('.'):
- ext = ext[1:] # Remove leading dot
- elif not ext:
- # If no extension is provided, default to .txt for text content
- ext = "txt"
- label = f"{label}.{ext}"
+ # Skip if no name or data
+ if not name or not data:
+ logger.warning(f"Skipping document with missing name or data. Name: {name}, Has data: {bool(data)}")
+ continue
+
+ # Ensure unique filename
+ base_name = name
+ counter = 1
+ while f"{base_name}.{ext}" in used_names:
+ base_name = f"{name}_{counter}"
+ counter += 1
+ used_names.add(f"{base_name}.{ext}")
# Convert content to bytes based on base64Encoded flag
- if isinstance(content, str):
+ if isinstance(data, str):
if base64Encoded:
# Decode base64 to bytes
try:
import base64
- fileContent = base64.b64decode(content)
+ fileContent = base64.b64decode(data)
except Exception as e:
logger.warning(f"Failed to decode base64 content: {str(e)}")
- fileContent = content.encode('utf-8')
+ fileContent = data.encode('utf-8')
base64Encoded = False
else:
# Convert text to bytes
- fileContent = content.encode('utf-8')
+ fileContent = data.encode('utf-8')
else:
# Already bytes
- fileContent = content
+ fileContent = data
# Determine MIME type based on extension
- mimeType = self.mydom.getMimeType(label)
+ mimeType = self.mydom.getMimeType(f"{base_name}.{ext}")
- # Save file to database
- fileMeta = self.mydom.saveUploadedFile(fileContent, label)
+ # Create file metadata
+ fileMeta = self.mydom.createFile(
+ name=base_name,
+ mimeType=mimeType,
+ size=len(fileContent)
+ )
if fileMeta and "id" in fileMeta:
- fileId = fileMeta["id"]
- fileIds.append(fileId)
- logger.info(f"Saved document '{label}' with file ID: {fileId} (base64Encoded: {base64Encoded})")
+ # Save file content
+ if self.mydom.createFileData(fileMeta["id"], fileContent):
+ fileIds.append(fileMeta["id"])
+ logger.info(f"Saved document '{base_name}.{ext}' with file ID: {fileMeta['id']} (base64Encoded: {base64Encoded})")
+ else:
+ logger.warning(f"Failed to save content for document '{base_name}.{ext}'")
else:
- logger.warning(f"Failed to save document '{label}'")
+ logger.warning(f"Failed to create file metadata for '{base_name}.{ext}'")
except Exception as e:
logger.error(f"Error saving document from agent results: {str(e)}")
@@ -1174,11 +1396,19 @@ filesDelivered = {self.parseJson2text(matchingDocuments)}
# Extract summaries from all contents
contentSummaries = []
- for content in doc.get("contents", []):
+ if "contents" in doc and doc["contents"]:
+ for content in doc["contents"]:
+ contentSummaries.append({
+ "contentPart": content.get("name", "noname"),
+ "metadata": content.get("metadata", ""),
+ "summary": content.get("summary", "No summary"),
+ })
+ else:
+ # Add a default content summary if no contents exist
contentSummaries.append({
- "contentPart": content.get("name", "noname"),
- "metadata": content.get("metadata", ""),
- "summary": content.get("summary", "No summary"),
+ "contentPart": "1_undefined",
+ "metadata": "",
+ "summary": "No content extracted",
})
# Create document info
@@ -1277,11 +1507,12 @@ filesDelivered = {self.parseJson2text(matchingDocuments)}
# Singleton factory for the WorkflowManager
_workflowManagers = {}
+_workflowManagerLastAccess = {} # Track last access time for cleanup
def getWorkflowManager(mandateId: int = 0, userId: int = 0) -> WorkflowManager:
"""
Returns a WorkflowManager for the specified context.
- Reuses existing instances.
+ Reuses existing instances but implements cleanup for inactive instances.
Args:
mandateId: ID of the mandate
@@ -1291,6 +1522,32 @@ def getWorkflowManager(mandateId: int = 0, userId: int = 0) -> WorkflowManager:
WorkflowManager instance
"""
contextKey = f"{mandateId}_{userId}"
+ current_time = datetime.now()
+
+ # Update last access time
+ _workflowManagerLastAccess[contextKey] = current_time
+
+ # Cleanup old instances (older than 1 hour)
+ cleanup_threshold = current_time - timedelta(hours=1)
+ for key in list(_workflowManagers.keys()):
+ if _workflowManagerLastAccess.get(key, current_time) < cleanup_threshold:
+ del _workflowManagers[key]
+ del _workflowManagerLastAccess[key]
+
if contextKey not in _workflowManagers:
_workflowManagers[contextKey] = WorkflowManager(mandateId, userId)
- return _workflowManagers[contextKey]
\ No newline at end of file
+ return _workflowManagers[contextKey]
+
+def cleanupWorkflowManager(mandateId: int, userId: int) -> None:
+ """
+ Explicitly cleanup a WorkflowManager instance.
+
+ Args:
+ mandateId: ID of the mandate
+ userId: ID of the user
+ """
+ contextKey = f"{mandateId}_{userId}"
+ if contextKey in _workflowManagers:
+ del _workflowManagers[contextKey]
+ if contextKey in _workflowManagerLastAccess:
+ del _workflowManagerLastAccess[contextKey]
\ No newline at end of file
diff --git a/notes/changelog.txt b/notes/changelog.txt
index 72a0aafe..75cb1e2d 100644
--- a/notes/changelog.txt
+++ b/notes/changelog.txt
@@ -1,28 +1,33 @@
....................... TASKS
+
+agentDocumentation delivers a ".docx" file, but the content is a ".md" text markup file
+
+access management to extract into separate modules "lucydomAccess.py" and "gatewayAccess.py". Here to move the functions from "*Interface.py", which define what access which role has.
+
+check data extraction tabelle im pdf
+
+Check data extraction of types!
+
+
+
+
----------------------- OPEN
PRIO1:
-CHECK: If pictures not displayed to check utf-8 encoding in the base64 string!! general file writing and reading (example with svg)
-
-add connector to myoutlook
+sharepoint connector with document search, content search, content extraction
PRIO2:
-todo an agent for "code writing and editing" connected to the codebase, working in loops over each document...
-
sharepoint connector with document search, content search, content extraction
Split big files into content-parts
Integrate NDA Text as modal form - Data governance agreement by login with checkbox
-frontend to react
-
-frontend: no labels definition
PRIO3:
@@ -30,7 +35,7 @@ PRIO3:
Tools to transfer incl funds:
- Google SERPAPI (shelly)
- Anthropic Claude (valueon + shelly)
--
+- Cursor Pro
----------------------- DONE
diff --git a/routes/routeMsft.py b/routes/routeMsft.py
index 0c6edbd1..c5afed9b 100644
--- a/routes/routeMsft.py
+++ b/routes/routeMsft.py
@@ -1,12 +1,11 @@
from fastapi import APIRouter, HTTPException, Depends, Request, Response, status, Cookie
from fastapi.responses import HTMLResponse, RedirectResponse, JSONResponse
import msal
-import os
import logging
-import sys
import json
-from typing import Dict, Any, Optional
+from typing import Dict, Any, Optional, List
from datetime import datetime, timedelta
+import secrets
from modules.auth import getCurrentActiveUser, getUserContext, createAccessToken, ACCESS_TOKEN_EXPIRE_MINUTES
from modules.configuration import APP_CONFIG
@@ -45,26 +44,67 @@ app_config = {
"redirect_uri": REDIRECT_URI
}
-# Create a simple file-based token storage
-TOKEN_DIR = './token_storage'
-if not os.path.exists(TOKEN_DIR):
- os.makedirs(TOKEN_DIR)
- logger.info(f"Created token storage directory: {TOKEN_DIR}")
+async def save_token_to_file(token_data, currentUser: Dict[str, Any]):
+ """Save token data to database using LucyDOMInterface"""
+ try:
+ # Get current user context
+ mandateId, userId = await getUserContext(currentUser)
+ if not mandateId or not userId:
+ logger.error("No user context available for token storage")
+ return False
+
+ # Get LucyDOM interface for current user
+ mydom = getLucydomInterface(
+ mandateId=mandateId,
+ userId=userId
+ )
+ if not mydom:
+ logger.error("No LucyDOM interface available for token storage")
+ return False
+
+ # Save token to database
+ success = mydom.saveMsftToken(token_data)
+ if success:
+ logger.info("Token saved successfully to database")
+ return True
+ else:
+ logger.error("Failed to save token to database")
+ return False
+
+ except Exception as e:
+ logger.error(f"Error saving token: {str(e)}")
+ return False
-def save_token_to_file(user_id: str, token_data: Dict[str, Any]):
- """Save token data to a file"""
- filename = os.path.join(TOKEN_DIR, f"{user_id}.json")
- with open(filename, 'w') as f:
- json.dump(token_data, f)
- logger.info(f"Token saved for user: {user_id}")
-
-def load_token_from_file(user_id: str) -> Optional[Dict[str, Any]]:
- """Load token data from a file"""
- filename = os.path.join(TOKEN_DIR, f"{user_id}.json")
- if os.path.exists(filename):
- with open(filename, 'r') as f:
- return json.load(f)
- return None
+async def load_token_from_file(currentUser: Dict[str, Any]):
+ """Load token data from database using LucyDOMInterface"""
+ try:
+ # Get current user context
+ mandateId, userId = await getUserContext(currentUser)
+ if not mandateId or not userId:
+ logger.error("No user context available for token retrieval")
+ return None
+
+ # Get LucyDOM interface for current user
+ mydom = getLucydomInterface(
+ mandateId=mandateId,
+ userId=userId
+ )
+ if not mydom:
+ logger.error("No LucyDOM interface available for token retrieval")
+ return None
+
+ # Get token from database
+ token_data = mydom.getMsftToken()
+ if token_data:
+ logger.info("Token loaded successfully from database")
+ return token_data
+ else:
+ logger.info("No token found in database")
+ return None
+
+ except Exception as e:
+ logger.error(f"Error loading token: {str(e)}")
+ return None
def get_user_info_from_token(access_token: str) -> Optional[Dict[str, Any]]:
"""Get user information using the access token"""
@@ -112,9 +152,9 @@ def verify_token(token: str) -> bool:
logger.error(f"Exception verifying token: {str(e)}")
return False
-def refresh_token(user_id: str) -> bool:
+async def refresh_token(user_id: str, currentUser: Dict[str, Any]) -> bool:
"""Refresh the access token using the stored refresh token"""
- token_data = load_token_from_file(user_id)
+ token_data = await load_token_from_file(currentUser)
if not token_data or not token_data.get("refresh_token"):
logger.warning("No refresh token available")
return False
@@ -139,45 +179,13 @@ def refresh_token(user_id: str) -> bool:
if "refresh_token" in result:
token_data["refresh_token"] = result["refresh_token"]
- save_token_to_file(user_id, token_data)
+ await save_token_to_file(token_data, currentUser)
logger.info("Access token refreshed successfully")
return True
-def silent_login(user_id: str) -> bool:
- """Try to silently log in a user using their refresh token"""
- token_data = load_token_from_file(user_id)
- if not token_data or not token_data.get("refresh_token"):
- logger.info(f"No refresh token found for user: {user_id}")
- return False
-
- # Try to refresh the token
- msal_app = msal.ConfidentialClientApplication(
- app_config["client_id"],
- authority=app_config["authority"],
- client_credential=app_config["client_credential"]
- )
-
- result = msal_app.acquire_token_by_refresh_token(
- token_data["refresh_token"],
- scopes=SCOPES
- )
-
- if "error" in result:
- logger.error(f"Error refreshing token: {result.get('error')}")
- return False
-
- # Update tokens in storage
- token_data["access_token"] = result["access_token"]
- if "refresh_token" in result:
- token_data["refresh_token"] = result["refresh_token"]
-
- save_token_to_file(user_id, token_data)
-
- return True
-
@router.get("/login")
async def login():
- # Modified implementation without requiring current user
+ """Initiate Microsoft login for the current user"""
try:
# Create a confidential client application
msal_app = msal.ConfidentialClientApplication(
@@ -186,225 +194,293 @@ async def login():
client_credential=app_config["client_credential"]
)
- # Build the auth URL
+ # Build the auth URL with a random state
+ state = secrets.token_urlsafe(32)
+
auth_url = msal_app.get_authorization_request_url(
SCOPES,
- state="anonymous-user", # Use a general state since we don't have user context
+ state=state, # Use random state
redirect_uri=app_config["redirect_uri"]
)
- logger.info(f"Redirecting to Microsoft login: {auth_url[:60]}...")
+ logger.info(f"Redirecting to Microsoft login")
return RedirectResponse(auth_url)
except Exception as e:
logger.error(f"Error initiating Microsoft login: {str(e)}")
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
- detail=f"Error initiating Microsoft login: {str(e)}"
+ detail=f"Failed to initiate Microsoft login: {str(e)}"
)
-
+
@router.get("/auth/callback")
-async def auth_callback(request: Request, code: str = None, state: str = None):
- """Handle callback from Microsoft login"""
+async def auth_callback(code: str, state: str, request: Request):
+ """Handle Microsoft OAuth callback"""
try:
- # Log callback for debugging
- logger.info("Received callback from Microsoft login")
-
- if not code:
- logger.error("No authorization code received in callback")
- return JSONResponse(
- status_code=status.HTTP_400_BAD_REQUEST,
- content={"message": "No authorization code received"}
- )
-
- # Extract user and mandate info from state if available
- user_id = None
- mandate_id = None
-
- if state and state != "anonymous-user":
- try:
- mandate_id, user_id = state.split(":")
- logger.info(f"State contains mandate_id: {mandate_id}, user_id: {user_id}")
- except ValueError:
- logger.warning(f"Invalid state format: {state}")
- # Generate a generic user ID if state is invalid
- user_id = f"user_{datetime.now().strftime('%Y%m%d%H%M%S')}"
- else:
- # For anonymous authentication, create a generic user ID
- logger.info("Anonymous authentication (no user context)")
- user_id = f"user_{datetime.now().strftime('%Y%m%d%H%M%S')}"
-
- # Create a confidential client application
- msal_app = msal.ConfidentialClientApplication(
- app_config["client_id"],
- authority=app_config["authority"],
- client_credential=app_config["client_credential"]
+ # Create MSAL app instance
+ app = msal.ConfidentialClientApplication(
+ client_id=CLIENT_ID,
+ client_credential=CLIENT_SECRET,
+ authority=AUTHORITY
)
- # Get tokens using the authorization code
- result = msal_app.acquire_token_by_authorization_code(
- code,
+ # Exchange code for token
+ token_response = app.acquire_token_by_authorization_code(
+ code=code,
scopes=SCOPES,
- redirect_uri=app_config["redirect_uri"]
+ redirect_uri=REDIRECT_URI
)
- if "error" in result:
- logger.error(f"Error acquiring token: {result.get('error')}")
- return JSONResponse(
- status_code=status.HTTP_400_BAD_REQUEST,
- content={"message": f"Error acquiring token: {result.get('error_description', result.get('error'))}"}
+ if "error" in token_response:
+ logger.error(f"Token acquisition failed: {token_response['error']}")
+ return HTMLResponse(
+ content="""
+
+
Please try again.
+ + + + """, + status_code=400 ) - # Store user information - user_info = {} - if "id_token_claims" in result: - user_info = { - "name": result["id_token_claims"].get("name", ""), - "email": result["id_token_claims"].get("preferred_username", ""), - } - - # If we have user info from the token, use that for user_id - token_user_id = result["id_token_claims"].get("oid") or result["id_token_claims"].get("sub") - if token_user_id: - user_id = token_user_id - elif not user_id and user_info.get("email"): - # Fall back to email-based ID if no other ID is available - user_id = user_info.get("email", "user").replace("@", "_").replace(".", "_") - - # Save tokens to file - token_data = { - "access_token": result["access_token"], - "refresh_token": result.get("refresh_token", ""), - "user_info": user_info, - "timestamp": datetime.now().isoformat() - } - - # Ensure token directory exists - if not os.path.exists(TOKEN_DIR): - os.makedirs(TOKEN_DIR) - - # Save token to file - token_file = os.path.join(TOKEN_DIR, f"{user_id}.json") - with open(token_file, 'w') as f: - json.dump(token_data, f) - - logger.info(f"User authenticated: {user_info.get('email', 'unknown')}") - - # Create a success page - html_content = """ - + # Get user info from token + user_info = get_user_info_from_token(token_response["access_token"]) + if not user_info: + logger.error("Failed to get user info from token") + return HTMLResponse( + content=""" + + +Could not retrieve user information.
+ + + + """, + status_code=400 + ) + + # Add user info to token data + token_response["user_info"] = user_info + + # Store tokens in session storage for the frontend to pick up + response = HTMLResponse( + content=f""" - - -You have successfully authenticated with Microsoft.
-You can now close this tab and return to the application.
-Your email templates will now be able to create drafts in your mailbox.
- Close Window -Welcome, {user_info.get('name', 'User')}!
+This window will close automatically.
+ + """ - - return HTMLResponse(content=html_content) + ) - else: - logger.warning("No id_token_claims found in result") - return JSONResponse( - status_code=status.HTTP_400_BAD_REQUEST, - content={"message": "Failed to retrieve user information"} - ) + return response except Exception as e: - logger.error(f"Error in auth callback: {str(e)}", exc_info=True) - return JSONResponse( - status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, - content={"message": f"Error in auth callback: {str(e)}"} + logger.error(f"Authentication failed: {str(e)}") + return HTMLResponse( + content=""" + + +An error occurred during authentication.
+ + + + """, + status_code=500 ) - + @router.get("/status") -async def auth_status( - msft_user_id: Optional[str] = Cookie(None), - currentUser: Dict[str, Any] = Depends(getCurrentActiveUser) -): +async def auth_status(currentUser: Dict[str, Any] = Depends(getCurrentActiveUser)): """Check Microsoft authentication status""" try: - # Get user ID - if not msft_user_id: - mandateId, userId = await getUserContext(currentUser) - user_id = str(userId) - else: - user_id = msft_user_id + # Get current user context + mandateId, userId = await getUserContext(currentUser) + if not mandateId or not userId: + logger.info("No user context found") + return JSONResponse({ + "authenticated": False, + "message": "Not authenticated with Microsoft" + }) + + # Check if we have a token for the current user + token_data = await load_token_from_file(currentUser) - # Check if user has a token - token_data = load_token_from_file(user_id) if not token_data: - return JSONResponse( - content={"authenticated": False, "message": "Not authenticated with Microsoft"} + logger.info(f"No token data found for user {userId}") + return JSONResponse({ + "authenticated": False, + "message": "Not authenticated with Microsoft" + }) + + # Verify token is still valid + if not verify_token(token_data["access_token"]): + logger.info("Token invalid, attempting refresh") + # Try to refresh the token + if not await refresh_token(userId, currentUser): + logger.info("Token refresh failed") + return JSONResponse({ + "authenticated": False, + "message": "Token expired and refresh failed" + }) + # Reload token data after refresh + token_data = await load_token_from_file(currentUser) + + # Get user info from token data + user_info = token_data.get("user_info") + if not user_info: + logger.info("No user info found in token data") + return JSONResponse({ + "authenticated": False, + "message": "No user information available" + }) + + logger.info(f"User {user_info.get('name')} is authenticated") + return JSONResponse({ + 
"authenticated": True, + "user": user_info + }) + + except Exception as e: + logger.error(f"Error checking authentication status: {str(e)}") + return JSONResponse({ + "authenticated": False, + "message": f"Error checking authentication status: {str(e)}" + }) + +@router.post("/logout") +async def logout(currentUser: Dict[str, Any] = Depends(getCurrentActiveUser)): + """Logout from Microsoft""" + try: + # Get current user context + mandateId, userId = await getUserContext(currentUser) + if not mandateId or not userId: + return JSONResponse({ + "message": "Not authenticated with Microsoft" + }) + + # Get LucyDOM interface for current user + mydom = getLucydomInterface( + mandateId=mandateId, + userId=userId + ) + if not mydom: + return JSONResponse({ + "message": "Not authenticated with Microsoft" + }) + + # Remove token from database + tokens = mydom.db.getRecordset("msftTokens", recordFilter={ + "mandateId": mandateId, + "userId": userId + }) + + if tokens and len(tokens) > 0: + mydom.db.recordDelete("msftTokens", tokens[0]["id"]) + logger.info(f"Removed Microsoft token for user {userId}") + + return JSONResponse({ + "message": "Successfully logged out from Microsoft" + }) + + except Exception as e: + logger.error(f"Error during logout: {str(e)}") + raise HTTPException( + status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, + detail=f"Logout failed: {str(e)}" + ) + +@router.get("/token") +async def get_access_token(currentUser: Dict[str, Any] = Depends(getCurrentActiveUser)): + """Get the current user's access token for Microsoft Graph API""" + try: + # Check if we have a token for the current user + token_data = await load_token_from_file(currentUser) + + if not token_data: + raise HTTPException( + status_code=status.HTTP_401_UNAUTHORIZED, + detail="Not authenticated with Microsoft" ) - # Check if token is valid - if not verify_token(token_data.get("access_token", "")): - # Try to refresh token - if refresh_token(user_id): - token_data = load_token_from_file(user_id) 
- user_info = token_data.get("user_info", {}) - return JSONResponse( - content={ - "authenticated": True, - "message": "Token refreshed successfully", - "user": user_info - } - ) - else: - return JSONResponse( - content={ - "authenticated": False, - "message": "Token expired and couldn't be refreshed" - } + # Verify token is still valid + if not verify_token(token_data["access_token"]): + # Try to refresh the token + if not await refresh_token(currentUser["id"], currentUser): + raise HTTPException( + status_code=status.HTTP_401_UNAUTHORIZED, + detail="Token expired and refresh failed" ) + # Reload token data after refresh + token_data = await load_token_from_file(currentUser) + + return JSONResponse({ + "access_token": token_data["access_token"] + }) - # Token is valid, return user info - user_info = token_data.get("user_info", {}) - return JSONResponse( - content={ - "authenticated": True, - "message": "Authenticated with Microsoft", - "user": user_info - } - ) - except Exception as e: - logger.error(f"Error checking auth status: {str(e)}") - return JSONResponse( + logger.error(f"Error getting access token: {str(e)}") + raise HTTPException( status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, - content={"message": f"Error checking auth status: {str(e)}"} + detail=f"Error getting access token: {str(e)}" ) + @router.post("/token") async def get_backend_token(request: Request): """Convert MSAL token to backend token""" @@ -467,3 +543,74 @@ async def get_backend_token(request: Request): status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=f"Error processing MSAL token: {str(e)}" ) +======= +@router.post("/save-token") +async def save_token(token_data: Dict[str, Any], currentUser: Dict[str, Any] = Depends(getCurrentActiveUser)): + """Save Microsoft token data from frontend""" + try: + # Save token to database + success = await save_token_to_file(token_data, currentUser) + + if success: + return JSONResponse({ + "success": True, + "message": "Token saved 
successfully" + }) + else: + return JSONResponse({ + "success": False, + "message": "Failed to save token" + }) + + except Exception as e: + logger.error(f"Error saving token: {str(e)}") + return JSONResponse({ + "success": False, + "message": f"Error saving token: {str(e)}" + }) + +async def generateFinalMessage(self, objUserResponse: str, objFinalDocuments: List[str], objResults: List[Dict[str, Any]]) -> Dict[str, Any]: + """Generate the final message for the workflow""" + try: + # Get list of delivered documents + matchingDocuments = [] + for result in objResults: + if "documents" in result: + for doc in result["documents"]: + if doc.get("label") in objFinalDocuments: + matchingDocuments.append(doc.get("label")) + + # Use the mydom for language-aware AI calls + finalPrompt = await self.mydom.callAi([ + {"role": "system", "content": "You are a project manager, who delivers results to a user."}, + {"role": "user", "content": f""" +Give a brief summary of what has been accomplished, referencing the initial request (objUserResponse). List only the files that have been successfully delivered (filesDelivered). Keep the message concise and professional. + +Here the data: +objUserResponse = {self.parseJson2text(objUserResponse)} +filesDelivered = {self.parseJson2text(matchingDocuments)} +""" + } + ], produceUserAnswer=True) + + # Create basic message structure with proper fields + logger.debug(f"FINAL PROMPT = {self.parseJson2text(finalPrompt)}.") + finalMessage = { + "role": "assistant", + "agentName": "Project Manager", + "content": finalPrompt, + "documents": [] # DO NOT include the results documents, already with agents + } + + logger.debug(f"FINAL MESSAGE = {self.parseJson2text(finalMessage)}.") + return finalMessage + + except Exception as e: + logger.error(f"Error generating final message: {str(e)}") + return { + "role": "assistant", + "agentName": "Project Manager", + "content": "I apologize, but there was an error generating the final message. 
Please check the logs for more details.", + "documents": [] + } + diff --git a/static/10_email_preview.html b/static/10_email_preview.html deleted file mode 100644 index c900e097..00000000 --- a/static/10_email_preview.html +++ /dev/null @@ -1,42 +0,0 @@ - - - - - -Sehr geehrter Herr Muster,
ich hoffe, es geht Ihnen gut. Ich schreibe Ihnen, um unser geplantes Meeting von 10 Uhr auf Freitag zu verschieben. Bitte lassen Sie mich wissen, ob dieser neue Termin für Sie passt.
Vielen Dank für Ihr Verständnis.
Mit freundlichen Grüßen,
[Ihr Name]
Sehr geehrter Herr Muster,
ich hoffe, es geht Ihnen gut. Ich schreibe Ihnen, um unser geplantes Meeting von 10 Uhr auf Freitag zu verschieben. Bitte lassen Sie mich wissen, ob dieser neue Termin f\u00fcr Sie passt.
Vielen Dank f\u00fcr Ihr Verst\u00e4ndnis.
Mit freundlichen Gr\u00fc\u00dfen,
[Ihr Name]
Sehr geehrter Herr Muster,
ich hoffe, diese Nachricht trifft Sie wohl. Ich schreibe Ihnen, um eine Verschiebung unseres Termins von 10 Uhr auf Freitag zu erbitten. Bitte lassen Sie mich wissen, ob dies für Sie möglich ist.
Vielen Dank im Voraus für Ihre Flexibilität.
Mit freundlichen Grüßen,
[Ihr Name]
Sehr geehrter Herr Muster,
ich hoffe, diese Nachricht trifft Sie wohl. Ich schreibe Ihnen, um eine Verschiebung unseres Termins von 10 Uhr auf Freitag zu erbitten. Bitte lassen Sie mich wissen, ob dies f\u00fcr Sie m\u00f6glich ist.
Vielen Dank im Voraus f\u00fcr Ihre Flexibilit\u00e4t.
Mit freundlichen Gr\u00fc\u00dfen,
[Ihr Name]
To create email templates and drafts, you need to authenticate with your Microsoft account. Follow these steps:
- -Note: You only need to authenticate once. Your session will be remembered for future email operations.
-To create email templates and drafts, you need to authenticate with your Microsoft account.
- -The application will now initiate the Microsoft authentication process. Please follow the instructions in the authentication window.
- -Note: You only need to authenticate once. Your session will be remembered for future email operations.
-Sehr geehrter Herr Muster,
ich hoffe, es geht Ihnen gut. Ich schreibe Ihnen, um unser geplantes Meeting um 10 Uhr auf Freitag zu verschieben. Bitte lassen Sie mich wissen, ob dieser Termin für Sie passt.
Vielen Dank für Ihr Verständnis.
Mit freundlichen Grüßen,
[Ihr Name]
-Sehr geehrter Herr Muster,
ich hoffe, es geht Ihnen gut. Ich schreibe Ihnen, um unser geplantes Meeting um 10 Uhr auf Freitag zu verschieben. Bitte lassen Sie mich wissen, ob dieser Termin f\u00fcr Sie passt.
Vielen Dank f\u00fcr Ihr Verst\u00e4ndnis.
Mit freundlichen Gr\u00fc\u00dfen,
[Ihr Name]
" -} \ No newline at end of file diff --git a/static/18_generated_code.py b/static/18_generated_code.py deleted file mode 100644 index b53f58c4..00000000 --- a/static/18_generated_code.py +++ /dev/null @@ -1,48 +0,0 @@ -inputFiles = [] # DO NOT CHANGE THIS LINE - -# REQUIREMENTS: - -import json -import csv -from io import StringIO - -def is_prime(n): - if n <= 1: - return False - if n <= 3: - return True - if n % 2 == 0 or n % 3 == 0: - return False - i = 5 - while i * i <= n: - if n % i == 0 or n % (i + 2) == 0: - return False - i += 6 - return True - -def generate_primes(limit): - primes = [] - num = 2 - while len(primes) < limit: - if is_prime(num): - primes.append(num) - num += 1 - return primes - -primes = generate_primes(1000) - -output = StringIO() -csv_writer = csv.writer(output) -for prime in primes: - csv_writer.writerow([prime]) - -result = { - "prime_numbers.csv": { - "content": output.getvalue(), - "base64Encoded": False, - "contentType": "text/csv" - } -} - -import json -print(json.dumps(result)) \ No newline at end of file diff --git a/static/19_execution_history.json b/static/19_execution_history.json deleted file mode 100644 index 8b61dc57..00000000 --- a/static/19_execution_history.json +++ /dev/null @@ -1,19 +0,0 @@ -[ - { - "attempt": 1, - "code": "inputFiles = [] # DO NOT CHANGE THIS LINE\n\n# REQUIREMENTS: \n\nimport json\nimport csv\nfrom io import StringIO\n\ndef is_prime(n):\n if n <= 1:\n return False\n if n <= 3:\n return True\n if n % 2 == 0 or n % 3 == 0:\n return False\n i = 5\n while i * i <= n:\n if n % i == 0 or n % (i + 2) == 0:\n return False\n i += 6\n return True\n\ndef generate_primes(limit):\n primes = []\n num = 2\n while len(primes) < limit:\n if is_prime(num):\n primes.append(num)\n num += 1\n return primes\n\nprimes = generate_primes(1000)\n\noutput = StringIO()\ncsv_writer = csv.writer(output)\nfor prime in primes:\n csv_writer.writerow([prime])\n\nresult = {\n \"prime_numbers.csv\": {\n \"content\": output.getvalue(),\n 
\"base64Encoded\": False,\n \"contentType\": \"text/csv\"\n }\n}\n\nimport json\nprint(json.dumps(result))", - "result": { - "success": true, - "output": "{\"prime_numbers.csv\": {\"content\": \"2\\r\\n3\\r\\n5\\r\\n7\\r\\n11\\r\\n13\\r\\n17\\r\\n19\\r\\n23\\r\\n29\\r\\n31\\r\\n37\\r\\n41\\r\\n43\\r\\n47\\r\\n53\\r\\n59\\r\\n61\\r\\n67\\r\\n71\\r\\n73\\r\\n79\\r\\n83\\r\\n89\\r\\n97\\r\\n101\\r\\n103\\r\\n107\\r\\n109\\r\\n113\\r\\n127\\r\\n131\\r\\n137\\r\\n139\\r\\n149\\r\\n151\\r\\n157\\r\\n163\\r\\n167\\r\\n173\\r\\n179\\r\\n181\\r\\n191\\r\\n193\\r\\n197\\r\\n199\\r\\n211\\r\\n223\\r\\n227\\r\\n229\\r\\n233\\r\\n239\\r\\n241\\r\\n251\\r\\n257\\r\\n263\\r\\n269\\r\\n271\\r\\n277\\r\\n281\\r\\n283\\r\\n293\\r\\n307\\r\\n311\\r\\n313\\r\\n317\\r\\n331\\r\\n337\\r\\n347\\r\\n349\\r\\n353\\r\\n359\\r\\n367\\r\\n373\\r\\n379\\r\\n383\\r\\n389\\r\\n397\\r\\n401\\r\\n409\\r\\n419\\r\\n421\\r\\n431\\r\\n433\\r\\n439\\r\\n443\\r\\n449\\r\\n457\\r\\n461\\r\\n463\\r\\n467\\r\\n479\\r\\n487\\r\\n491\\r\\n499\\r\\n503\\r\\n509\\r\\n521\\r\\n523\\r\\n541\\r\\n547\\r\\n557\\r\\n563\\r\\n569\\r\\n571\\r\\n577\\r\\n587\\r\\n593\\r\\n599\\r\\n601\\r\\n607\\r\\n613\\r\\n617\\r\\n619\\r\\n631\\r\\n641\\r\\n643\\r\\n647\\r\\n653\\r\\n659\\r\\n661\\r\\n673\\r\\n677\\r\\n683\\r\\n691\\r\\n701\\r\\n709\\r\\n719\\r\\n727\\r\\n733\\r\\n739\\r\\n743\\r\\n751\\r\\n757\\r\\n761\\r\\n769\\r\\n773\\r\\n787\\r\\n797\\r\\n809\\r\\n811\\r\\n821\\r\\n823\\r\\n827\\r\\n829\\r\\n839\\r\\n853\\r\\n857\\r\\n859\\r\\n863\\r\\n877\\r\\n881\\r\\n883\\r\\n887\\r\\n907\\r\\n911\\r\\n919\\r\\n929\\r\\n937\\r\\n941\\r\\n947\\r\\n953\\r\\n967\\r\\n971\\r\\n977\\r\\n983\\r\\n991\\r\\n997\\r\\n1009\\r\\n1013\\r\\n1019\\r\\n1021\\r\\n1031\\r\\n1033\\r\\n1039\\r\\n1049\\r\\n1051\\r\\n1061\\r\\n1063\\r\\n1069\\r\\n1087\\r\\n1091\\r\\n1093\\r\\n1097\\r\\n1103\\r\\n1109\\r\\n1117\\r\\n1123\\r\\n1129\\r\\n1151\\r\\n1153\\r\\n1163\\r\\n1171\\r\\n1181\\r\\n1187\\r\\n1193\\r\\n1201\\r\\n1213\\r\\n1217\\r\\n1223\\r\\n12
29\\r\\n1231\\r\\n1237\\r\\n1249\\r\\n1259\\r\\n1277\\r\\n1279\\r\\n1283\\r\\n1289\\r\\n1291\\r\\n1297\\r\\n1301\\r\\n1303\\r\\n1307\\r\\n1319\\r\\n1321\\r\\n1327\\r\\n1361\\r\\n1367\\r\\n1373\\r\\n1381\\r\\n1399\\r\\n1409\\r\\n1423\\r\\n1427\\r\\n1429\\r\\n1433\\r\\n1439\\r\\n1447\\r\\n1451\\r\\n1453\\r\\n1459\\r\\n1471\\r\\n1481\\r\\n1483\\r\\n1487\\r\\n1489\\r\\n1493\\r\\n1499\\r\\n1511\\r\\n1523\\r\\n1531\\r\\n1543\\r\\n1549\\r\\n1553\\r\\n1559\\r\\n1567\\r\\n1571\\r\\n1579\\r\\n1583\\r\\n1597\\r\\n1601\\r\\n1607\\r\\n1609\\r\\n1613\\r\\n1619\\r\\n1621\\r\\n1627\\r\\n1637\\r\\n1657\\r\\n1663\\r\\n1667\\r\\n1669\\r\\n1693\\r\\n1697\\r\\n1699\\r\\n1709\\r\\n1721\\r\\n1723\\r\\n1733\\r\\n1741\\r\\n1747\\r\\n1753\\r\\n1759\\r\\n1777\\r\\n1783\\r\\n1787\\r\\n1789\\r\\n1801\\r\\n1811\\r\\n1823\\r\\n1831\\r\\n1847\\r\\n1861\\r\\n1867\\r\\n1871\\r\\n1873\\r\\n1877\\r\\n1879\\r\\n1889\\r\\n1901\\r\\n1907\\r\\n1913\\r\\n1931\\r\\n1933\\r\\n1949\\r\\n1951\\r\\n1973\\r\\n1979\\r\\n1987\\r\\n1993\\r\\n1997\\r\\n1999\\r\\n2003\\r\\n2011\\r\\n2017\\r\\n2027\\r\\n2029\\r\\n2039\\r\\n2053\\r\\n2063\\r\\n2069\\r\\n2081\\r\\n2083\\r\\n2087\\r\\n2089\\r\\n2099\\r\\n2111\\r\\n2113\\r\\n2129\\r\\n2131\\r\\n2137\\r\\n2141\\r\\n2143\\r\\n2153\\r\\n2161\\r\\n2179\\r\\n2203\\r\\n2207\\r\\n2213\\r\\n2221\\r\\n2237\\r\\n2239\\r\\n2243\\r\\n2251\\r\\n2267\\r\\n2269\\r\\n2273\\r\\n2281\\r\\n2287\\r\\n2293\\r\\n2297\\r\\n2309\\r\\n2311\\r\\n2333\\r\\n2339\\r\\n2341\\r\\n2347\\r\\n2351\\r\\n2357\\r\\n2371\\r\\n2377\\r\\n2381\\r\\n2383\\r\\n2389\\r\\n2393\\r\\n2399\\r\\n2411\\r\\n2417\\r\\n2423\\r\\n2437\\r\\n2441\\r\\n2447\\r\\n2459\\r\\n2467\\r\\n2473\\r\\n2477\\r\\n2503\\r\\n2521\\r\\n2531\\r\\n2539\\r\\n2543\\r\\n2549\\r\\n2551\\r\\n2557\\r\\n2579\\r\\n2591\\r\\n2593\\r\\n2609\\r\\n2617\\r\\n2621\\r\\n2633\\r\\n2647\\r\\n2657\\r\\n2659\\r\\n2663\\r\\n2671\\r\\n2677\\r\\n2683\\r\\n2687\\r\\n2689\\r\\n2693\\r\\n2699\\r\\n2707\\r\\n2711\\r\\n2713\\r\\n2719\\r\\n2729\\r\\n2731\\r\\n2741\\r\\n27
49\\r\\n2753\\r\\n2767\\r\\n2777\\r\\n2789\\r\\n2791\\r\\n2797\\r\\n2801\\r\\n2803\\r\\n2819\\r\\n2833\\r\\n2837\\r\\n2843\\r\\n2851\\r\\n2857\\r\\n2861\\r\\n2879\\r\\n2887\\r\\n2897\\r\\n2903\\r\\n2909\\r\\n2917\\r\\n2927\\r\\n2939\\r\\n2953\\r\\n2957\\r\\n2963\\r\\n2969\\r\\n2971\\r\\n2999\\r\\n3001\\r\\n3011\\r\\n3019\\r\\n3023\\r\\n3037\\r\\n3041\\r\\n3049\\r\\n3061\\r\\n3067\\r\\n3079\\r\\n3083\\r\\n3089\\r\\n3109\\r\\n3119\\r\\n3121\\r\\n3137\\r\\n3163\\r\\n3167\\r\\n3169\\r\\n3181\\r\\n3187\\r\\n3191\\r\\n3203\\r\\n3209\\r\\n3217\\r\\n3221\\r\\n3229\\r\\n3251\\r\\n3253\\r\\n3257\\r\\n3259\\r\\n3271\\r\\n3299\\r\\n3301\\r\\n3307\\r\\n3313\\r\\n3319\\r\\n3323\\r\\n3329\\r\\n3331\\r\\n3343\\r\\n3347\\r\\n3359\\r\\n3361\\r\\n3371\\r\\n3373\\r\\n3389\\r\\n3391\\r\\n3407\\r\\n3413\\r\\n3433\\r\\n3449\\r\\n3457\\r\\n3461\\r\\n3463\\r\\n3467\\r\\n3469\\r\\n3491\\r\\n3499\\r\\n3511\\r\\n3517\\r\\n3527\\r\\n3529\\r\\n3533\\r\\n3539\\r\\n3541\\r\\n3547\\r\\n3557\\r\\n3559\\r\\n3571\\r\\n3581\\r\\n3583\\r\\n3593\\r\\n3607\\r\\n3613\\r\\n3617\\r\\n3623\\r\\n3631\\r\\n3637\\r\\n3643\\r\\n3659\\r\\n3671\\r\\n3673\\r\\n3677\\r\\n3691\\r\\n3697\\r\\n3701\\r\\n3709\\r\\n3719\\r\\n3727\\r\\n3733\\r\\n3739\\r\\n3761\\r\\n3767\\r\\n3769\\r\\n3779\\r\\n3793\\r\\n3797\\r\\n3803\\r\\n3821\\r\\n3823\\r\\n3833\\r\\n3847\\r\\n3851\\r\\n3853\\r\\n3863\\r\\n3877\\r\\n3881\\r\\n3889\\r\\n3907\\r\\n3911\\r\\n3917\\r\\n3919\\r\\n3923\\r\\n3929\\r\\n3931\\r\\n3943\\r\\n3947\\r\\n3967\\r\\n3989\\r\\n4001\\r\\n4003\\r\\n4007\\r\\n4013\\r\\n4019\\r\\n4021\\r\\n4027\\r\\n4049\\r\\n4051\\r\\n4057\\r\\n4073\\r\\n4079\\r\\n4091\\r\\n4093\\r\\n4099\\r\\n4111\\r\\n4127\\r\\n4129\\r\\n4133\\r\\n4139\\r\\n4153\\r\\n4157\\r\\n4159\\r\\n4177\\r\\n4201\\r\\n4211\\r\\n4217\\r\\n4219\\r\\n4229\\r\\n4231\\r\\n4241\\r\\n4243\\r\\n4253\\r\\n4259\\r\\n4261\\r\\n4271\\r\\n4273\\r\\n4283\\r\\n4289\\r\\n4297\\r\\n4327\\r\\n4337\\r\\n4339\\r\\n4349\\r\\n4357\\r\\n4363\\r\\n4373\\r\\n4391\\r\\n4397\\r\\n4409\\r\\n44
21\\r\\n4423\\r\\n4441\\r\\n4447\\r\\n4451\\r\\n4457\\r\\n4463\\r\\n4481\\r\\n4483\\r\\n4493\\r\\n4507\\r\\n4513\\r\\n4517\\r\\n4519\\r\\n4523\\r\\n4547\\r\\n4549\\r\\n4561\\r\\n4567\\r\\n4583\\r\\n4591\\r\\n4597\\r\\n4603\\r\\n4621\\r\\n4637\\r\\n4639\\r\\n4643\\r\\n4649\\r\\n4651\\r\\n4657\\r\\n4663\\r\\n4673\\r\\n4679\\r\\n4691\\r\\n4703\\r\\n4721\\r\\n4723\\r\\n4729\\r\\n4733\\r\\n4751\\r\\n4759\\r\\n4783\\r\\n4787\\r\\n4789\\r\\n4793\\r\\n4799\\r\\n4801\\r\\n4813\\r\\n4817\\r\\n4831\\r\\n4861\\r\\n4871\\r\\n4877\\r\\n4889\\r\\n4903\\r\\n4909\\r\\n4919\\r\\n4931\\r\\n4933\\r\\n4937\\r\\n4943\\r\\n4951\\r\\n4957\\r\\n4967\\r\\n4969\\r\\n4973\\r\\n4987\\r\\n4993\\r\\n4999\\r\\n5003\\r\\n5009\\r\\n5011\\r\\n5021\\r\\n5023\\r\\n5039\\r\\n5051\\r\\n5059\\r\\n5077\\r\\n5081\\r\\n5087\\r\\n5099\\r\\n5101\\r\\n5107\\r\\n5113\\r\\n5119\\r\\n5147\\r\\n5153\\r\\n5167\\r\\n5171\\r\\n5179\\r\\n5189\\r\\n5197\\r\\n5209\\r\\n5227\\r\\n5231\\r\\n5233\\r\\n5237\\r\\n5261\\r\\n5273\\r\\n5279\\r\\n5281\\r\\n5297\\r\\n5303\\r\\n5309\\r\\n5323\\r\\n5333\\r\\n5347\\r\\n5351\\r\\n5381\\r\\n5387\\r\\n5393\\r\\n5399\\r\\n5407\\r\\n5413\\r\\n5417\\r\\n5419\\r\\n5431\\r\\n5437\\r\\n5441\\r\\n5443\\r\\n5449\\r\\n5471\\r\\n5477\\r\\n5479\\r\\n5483\\r\\n5501\\r\\n5503\\r\\n5507\\r\\n5519\\r\\n5521\\r\\n5527\\r\\n5531\\r\\n5557\\r\\n5563\\r\\n5569\\r\\n5573\\r\\n5581\\r\\n5591\\r\\n5623\\r\\n5639\\r\\n5641\\r\\n5647\\r\\n5651\\r\\n5653\\r\\n5657\\r\\n5659\\r\\n5669\\r\\n5683\\r\\n5689\\r\\n5693\\r\\n5701\\r\\n5711\\r\\n5717\\r\\n5737\\r\\n5741\\r\\n5743\\r\\n5749\\r\\n5779\\r\\n5783\\r\\n5791\\r\\n5801\\r\\n5807\\r\\n5813\\r\\n5821\\r\\n5827\\r\\n5839\\r\\n5843\\r\\n5849\\r\\n5851\\r\\n5857\\r\\n5861\\r\\n5867\\r\\n5869\\r\\n5879\\r\\n5881\\r\\n5897\\r\\n5903\\r\\n5923\\r\\n5927\\r\\n5939\\r\\n5953\\r\\n5981\\r\\n5987\\r\\n6007\\r\\n6011\\r\\n6029\\r\\n6037\\r\\n6043\\r\\n6047\\r\\n6053\\r\\n6067\\r\\n6073\\r\\n6079\\r\\n6089\\r\\n6091\\r\\n6101\\r\\n6113\\r\\n6121\\r\\n6131\\r\\n6133\\r\\n61
43\\r\\n6151\\r\\n6163\\r\\n6173\\r\\n6197\\r\\n6199\\r\\n6203\\r\\n6211\\r\\n6217\\r\\n6221\\r\\n6229\\r\\n6247\\r\\n6257\\r\\n6263\\r\\n6269\\r\\n6271\\r\\n6277\\r\\n6287\\r\\n6299\\r\\n6301\\r\\n6311\\r\\n6317\\r\\n6323\\r\\n6329\\r\\n6337\\r\\n6343\\r\\n6353\\r\\n6359\\r\\n6361\\r\\n6367\\r\\n6373\\r\\n6379\\r\\n6389\\r\\n6397\\r\\n6421\\r\\n6427\\r\\n6449\\r\\n6451\\r\\n6469\\r\\n6473\\r\\n6481\\r\\n6491\\r\\n6521\\r\\n6529\\r\\n6547\\r\\n6551\\r\\n6553\\r\\n6563\\r\\n6569\\r\\n6571\\r\\n6577\\r\\n6581\\r\\n6599\\r\\n6607\\r\\n6619\\r\\n6637\\r\\n6653\\r\\n6659\\r\\n6661\\r\\n6673\\r\\n6679\\r\\n6689\\r\\n6691\\r\\n6701\\r\\n6703\\r\\n6709\\r\\n6719\\r\\n6733\\r\\n6737\\r\\n6761\\r\\n6763\\r\\n6779\\r\\n6781\\r\\n6791\\r\\n6793\\r\\n6803\\r\\n6823\\r\\n6827\\r\\n6829\\r\\n6833\\r\\n6841\\r\\n6857\\r\\n6863\\r\\n6869\\r\\n6871\\r\\n6883\\r\\n6899\\r\\n6907\\r\\n6911\\r\\n6917\\r\\n6947\\r\\n6949\\r\\n6959\\r\\n6961\\r\\n6967\\r\\n6971\\r\\n6977\\r\\n6983\\r\\n6991\\r\\n6997\\r\\n7001\\r\\n7013\\r\\n7019\\r\\n7027\\r\\n7039\\r\\n7043\\r\\n7057\\r\\n7069\\r\\n7079\\r\\n7103\\r\\n7109\\r\\n7121\\r\\n7127\\r\\n7129\\r\\n7151\\r\\n7159\\r\\n7177\\r\\n7187\\r\\n7193\\r\\n7207\\r\\n7211\\r\\n7213\\r\\n7219\\r\\n7229\\r\\n7237\\r\\n7243\\r\\n7247\\r\\n7253\\r\\n7283\\r\\n7297\\r\\n7307\\r\\n7309\\r\\n7321\\r\\n7331\\r\\n7333\\r\\n7349\\r\\n7351\\r\\n7369\\r\\n7393\\r\\n7411\\r\\n7417\\r\\n7433\\r\\n7451\\r\\n7457\\r\\n7459\\r\\n7477\\r\\n7481\\r\\n7487\\r\\n7489\\r\\n7499\\r\\n7507\\r\\n7517\\r\\n7523\\r\\n7529\\r\\n7537\\r\\n7541\\r\\n7547\\r\\n7549\\r\\n7559\\r\\n7561\\r\\n7573\\r\\n7577\\r\\n7583\\r\\n7589\\r\\n7591\\r\\n7603\\r\\n7607\\r\\n7621\\r\\n7639\\r\\n7643\\r\\n7649\\r\\n7669\\r\\n7673\\r\\n7681\\r\\n7687\\r\\n7691\\r\\n7699\\r\\n7703\\r\\n7717\\r\\n7723\\r\\n7727\\r\\n7741\\r\\n7753\\r\\n7757\\r\\n7759\\r\\n7789\\r\\n7793\\r\\n7817\\r\\n7823\\r\\n7829\\r\\n7841\\r\\n7853\\r\\n7867\\r\\n7873\\r\\n7877\\r\\n7879\\r\\n7883\\r\\n7901\\r\\n7907\\r\\n7919\\r\\n\"
, \"base64Encoded\": false, \"contentType\": \"text/csv\"}}\n", - "error": "", - "result": { - "prime_numbers.csv": { - "content": "2\r\n3\r\n5\r\n7\r\n11\r\n13\r\n17\r\n19\r\n23\r\n29\r\n31\r\n37\r\n41\r\n43\r\n47\r\n53\r\n59\r\n61\r\n67\r\n71\r\n73\r\n79\r\n83\r\n89\r\n97\r\n101\r\n103\r\n107\r\n109\r\n113\r\n127\r\n131\r\n137\r\n139\r\n149\r\n151\r\n157\r\n163\r\n167\r\n173\r\n179\r\n181\r\n191\r\n193\r\n197\r\n199\r\n211\r\n223\r\n227\r\n229\r\n233\r\n239\r\n241\r\n251\r\n257\r\n263\r\n269\r\n271\r\n277\r\n281\r\n283\r\n293\r\n307\r\n311\r\n313\r\n317\r\n331\r\n337\r\n347\r\n349\r\n353\r\n359\r\n367\r\n373\r\n379\r\n383\r\n389\r\n397\r\n401\r\n409\r\n419\r\n421\r\n431\r\n433\r\n439\r\n443\r\n449\r\n457\r\n461\r\n463\r\n467\r\n479\r\n487\r\n491\r\n499\r\n503\r\n509\r\n521\r\n523\r\n541\r\n547\r\n557\r\n563\r\n569\r\n571\r\n577\r\n587\r\n593\r\n599\r\n601\r\n607\r\n613\r\n617\r\n619\r\n631\r\n641\r\n643\r\n647\r\n653\r\n659\r\n661\r\n673\r\n677\r\n683\r\n691\r\n701\r\n709\r\n719\r\n727\r\n733\r\n739\r\n743\r\n751\r\n757\r\n761\r\n769\r\n773\r\n787\r\n797\r\n809\r\n811\r\n821\r\n823\r\n827\r\n829\r\n839\r\n853\r\n857\r\n859\r\n863\r\n877\r\n881\r\n883\r\n887\r\n907\r\n911\r\n919\r\n929\r\n937\r\n941\r\n947\r\n953\r\n967\r\n971\r\n977\r\n983\r\n991\r\n997\r\n1009\r\n1013\r\n1019\r\n1021\r\n1031\r\n1033\r\n1039\r\n1049\r\n1051\r\n1061\r\n1063\r\n1069\r\n1087\r\n1091\r\n1093\r\n1097\r\n1103\r\n1109\r\n1117\r\n1123\r\n1129\r\n1151\r\n1153\r\n1163\r\n1171\r\n1181\r\n1187\r\n1193\r\n1201\r\n1213\r\n1217\r\n1223\r\n1229\r\n1231\r\n1237\r\n1249\r\n1259\r\n1277\r\n1279\r\n1283\r\n1289\r\n1291\r\n1297\r\n1301\r\n1303\r\n1307\r\n1319\r\n1321\r\n1327\r\n1361\r\n1367\r\n1373\r\n1381\r\n1399\r\n1409\r\n1423\r\n1427\r\n1429\r\n1433\r\n1439\r\n1447\r\n1451\r\n1453\r\n1459\r\n1471\r\n1481\r\n1483\r\n1487\r\n1489\r\n1493\r\n1499\r\n1511\r\n1523\r\n1531\r\n1543\r\n1549\r\n1553\r\n1559\r\n1567\r\n1571\r\n1579\r\n1583\r\n1597\r\n1601\r\n1607\r\n1609\r\n1613\r\n1619\r\n1621\r\n1627\r\n1
637\r\n1657\r\n1663\r\n1667\r\n1669\r\n1693\r\n1697\r\n1699\r\n1709\r\n1721\r\n1723\r\n1733\r\n1741\r\n1747\r\n1753\r\n1759\r\n1777\r\n1783\r\n1787\r\n1789\r\n1801\r\n1811\r\n1823\r\n1831\r\n1847\r\n1861\r\n1867\r\n1871\r\n1873\r\n1877\r\n1879\r\n1889\r\n1901\r\n1907\r\n1913\r\n1931\r\n1933\r\n1949\r\n1951\r\n1973\r\n1979\r\n1987\r\n1993\r\n1997\r\n1999\r\n2003\r\n2011\r\n2017\r\n2027\r\n2029\r\n2039\r\n2053\r\n2063\r\n2069\r\n2081\r\n2083\r\n2087\r\n2089\r\n2099\r\n2111\r\n2113\r\n2129\r\n2131\r\n2137\r\n2141\r\n2143\r\n2153\r\n2161\r\n2179\r\n2203\r\n2207\r\n2213\r\n2221\r\n2237\r\n2239\r\n2243\r\n2251\r\n2267\r\n2269\r\n2273\r\n2281\r\n2287\r\n2293\r\n2297\r\n2309\r\n2311\r\n2333\r\n2339\r\n2341\r\n2347\r\n2351\r\n2357\r\n2371\r\n2377\r\n2381\r\n2383\r\n2389\r\n2393\r\n2399\r\n2411\r\n2417\r\n2423\r\n2437\r\n2441\r\n2447\r\n2459\r\n2467\r\n2473\r\n2477\r\n2503\r\n2521\r\n2531\r\n2539\r\n2543\r\n2549\r\n2551\r\n2557\r\n2579\r\n2591\r\n2593\r\n2609\r\n2617\r\n2621\r\n2633\r\n2647\r\n2657\r\n2659\r\n2663\r\n2671\r\n2677\r\n2683\r\n2687\r\n2689\r\n2693\r\n2699\r\n2707\r\n2711\r\n2713\r\n2719\r\n2729\r\n2731\r\n2741\r\n2749\r\n2753\r\n2767\r\n2777\r\n2789\r\n2791\r\n2797\r\n2801\r\n2803\r\n2819\r\n2833\r\n2837\r\n2843\r\n2851\r\n2857\r\n2861\r\n2879\r\n2887\r\n2897\r\n2903\r\n2909\r\n2917\r\n2927\r\n2939\r\n2953\r\n2957\r\n2963\r\n2969\r\n2971\r\n2999\r\n3001\r\n3011\r\n3019\r\n3023\r\n3037\r\n3041\r\n3049\r\n3061\r\n3067\r\n3079\r\n3083\r\n3089\r\n3109\r\n3119\r\n3121\r\n3137\r\n3163\r\n3167\r\n3169\r\n3181\r\n3187\r\n3191\r\n3203\r\n3209\r\n3217\r\n3221\r\n3229\r\n3251\r\n3253\r\n3257\r\n3259\r\n3271\r\n3299\r\n3301\r\n3307\r\n3313\r\n3319\r\n3323\r\n3329\r\n3331\r\n3343\r\n3347\r\n3359\r\n3361\r\n3371\r\n3373\r\n3389\r\n3391\r\n3407\r\n3413\r\n3433\r\n3449\r\n3457\r\n3461\r\n3463\r\n3467\r\n3469\r\n3491\r\n3499\r\n3511\r\n3517\r\n3527\r\n3529\r\n3533\r\n3539\r\n3541\r\n3547\r\n3557\r\n3559\r\n3571\r\n3581\r\n3583\r\n3593\r\n3607\r\n3613\r\n3617\r\n3623\r\n3631\r\n3
637\r\n3643\r\n3659\r\n3671\r\n3673\r\n3677\r\n3691\r\n3697\r\n3701\r\n3709\r\n3719\r\n3727\r\n3733\r\n3739\r\n3761\r\n3767\r\n3769\r\n3779\r\n3793\r\n3797\r\n3803\r\n3821\r\n3823\r\n3833\r\n3847\r\n3851\r\n3853\r\n3863\r\n3877\r\n3881\r\n3889\r\n3907\r\n3911\r\n3917\r\n3919\r\n3923\r\n3929\r\n3931\r\n3943\r\n3947\r\n3967\r\n3989\r\n4001\r\n4003\r\n4007\r\n4013\r\n4019\r\n4021\r\n4027\r\n4049\r\n4051\r\n4057\r\n4073\r\n4079\r\n4091\r\n4093\r\n4099\r\n4111\r\n4127\r\n4129\r\n4133\r\n4139\r\n4153\r\n4157\r\n4159\r\n4177\r\n4201\r\n4211\r\n4217\r\n4219\r\n4229\r\n4231\r\n4241\r\n4243\r\n4253\r\n4259\r\n4261\r\n4271\r\n4273\r\n4283\r\n4289\r\n4297\r\n4327\r\n4337\r\n4339\r\n4349\r\n4357\r\n4363\r\n4373\r\n4391\r\n4397\r\n4409\r\n4421\r\n4423\r\n4441\r\n4447\r\n4451\r\n4457\r\n4463\r\n4481\r\n4483\r\n4493\r\n4507\r\n4513\r\n4517\r\n4519\r\n4523\r\n4547\r\n4549\r\n4561\r\n4567\r\n4583\r\n4591\r\n4597\r\n4603\r\n4621\r\n4637\r\n4639\r\n4643\r\n4649\r\n4651\r\n4657\r\n4663\r\n4673\r\n4679\r\n4691\r\n4703\r\n4721\r\n4723\r\n4729\r\n4733\r\n4751\r\n4759\r\n4783\r\n4787\r\n4789\r\n4793\r\n4799\r\n4801\r\n4813\r\n4817\r\n4831\r\n4861\r\n4871\r\n4877\r\n4889\r\n4903\r\n4909\r\n4919\r\n4931\r\n4933\r\n4937\r\n4943\r\n4951\r\n4957\r\n4967\r\n4969\r\n4973\r\n4987\r\n4993\r\n4999\r\n5003\r\n5009\r\n5011\r\n5021\r\n5023\r\n5039\r\n5051\r\n5059\r\n5077\r\n5081\r\n5087\r\n5099\r\n5101\r\n5107\r\n5113\r\n5119\r\n5147\r\n5153\r\n5167\r\n5171\r\n5179\r\n5189\r\n5197\r\n5209\r\n5227\r\n5231\r\n5233\r\n5237\r\n5261\r\n5273\r\n5279\r\n5281\r\n5297\r\n5303\r\n5309\r\n5323\r\n5333\r\n5347\r\n5351\r\n5381\r\n5387\r\n5393\r\n5399\r\n5407\r\n5413\r\n5417\r\n5419\r\n5431\r\n5437\r\n5441\r\n5443\r\n5449\r\n5471\r\n5477\r\n5479\r\n5483\r\n5501\r\n5503\r\n5507\r\n5519\r\n5521\r\n5527\r\n5531\r\n5557\r\n5563\r\n5569\r\n5573\r\n5581\r\n5591\r\n5623\r\n5639\r\n5641\r\n5647\r\n5651\r\n5653\r\n5657\r\n5659\r\n5669\r\n5683\r\n5689\r\n5693\r\n5701\r\n5711\r\n5717\r\n5737\r\n5741\r\n5743\r\n5749\r\n5779\r\n5
783\r\n5791\r\n5801\r\n5807\r\n5813\r\n5821\r\n5827\r\n5839\r\n5843\r\n5849\r\n5851\r\n5857\r\n5861\r\n5867\r\n5869\r\n5879\r\n5881\r\n5897\r\n5903\r\n5923\r\n5927\r\n5939\r\n5953\r\n5981\r\n5987\r\n6007\r\n6011\r\n6029\r\n6037\r\n6043\r\n6047\r\n6053\r\n6067\r\n6073\r\n6079\r\n6089\r\n6091\r\n6101\r\n6113\r\n6121\r\n6131\r\n6133\r\n6143\r\n6151\r\n6163\r\n6173\r\n6197\r\n6199\r\n6203\r\n6211\r\n6217\r\n6221\r\n6229\r\n6247\r\n6257\r\n6263\r\n6269\r\n6271\r\n6277\r\n6287\r\n6299\r\n6301\r\n6311\r\n6317\r\n6323\r\n6329\r\n6337\r\n6343\r\n6353\r\n6359\r\n6361\r\n6367\r\n6373\r\n6379\r\n6389\r\n6397\r\n6421\r\n6427\r\n6449\r\n6451\r\n6469\r\n6473\r\n6481\r\n6491\r\n6521\r\n6529\r\n6547\r\n6551\r\n6553\r\n6563\r\n6569\r\n6571\r\n6577\r\n6581\r\n6599\r\n6607\r\n6619\r\n6637\r\n6653\r\n6659\r\n6661\r\n6673\r\n6679\r\n6689\r\n6691\r\n6701\r\n6703\r\n6709\r\n6719\r\n6733\r\n6737\r\n6761\r\n6763\r\n6779\r\n6781\r\n6791\r\n6793\r\n6803\r\n6823\r\n6827\r\n6829\r\n6833\r\n6841\r\n6857\r\n6863\r\n6869\r\n6871\r\n6883\r\n6899\r\n6907\r\n6911\r\n6917\r\n6947\r\n6949\r\n6959\r\n6961\r\n6967\r\n6971\r\n6977\r\n6983\r\n6991\r\n6997\r\n7001\r\n7013\r\n7019\r\n7027\r\n7039\r\n7043\r\n7057\r\n7069\r\n7079\r\n7103\r\n7109\r\n7121\r\n7127\r\n7129\r\n7151\r\n7159\r\n7177\r\n7187\r\n7193\r\n7207\r\n7211\r\n7213\r\n7219\r\n7229\r\n7237\r\n7243\r\n7247\r\n7253\r\n7283\r\n7297\r\n7307\r\n7309\r\n7321\r\n7331\r\n7333\r\n7349\r\n7351\r\n7369\r\n7393\r\n7411\r\n7417\r\n7433\r\n7451\r\n7457\r\n7459\r\n7477\r\n7481\r\n7487\r\n7489\r\n7499\r\n7507\r\n7517\r\n7523\r\n7529\r\n7537\r\n7541\r\n7547\r\n7549\r\n7559\r\n7561\r\n7573\r\n7577\r\n7583\r\n7589\r\n7591\r\n7603\r\n7607\r\n7621\r\n7639\r\n7643\r\n7649\r\n7669\r\n7673\r\n7681\r\n7687\r\n7691\r\n7699\r\n7703\r\n7717\r\n7723\r\n7727\r\n7741\r\n7753\r\n7757\r\n7759\r\n7789\r\n7793\r\n7817\r\n7823\r\n7829\r\n7841\r\n7853\r\n7867\r\n7873\r\n7877\r\n7879\r\n7883\r\n7901\r\n7907\r\n7919\r\n", - "base64Encoded": false, - "contentType": "text/csv" - } - 
}, - "exitCode": 0 - } - } -] \ No newline at end of file diff --git a/static/1_LF-Details.png b/static/1_LF-Details.png deleted file mode 100644 index 3a2be57d..00000000 Binary files a/static/1_LF-Details.png and /dev/null differ diff --git a/static/20_prime_numbers.csv b/static/20_prime_numbers.csv deleted file mode 100644 index d5c2a856..00000000 --- a/static/20_prime_numbers.csv +++ /dev/null @@ -1,1000 +0,0 @@ -2 -3 -5 -7 -11 -13 -17 -19 -23 -29 -31 -37 -41 -43 -47 -53 -59 -61 -67 -71 -73 -79 -83 -89 -97 -101 -103 -107 -109 -113 -127 -131 -137 -139 -149 -151 -157 -163 -167 -173 -179 -181 -191 -193 -197 -199 -211 -223 -227 -229 -233 -239 -241 -251 -257 -263 -269 -271 -277 -281 -283 -293 -307 -311 -313 -317 -331 -337 -347 -349 -353 -359 -367 -373 -379 -383 -389 -397 -401 -409 -419 -421 -431 -433 -439 -443 -449 -457 -461 -463 -467 -479 -487 -491 -499 -503 -509 -521 -523 -541 -547 -557 -563 -569 -571 -577 -587 -593 -599 -601 -607 -613 -617 -619 -631 -641 -643 -647 -653 -659 -661 -673 -677 -683 -691 -701 -709 -719 -727 -733 -739 -743 -751 -757 -761 -769 -773 -787 -797 -809 -811 -821 -823 -827 -829 -839 -853 -857 -859 -863 -877 -881 -883 -887 -907 -911 -919 -929 -937 -941 -947 -953 -967 -971 -977 -983 -991 -997 -1009 -1013 -1019 -1021 -1031 -1033 -1039 -1049 -1051 -1061 -1063 -1069 -1087 -1091 -1093 -1097 -1103 -1109 -1117 -1123 -1129 -1151 -1153 -1163 -1171 -1181 -1187 -1193 -1201 -1213 -1217 -1223 -1229 -1231 -1237 -1249 -1259 -1277 -1279 -1283 -1289 -1291 -1297 -1301 -1303 -1307 -1319 -1321 -1327 -1361 -1367 -1373 -1381 -1399 -1409 -1423 -1427 -1429 -1433 -1439 -1447 -1451 -1453 -1459 -1471 -1481 -1483 -1487 -1489 -1493 -1499 -1511 -1523 -1531 -1543 -1549 -1553 -1559 -1567 -1571 -1579 -1583 -1597 -1601 -1607 -1609 -1613 -1619 -1621 -1627 -1637 -1657 -1663 -1667 -1669 -1693 -1697 -1699 -1709 -1721 -1723 -1733 -1741 -1747 -1753 -1759 -1777 -1783 -1787 -1789 -1801 -1811 -1823 -1831 -1847 -1861 -1867 -1871 -1873 -1877 -1879 -1889 -1901 -1907 -1913 -1931 -1933 -1949 
-1951 -1973 -1979 -1987 -1993 -1997 -1999 -2003 -2011 -2017 -2027 -2029 -2039 -2053 -2063 -2069 -2081 -2083 -2087 -2089 -2099 -2111 -2113 -2129 -2131 -2137 -2141 -2143 -2153 -2161 -2179 -2203 -2207 -2213 -2221 -2237 -2239 -2243 -2251 -2267 -2269 -2273 -2281 -2287 -2293 -2297 -2309 -2311 -2333 -2339 -2341 -2347 -2351 -2357 -2371 -2377 -2381 -2383 -2389 -2393 -2399 -2411 -2417 -2423 -2437 -2441 -2447 -2459 -2467 -2473 -2477 -2503 -2521 -2531 -2539 -2543 -2549 -2551 -2557 -2579 -2591 -2593 -2609 -2617 -2621 -2633 -2647 -2657 -2659 -2663 -2671 -2677 -2683 -2687 -2689 -2693 -2699 -2707 -2711 -2713 -2719 -2729 -2731 -2741 -2749 -2753 -2767 -2777 -2789 -2791 -2797 -2801 -2803 -2819 -2833 -2837 -2843 -2851 -2857 -2861 -2879 -2887 -2897 -2903 -2909 -2917 -2927 -2939 -2953 -2957 -2963 -2969 -2971 -2999 -3001 -3011 -3019 -3023 -3037 -3041 -3049 -3061 -3067 -3079 -3083 -3089 -3109 -3119 -3121 -3137 -3163 -3167 -3169 -3181 -3187 -3191 -3203 -3209 -3217 -3221 -3229 -3251 -3253 -3257 -3259 -3271 -3299 -3301 -3307 -3313 -3319 -3323 -3329 -3331 -3343 -3347 -3359 -3361 -3371 -3373 -3389 -3391 -3407 -3413 -3433 -3449 -3457 -3461 -3463 -3467 -3469 -3491 -3499 -3511 -3517 -3527 -3529 -3533 -3539 -3541 -3547 -3557 -3559 -3571 -3581 -3583 -3593 -3607 -3613 -3617 -3623 -3631 -3637 -3643 -3659 -3671 -3673 -3677 -3691 -3697 -3701 -3709 -3719 -3727 -3733 -3739 -3761 -3767 -3769 -3779 -3793 -3797 -3803 -3821 -3823 -3833 -3847 -3851 -3853 -3863 -3877 -3881 -3889 -3907 -3911 -3917 -3919 -3923 -3929 -3931 -3943 -3947 -3967 -3989 -4001 -4003 -4007 -4013 -4019 -4021 -4027 -4049 -4051 -4057 -4073 -4079 -4091 -4093 -4099 -4111 -4127 -4129 -4133 -4139 -4153 -4157 -4159 -4177 -4201 -4211 -4217 -4219 -4229 -4231 -4241 -4243 -4253 -4259 -4261 -4271 -4273 -4283 -4289 -4297 -4327 -4337 -4339 -4349 -4357 -4363 -4373 -4391 -4397 -4409 -4421 -4423 -4441 -4447 -4451 -4457 -4463 -4481 -4483 -4493 -4507 -4513 -4517 -4519 -4523 -4547 -4549 -4561 -4567 -4583 -4591 -4597 -4603 -4621 -4637 -4639 -4643 -4649 -4651 
-4657 -4663 -4673 -4679 -4691 -4703 -4721 -4723 -4729 -4733 -4751 -4759 -4783 -4787 -4789 -4793 -4799 -4801 -4813 -4817 -4831 -4861 -4871 -4877 -4889 -4903 -4909 -4919 -4931 -4933 -4937 -4943 -4951 -4957 -4967 -4969 -4973 -4987 -4993 -4999 -5003 -5009 -5011 -5021 -5023 -5039 -5051 -5059 -5077 -5081 -5087 -5099 -5101 -5107 -5113 -5119 -5147 -5153 -5167 -5171 -5179 -5189 -5197 -5209 -5227 -5231 -5233 -5237 -5261 -5273 -5279 -5281 -5297 -5303 -5309 -5323 -5333 -5347 -5351 -5381 -5387 -5393 -5399 -5407 -5413 -5417 -5419 -5431 -5437 -5441 -5443 -5449 -5471 -5477 -5479 -5483 -5501 -5503 -5507 -5519 -5521 -5527 -5531 -5557 -5563 -5569 -5573 -5581 -5591 -5623 -5639 -5641 -5647 -5651 -5653 -5657 -5659 -5669 -5683 -5689 -5693 -5701 -5711 -5717 -5737 -5741 -5743 -5749 -5779 -5783 -5791 -5801 -5807 -5813 -5821 -5827 -5839 -5843 -5849 -5851 -5857 -5861 -5867 -5869 -5879 -5881 -5897 -5903 -5923 -5927 -5939 -5953 -5981 -5987 -6007 -6011 -6029 -6037 -6043 -6047 -6053 -6067 -6073 -6079 -6089 -6091 -6101 -6113 -6121 -6131 -6133 -6143 -6151 -6163 -6173 -6197 -6199 -6203 -6211 -6217 -6221 -6229 -6247 -6257 -6263 -6269 -6271 -6277 -6287 -6299 -6301 -6311 -6317 -6323 -6329 -6337 -6343 -6353 -6359 -6361 -6367 -6373 -6379 -6389 -6397 -6421 -6427 -6449 -6451 -6469 -6473 -6481 -6491 -6521 -6529 -6547 -6551 -6553 -6563 -6569 -6571 -6577 -6581 -6599 -6607 -6619 -6637 -6653 -6659 -6661 -6673 -6679 -6689 -6691 -6701 -6703 -6709 -6719 -6733 -6737 -6761 -6763 -6779 -6781 -6791 -6793 -6803 -6823 -6827 -6829 -6833 -6841 -6857 -6863 -6869 -6871 -6883 -6899 -6907 -6911 -6917 -6947 -6949 -6959 -6961 -6967 -6971 -6977 -6983 -6991 -6997 -7001 -7013 -7019 -7027 -7039 -7043 -7057 -7069 -7079 -7103 -7109 -7121 -7127 -7129 -7151 -7159 -7177 -7187 -7193 -7207 -7211 -7213 -7219 -7229 -7237 -7243 -7247 -7253 -7283 -7297 -7307 -7309 -7321 -7331 -7333 -7349 -7351 -7369 -7393 -7411 -7417 -7433 -7451 -7457 -7459 -7477 -7481 -7487 -7489 -7499 -7507 -7517 -7523 -7529 -7537 -7541 -7547 -7549 -7559 -7561 -7573 -7577 
-7583 -7589 -7591 -7603 -7607 -7621 -7639 -7643 -7649 -7669 -7673 -7681 -7687 -7691 -7699 -7703 -7717 -7723 -7727 -7741 -7753 -7757 -7759 -7789 -7793 -7817 -7823 -7829 -7841 -7853 -7867 -7873 -7877 -7879 -7883 -7901 -7907 -7919 diff --git a/static/21_email_preview.html b/static/21_email_preview.html deleted file mode 100644 index b02167ca..00000000 --- a/static/21_email_preview.html +++ /dev/null @@ -1,42 +0,0 @@ - - - - - -Sehr geehrte Damen und Herren,
anbei finden Sie die Datei 'prime_numbers.csv', die die Liste der Primzahlen enthält.
Mit freundlichen Grüßen,
Ihr Team
Sehr geehrte Damen und Herren,
anbei finden Sie die Datei 'prime_numbers.csv', die die Liste der Primzahlen enth\u00e4lt.
Mit freundlichen Gr\u00fc\u00dfen,
Ihr Team