diff --git a/.gitignore b/.gitignore
index a81e10c4..eb6d2935 100644
--- a/.gitignore
+++ b/.gitignore
@@ -166,4 +166,5 @@ cython_debug/
# local data
gwserver/_database*
-gwserver/results/*
\ No newline at end of file
+gwserver/results/*
+*.log.*
\ No newline at end of file
diff --git a/config.ini b/config.ini
index 8f803896..e4b85267 100644
--- a/config.ini
+++ b/config.ini
@@ -33,13 +33,13 @@ Security_FAILED_LOGIN_LIMIT = 5
Security_LOCK_DURATION_MINUTES = 30
# Agent Webcrawler configuration
-Agent_Webcrawler_TIMEOUT = 10
-Agent_Webcrawler_MAX_URLS = 3
-Agent_Webcrawler_MAX_TOKENS = 30000
-Agent_Webcrawler_USER_AGENT = Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36
-Agent_Webcrawler_SEARCH_ENGINE = https://html.duckduckgo.com/html/?q=
-Agent_Webcrawler_MAX_SEARCH_KEYWORDS = 3
-Agent_Webcrawler_MAX_SEARCH_RESULTS = 5
+Agent_Webcrawler_SERPAPI_ENGINE = google
+Agent_Webcrawler_SERPAPI_APIKEY = REPLACE_WITH_SERPAPI_KEY
+Agent_Webcrawler_SERPAPI_MAX_URLS = 3
+Agent_Webcrawler_SERPAPI_MAX_SEARCH_KEYWORDS = 3
+Agent_Webcrawler_SERPAPI_MAX_SEARCH_RESULTS = 5
+Agent_Webcrawler_SERPAPI_TIMEOUT = 10
+Agent_Webcrawler_SERPAPI_USER_AGENT = Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36
# Agent Coder configuration
Agent_Coder_INSTALL_TIMEOUT = 180
diff --git a/env_prod.env b/env_prod.env
index 2dc3727d..041943a5 100644
--- a/env_prod.env
+++ b/env_prod.env
@@ -25,7 +25,7 @@ APP_TOKEN_EXPIRY=300
APP_ALLOWED_ORIGINS=http://localhost:8080,https://playground.poweron-center.net
# Logging configuration
-APP_LOGGING_LOG_LEVEL = DEBUG
+APP_LOGGING_LOG_LEVEL = WARNING
APP_LOGGING_LOG_FILE = /home/poweron.log
APP_LOGGING_FORMAT = %(asctime)s - %(levelname)s - %(name)s - %(message)s
APP_LOGGING_DATE_FORMAT = %Y-%m-%d %H:%M:%S
diff --git a/modules/agentCoder.py b/modules/agentCoder.py
index 9e5b4d97..0286a157 100644
--- a/modules/agentCoder.py
+++ b/modules/agentCoder.py
@@ -131,13 +131,14 @@ class AgentCoder(AgentBase):
# 4. Generate code using AI
code, requirements = await self._generateCode(prompt, outputSpecs)
-
if not code:
return {
"feedback": "Failed to generate code for the task.",
"documents": []
}
-
+ # Store the original code without document data
+ original_clean_code = code # Save clean code for later use in improvement
+
# 5. Replace the placeholder with actual inputFiles data
documentDataJson = repr(documentData)
codeWithData = code.replace("inputFiles = \"=== JSONLOAD ===\"", f"inputFiles = {documentDataJson}")
@@ -171,15 +172,17 @@ class AgentCoder(AgentBase):
# Generate improved code based on error
improvedCode, improvedRequirements = await self._improveCode(
- originalCode=codeWithData,
+ originalCode=original_clean_code, # Use clean code without document data
error=error,
executionResult=executionResult,
attempt=retryCount + 1,
outputSpecs=outputSpecs
)
-
+
if improvedCode:
- codeWithData = improvedCode
+ # Inject document data into improved code
+ original_clean_code = improvedCode # Update clean code for next potential improvement
+ codeWithData = improvedCode.replace("inputFiles = \"=== JSONLOAD ===\"", f"inputFiles = {documentDataJson}")
requirements = improvedRequirements
logger.info(f"Code improved for retry {retryCount + 2}")
else:
@@ -313,14 +316,15 @@ STDOUT:
{outputSpecsStr}
INSTRUCTIONS:
1. Fix all errors identified in the error message
-2. Diagnose and fix any logical issues
-3. Pay special attention to:
+2. If there is a requirements error for missing or failed modules, then create alternative code using other modules
+3. Diagnose and fix any logical issues
+4. Pay special attention to:
- Type conversions and data handling
- Error handling and edge cases
- Resource management (file handles, etc.)
- Syntax errors and typos
-4. Keep the inputFiles handling logic intact
-5. Maintain the same overall structure and purpose
+5. Keep the inputFiles handling logic intact
+6. Maintain the same overall structure and purpose
OUTPUT REQUIREMENTS (VERY IMPORTANT):
- Your code MUST define a 'result' variable as a dictionary to store ALL outputs
diff --git a/modules/agentWebcrawler.py b/modules/agentWebcrawler.py
index 5cce8176..cc2fe3f7 100644
--- a/modules/agentWebcrawler.py
+++ b/modules/agentWebcrawler.py
@@ -36,12 +36,18 @@ class AgentWebcrawler(AgentBase):
]
# Web crawling configuration
- self.maxUrl = int(APP_CONFIG.get("Agent_Webcrawler_MAX_URLS", "5"))
- self.maxSearchTerms = int(APP_CONFIG.get("Agent_Webcrawler_MAX_SEARCH_KEYWORDS", "3"))
- self.maxResults = int(APP_CONFIG.get("Agent_Webcrawler_MAX_SEARCH_RESULTS", "5"))
- self.timeout = int(APP_CONFIG.get("Agent_Webcrawler_TIMEOUT", "30"))
- self.searchEngine = APP_CONFIG.get("Agent_Webcrawler_SEARCH_ENGINE", "https://html.duckduckgo.com/html/?q=")
- self.userAgent = APP_CONFIG.get("Agent_Webcrawler_USER_AGENT", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36")
+ self.srcApikey = APP_CONFIG.get("Agent_Webcrawler_SERPAPI_APIKEY","")
+ self.srcEngine = APP_CONFIG.get("Agent_Webcrawler_SERPAPI_ENGINE","google")
+ self.srcCountry = APP_CONFIG.get("Agent_Webcrawler_SERPAPI_COUNTRY","auto")
+ self.maxUrl = int(APP_CONFIG.get("Agent_Webcrawler_SERPAPI_MAX_URLS", "5"))
+ self.maxSearchTerms = int(APP_CONFIG.get("Agent_Webcrawler_SERPAPI_MAX_SEARCH_KEYWORDS", "3"))
+ self.maxResults = int(APP_CONFIG.get("Agent_Webcrawler_SERPAPI_MAX_SEARCH_RESULTS", "5"))
+ self.timeout = int(APP_CONFIG.get("Agent_Webcrawler_SERPAPI_TIMEOUT", "30"))
+ self.userAgent = APP_CONFIG.get("Agent_Webcrawler_SERPAPI_USER_AGENT", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36")
+
+ if not self.srcApikey:
+ logger.error("SerpAPI key not configured")
+
def setDependencies(self, mydom=None):
"""Set external dependencies for the agent."""
@@ -589,7 +595,7 @@ class AgentWebcrawler(AgentBase):
def _searchWeb(self, query: str) -> List[Dict[str, str]]:
"""
- Conduct a web search and return the results.
+ Conduct a web search using SerpAPI and return the results.
Args:
query: The search query
@@ -597,69 +603,71 @@ class AgentWebcrawler(AgentBase):
Returns:
List of search results
"""
- formattedQuery = quote_plus(query)
- url = f"{self.searchEngine}{formattedQuery}"
-
- searchResultsSoup = self._readUrl(url)
- if not searchResultsSoup or not searchResultsSoup.select('.result'):
- logger.warning(f"No search results found for: {query}")
+ if not self.srcApikey:
return []
-
- # Extract search results
- results = []
-
- # Find all result containers
- resultElements = searchResultsSoup.select('.result')
- for result in resultElements:
- # Extract title
- titleElement = result.select_one('.result__a')
- title = titleElement.text.strip() if titleElement else 'No title'
+ # Get user language from mydom if available
+ userLanguage = "en" # Default language
+ if self.mydom.userLanguage:
+ userLanguage = self.mydom.userLanguage
+
+ try:
+ # Format the search request for SerpAPI
+ params = {
+ "engine": self.srcEngine,
+ "q": query,
+ "api_key": self.srcApikey,
+ "num": self.maxResults, # Number of results to return
+ "hl": userLanguage # Identified user language
+ }
+
+ # Make the API request
+ response = requests.get("https://serpapi.com/search", params=params, timeout=self.timeout)
+ response.raise_for_status()
- # Extract URL (DuckDuckGo uses redirects)
- urlElement = titleElement.get('href') if titleElement else ''
- extractedUrl = 'No URL'
+ # Parse JSON response
+ search_results = response.json()
- if urlElement:
- # Extract actual URL from DuckDuckGo's redirect
- if urlElement.startswith('/d.js?q='):
- start = urlElement.find('?q=') + 3
- end = urlElement.find('&', start) if '&' in urlElement[start:] else None
- extractedUrl = unquote(urlElement[start:end])
+ # Extract organic results
+ results = []
+
+ if "organic_results" in search_results:
+ for result in search_results["organic_results"][:self.maxResults]:
+ # Extract title
+ title = result.get("title", "No title")
- # Ensure URL has correct protocol prefix
- if not extractedUrl.startswith(('http://', 'https://')):
- if not extractedUrl.startswith('//'):
- extractedUrl = 'https://' + extractedUrl
- else:
- extractedUrl = 'https:' + extractedUrl
- else:
- extractedUrl = urlElement
-
- # Extract snippet directly from search results page
- snippetElement = result.select_one('.result__snippet')
- snippet = snippetElement.text.strip() if snippetElement else 'No description'
-
- # Get actual page content
- try:
- targetPageSoup = self._readUrl(extractedUrl)
- content = self._extractMainContent(targetPageSoup)
- except Exception as e:
- logger.warning(f"Error extracting content from {extractedUrl}: {str(e)}")
- content = f"Error extracting content: {str(e)}"
-
- results.append({
- 'title': title,
- 'url': extractedUrl,
- 'snippet': snippet,
- 'data': content
- })
-
- # Limit number of results
- if len(results) >= self.maxResults:
- break
+ # Extract URL
+ url = result.get("link", "No URL")
+
+ # Extract snippet
+ snippet = result.get("snippet", "No description")
+
+ # Get actual page content
+ try:
+ targetPageSoup = self._readUrl(url)
+ content = self._extractMainContent(targetPageSoup)
+ except Exception as e:
+ logger.warning(f"Error extracting content from {url}: {str(e)}")
+ content = f"Error extracting content: {str(e)}"
+
+ results.append({
+ 'title': title,
+ 'url': url,
+ 'snippet': snippet,
+ 'data': content
+ })
+
+ # Limit number of results
+ if len(results) >= self.maxResults:
+ break
+ else:
+ logger.warning(f"No organic results found in SerpAPI response for: {query}")
- return results
+ return results
+
+ except Exception as e:
+ logger.error(f"Error searching with SerpAPI for {query}: {str(e)}")
+ return []
def _readUrl(self, url: str) -> BeautifulSoup:
"""
diff --git a/modules/lucydomInterface.py b/modules/lucydomInterface.py
index 54679543..ac779782 100644
--- a/modules/lucydomInterface.py
+++ b/modules/lucydomInterface.py
@@ -229,14 +229,15 @@ class LucyDOMInterface:
# Regular users can create in most tables
return True
- # Language support methods
+ # Language support method
def setUserLanguage(self, languageCode: str):
"""Set the user's preferred language"""
self.userLanguage = languageCode
- self.currentUser["language"] = languageCode
logger.info(f"User language set to: {languageCode}")
+ # AI Call Root Function
+
async def callAi(self, messages: List[Dict[str, str]], produceUserAnswer: bool = False, temperature: float = None) -> str:
"""Enhanced AI service call with language support."""
if not self.aiService:
@@ -679,7 +680,7 @@ class LucyDOMInterface:
self.createFileData(dbFile["id"], fileContent)
# Debug: Export file to static folder
- self._exportFileToStatic(fileContent, dbFile["id"], fileName)
+ # self._exportFileToStatic(fileContent, dbFile["id"], fileName)
logger.info(f"File upload process completed for: {fileName}")
return dbFile
diff --git a/modules/workflowManager.py b/modules/workflowManager.py
index db4c4f65..087cb5a1 100644
--- a/modules/workflowManager.py
+++ b/modules/workflowManager.py
@@ -860,6 +860,13 @@ filesDelivered = {self.parseJson2text(matchingDocuments)}
return preparedInputs
+
+ async def messageSummarizeContent(self, content: Dict[str, Any]) -> str:
+ return await self.getContentExtraction(
+ content,
+ "Create a very concise summary (1-2 sentences, maximum 200 characters) about this content."
+ )
+
async def processDocumentForAgent(self, document: Dict[str, Any], docSpec: Dict[str, Any]) -> Dict[str, Any]:
"""
Processes a document for an agent based on the document specification.
@@ -880,83 +887,72 @@ filesDelivered = {self.parseJson2text(matchingDocuments)}
processedContents = []
for content in processedDoc["contents"]:
-
# Check if part required
if partSpec != "" and partSpec != content.get("name"):
continue
- # Get the data from the content
- data = content.get("data", "")
+ # Get the prompt from the document specification
+ summary = docSpec.get("prompt", "Extract the relevant information from this document")
+
+ # Process content using the shared helper function
processedContent = content.copy()
-
- # Check if content data is base64 encoded
- isBase64 = content.get("metadata", {}).get("base64Encoded", False)
-
- try:
- # Use the AI service to process the document content according to the prompt from the project manager for the document specification
- summary = docSpec.get("prompt", "Extract the relevant information from this document")
- aiPrompt = f"""
-# Please process the following document content according to this instruction:
-
-{summary}
-
-
-# Document content:
-
-{data}
-
-
-# Extract and provide only the relevant information as requested.
-"""
-
- # Call the AI service through mydom for language support
- processedData = await self.mydom.callAi([
- {"role": "system", "content": "You are a document processing assistant. Extract only the relevant information as requested."},
- {"role": "user", "content": aiPrompt}
- ])
-
- # DO NOT change the original data field
- # processedContent["data"] unchanged
- processedContent["dataExtracted"] = processedData
- processedContent["metadata"]["aiProcessed"] = True
-
- except Exception as e:
- logger.error(f"Error processing document content with AI: {str(e)}")
- # Fall back to original content if AI processing fails
- processedContent["dataExtracted"] = "(no information)"
-
+ processedContent["dataExtracted"] = await self.getContentExtraction(content, summary)
+ processedContent["metadata"]["aiProcessed"] = True
+
processedContents.append(processedContent)
processedDoc["contents"] = processedContents
return processedDoc
- async def messageSummarizeContent(self, content: Dict[str, Any]) -> str:
+ async def getContentExtraction(self, content: Dict[str, Any], prompt: str = None) -> str:
"""
- Generates a summary for a content item using AI.
+ Helper function that extracts or summarizes content based on its type (text/image/binary).
Args:
- content: Content item to summarize (already processed by getDocumentContents)
+ content: Content item to analyze
+ prompt: Optional custom prompt for extraction (default prompts used if not provided)
Returns:
- Brief summary of the content
+ Extracted or summarized content as text
"""
# Extract relevant information
data = content.get("data", "")
contentType = content.get("contentType", "text/plain")
- isText = content.get("metadata", {}).get("isText", False)
+ base64Encoded = content.get("base64Encoded", False)
+
+ # Default prompts if none provided
+ if prompt is None:
+ text_prompt = "Create a very concise summary (1-2 sentences, maximum 200 characters) about this content."
+ image_prompt = "Create a very concise summary (1-2 sentences, maximum 200 characters) about this image."
+ else:
+ text_prompt = prompt
+ image_prompt = prompt
try:
- # Use the mydom for language-aware AI calls
- summary = await self.mydom.callAi([
- {"role": "system", "content": "You are a content summarizer. Create very concise summary (1-2 sentences, maximum 200 characters) about this file."},
- {"role": "user", "content": f"Summarize this {contentType} content briefly:\n\n{data}"}
- ])
- return summary
-
+ # For image content, use the specialized image analysis
+ if contentType.startswith("image/") or content.get("metadata", {}).get("isImage", False):
+ # analyzeImage handles base64 encoded data internally
+ return await self.mydom.analyzeImage(data, contentType, image_prompt)
+
+ # For binary data (base64Encoded but not an image), provide a generic description
+ elif base64Encoded:
+ metadata = content.get("metadata", {})
+ format_type = metadata.get("format", "unknown")
+ return f"Binary {format_type} data ({contentType})"
+
+ # For text data, use the regular AI processing
+ else:
+ return await self.mydom.callAi([
+ {"role": "system", "content": "You are a content analyzer. Process the provided content as instructed."},
+ {"role": "user", "content": f"{text_prompt}\n\n{data}"}
+ ])
+
except Exception as e:
- logger.error(f"Error generating content summary: {str(e)}")
- return f"Text content ({contentType})"
+ logger.error(f"Error processing content: {str(e)}")
+ return f"Content of type {contentType} (processing failed)"
+
+
def messageAdd(self, workflow: Dict[str, Any], message: Dict[str, Any]) -> Dict[str, Any]:
"""
diff --git a/notes/changelog.txt b/notes/changelog.txt
index dd61bde4..72a0aafe 100644
--- a/notes/changelog.txt
+++ b/notes/changelog.txt
@@ -1,20 +1,15 @@
....................... TASKS
-UI: Workflow reset does not reset log and messages view
-
----------------------- OPEN
PRIO1:
CHECK: If pictures not displayed to check utf-8 encoding in the base64 string!! general file writing and reading (example with svg)
-STOP File export to static folder ("TODO)
-
add connector to myoutlook
-
PRIO2:
todo an agent for "code writing and editing" connected to the codebase, working in loops over each document...
@@ -30,7 +25,12 @@ frontend to react
frontend: no labels definition
+PRIO3:
+Tools to transfer incl funds:
+- Google SERPAPI (shelly)
+- Anthropic Claude (valueon + shelly)
+-
----------------------- DONE
diff --git a/notes/doc_system.md b/notes/doc_system.md
new file mode 100644
index 00000000..3be83c91
--- /dev/null
+++ b/notes/doc_system.md
@@ -0,0 +1,643 @@
+# Agent Chat System Handbook
+
+# Einführung in das Agent Chat System Handbuch
+
+## Zweck und Umfang des Dokuments
+
+Willkommen zum "Agent Chat System Handbook". Dieses Handbuch dient als umfassende Anleitung für die Implementierung und Verwaltung eines Agent Chat Systems unter Verwendung von FastAPI. Es richtet sich an technische Fachkräfte, die für die Einrichtung, Verwaltung und Optimierung von Chat-Systemen verantwortlich sind. Ziel ist es, Ihnen die notwendigen Kenntnisse und Werkzeuge an die Hand zu geben, um ein effizientes und sicheres Chat-System zu entwickeln und zu betreiben.
+
+## Kontext und Hintergrundinformationen
+
+In der heutigen digitalen Welt sind Chat-Systeme ein wesentlicher Bestandteil der Kundenkommunikation und des Supports. Mit der zunehmenden Integration von Künstlicher Intelligenz (KI) in diese Systeme wird es immer wichtiger, robuste und skalierbare Lösungen zu entwickeln. FastAPI bietet eine moderne und leistungsstarke Plattform zur Erstellung von Web-APIs, die sich ideal für die Entwicklung eines solchen Systems eignet. Dieses Handbuch basiert auf den bereitgestellten FastAPI-Anwendungsdateien und bietet eine detaillierte Anleitung zur Implementierung eines Agent Chat Systems.
+
+## Inhalt des Dokuments
+
+Im "Agent Chat System Handbook" finden Sie detaillierte Informationen zu folgenden Themen:
+
+- **FastAPI Setup**: Schritt-für-Schritt-Anleitung zur Einrichtung der FastAPI-Umgebung.
+- **Benutzerverwaltung**: Methoden zur Verwaltung von Benutzerkonten und -rollen.
+- **KI-Integration**: Implementierung von KI-Funktionen zur Verbesserung der Chat-Interaktionen.
+- **Authentifizierung**: Sicherstellung der Sicherheit und Integrität des Systems durch robuste Authentifizierungsmechanismen.
+- **Mandatsverwaltung**: Verwaltung von Benutzerrechten und -mandaten innerhalb des Systems.
+- **Attributverwaltung**: Umgang mit benutzerdefinierten Attributen und deren Verwaltung.
+- **Prompt-Management**: Erstellung und Verwaltung von Eingabeaufforderungen für die KI-Interaktion.
+- **Dateioperationen**: Verwaltung und Verarbeitung von Dateien innerhalb des Systems.
+- **Workflow-Management**: Optimierung und Automatisierung von Arbeitsabläufen im Chat-System.
+
+## Ton und Zielgruppe
+
+Dieses Handbuch ist in einem formellen und technischen Ton verfasst, um den Anforderungen einer professionellen Leserschaft gerecht zu werden. Es richtet sich an Entwickler, Systemadministratoren und technische Projektleiter, die mit der Implementierung und Verwaltung von Chat-Systemen betraut sind. Wir empfehlen, dass die Leser über grundlegende Kenntnisse in FastAPI und Web-API-Entwicklung verfügen, um den maximalen Nutzen aus diesem Handbuch zu ziehen.
+
+Wir hoffen, dass dieses Handbuch Ihnen als wertvolle Ressource dient und Sie bei der erfolgreichen Implementierung Ihres Agent Chat Systems unterstützt.
+
+# Einführung
+
+## Zweck des Handbuchs
+
+Das "Agent Chat System Handbook" dient als umfassende Anleitung zur Implementierung und Nutzung des Agenten-Chat-Systems, das auf der FastAPI-Plattform basiert. Dieses Handbuch richtet sich an technische Anwender, die eine detaillierte Anleitung zur Einrichtung, Verwaltung und Optimierung des Systems benötigen. Es bietet eine strukturierte Übersicht über die verschiedenen Komponenten und Funktionen des Systems, um eine effiziente Nutzung und Anpassung zu gewährleisten. Ziel ist es, den Anwendern ein tiefes Verständnis der Systemarchitektur und der zugrunde liegenden Prozesse zu vermitteln, um eine reibungslose Integration und Verwaltung zu ermöglichen.
+
+## Systemübersicht
+
+Das Agenten-Chat-System ist eine leistungsstarke Plattform, die entwickelt wurde, um die Kommunikation zwischen Agenten und Nutzern zu optimieren. Es nutzt die FastAPI-Technologie, um eine schnelle und skalierbare Lösung zu bieten. Die Hauptkomponenten des Systems umfassen:
+
+- **Anwendungssetup**: Die FastAPI-Anwendung wird mit spezifischen Konfigurationen für Logging, CORS (Cross-Origin Resource Sharing) und Authentifizierung eingerichtet. Diese Konfigurationen sind entscheidend für die Sicherheit und Leistung der Anwendung.
+
+- **Benutzerverwaltung**: Ein robustes Modul zur Verwaltung von Benutzerkonten, das die Erstellung, Aktualisierung und Löschung von Benutzerprofilen ermöglicht. Es stellt sicher, dass nur autorisierte Benutzer Zugriff auf das System haben.
+
+- **Mandatsverwaltung**: Diese Komponente ermöglicht die Verwaltung von Mandaten, die den Zugriff und die Berechtigungen innerhalb des Systems regeln. Sie ist essenziell für die Einhaltung von Sicherheitsrichtlinien.
+
+- **Attributverwaltung**: Ein flexibles System zur Verwaltung von Attributen, die zur Personalisierung und Anpassung der Benutzererfahrung verwendet werden können.
+
+- **Prompt-Management**: Diese Funktion ermöglicht die Verwaltung von Eingabeaufforderungen, die zur Interaktion mit den Nutzern verwendet werden. Sie ist entscheidend für die Anpassung der Kommunikation an spezifische Anforderungen.
+
+- **Dateioperationen**: Ein Modul zur effizienten Handhabung von Dateivorgängen, das das Hochladen, Herunterladen und Verwalten von Dateien innerhalb des Systems unterstützt.
+
+- **Workflow-Management**: Diese Komponente ermöglicht die Definition und Verwaltung von Arbeitsabläufen, um die Effizienz und Konsistenz der Prozesse zu gewährleisten.
+
+- **KI-Integration**: Das System bietet eine nahtlose Integration von KI-Technologien, um die Interaktion und Entscheidungsfindung zu verbessern.
+
+- **Authentifizierung**: Ein sicheres Authentifizierungssystem, das sicherstellt, dass nur berechtigte Benutzer Zugriff auf die Anwendung haben.
+
+Dieses Handbuch wird detaillierte Anleitungen und Beispiele für jede dieser Komponenten bieten, um eine umfassende Unterstützung bei der Implementierung und Verwaltung des Agenten-Chat-Systems zu gewährleisten.
+
+# Application Setup
+
+In diesem Abschnitt des "Agent Chat System Handbook" wird die Einrichtung der Anwendung detailliert beschrieben. Diese Anleitung richtet sich an technische Benutzer und bietet eine umfassende Übersicht über die Initialisierung der FastAPI-Anwendung, die Konfiguration von statischen Dateien und die allgemeinen Endpunkte.
+
+## FastAPI Initialization
+
+Die Initialisierung der FastAPI-Anwendung ist der erste Schritt zur Einrichtung des Agent Chat Systems. Hierbei werden grundlegende Parameter und Konfigurationen festgelegt, die für den Betrieb der Anwendung erforderlich sind.
+
+### Schritte zur Initialisierung:
+
+1. **Anwendungserstellung**:
+ - Die FastAPI-Anwendung wird mit einem Titel und einer Beschreibung initialisiert. Diese Informationen sind nützlich für die Dokumentation und API-Dokumentationsseiten.
+ - Beispiel:
+ ```python
+ from fastapi import FastAPI
+
+ app = FastAPI(
+ title="Agent Chat System",
+ description="Ein System zur Verwaltung von Agenten-Chats"
+ )
+ ```
+
+2. **Lebenszyklus-Management**:
+ - Die Anwendung verwendet einen Lebenszyklus-Manager, um Ereignisse beim Start und Herunterfahren der Anwendung zu verwalten. Dies ist entscheidend für die ordnungsgemäße Ressourcenverwaltung.
+ - Beispiel:
+ ```python
+ @app.on_event("startup")
+ async def startup_event():
+ # Initialisierungslogik hier
+
+ @app.on_event("shutdown")
+ async def shutdown_event():
+ # Bereinigungslogik hier
+ ```
+
+3. **CORS-Konfiguration**:
+ - Die Cross-Origin Resource Sharing (CORS) Einstellungen werden konfiguriert, um den Zugriff von verschiedenen Ursprüngen zu ermöglichen, was besonders wichtig für Webanwendungen ist, die auf verschiedenen Domains gehostet werden.
+
+## Static Files Setup
+
+Die Konfiguration von statischen Dateien ermöglicht es der Anwendung, Ressourcen wie Bilder, CSS-Dateien und JavaScript-Dateien bereitzustellen, die für die Benutzeroberfläche benötigt werden.
+
+### Schritte zur Konfiguration:
+
+1. **Verzeichnis für statische Dateien**:
+ - Ein Verzeichnis wird definiert, in dem alle statischen Dateien gespeichert werden. Dieses Verzeichnis wird in der Regel relativ zum Projektverzeichnis angegeben.
+ - Beispiel:
+ ```python
+ from fastapi.staticfiles import StaticFiles
+
+ app.mount("/static", StaticFiles(directory="static"), name="static")
+ ```
+
+2. **Zugriff auf statische Dateien**:
+ - Die Anwendung stellt sicher, dass die statischen Dateien über einen bestimmten URL-Pfad zugänglich sind, was die Bereitstellung und den Zugriff auf diese Ressourcen erleichtert.
+
+## Endpoints Overview
+
+Die Endpunkte der Anwendung sind die Schnittstellen, über die externe Systeme und Benutzer mit der Anwendung interagieren können. Eine klare Übersicht über die verfügbaren Endpunkte ist entscheidend für die Integration und Nutzung der Anwendung.
+
+### Allgemeine Endpunkte:
+
+1. **Benutzerverwaltung**:
+ - Endpunkte zur Erstellung, Aktualisierung und Löschung von Benutzern.
+ - Beispiel:
+ ```python
+ @app.post("/users/")
+ async def create_user(user: User):
+ # Logik zur Benutzererstellung
+ ```
+
+2. **Mandatsverwaltung**:
+ - Endpunkte zur Verwaltung von Mandaten, einschließlich der Zuweisung und Verwaltung von Berechtigungen.
+
+3. **Attributverwaltung**:
+ - Endpunkte zur Verwaltung von Attributen, die für die Anpassung und Personalisierung der Agenten-Chats verwendet werden.
+
+4. **Prompt-Management**:
+ - Endpunkte zur Verwaltung von Eingabeaufforderungen, die für die Interaktion mit Benutzern verwendet werden.
+
+Diese detaillierte Anleitung zur Einrichtung der Anwendung stellt sicher, dass technische Benutzer die FastAPI-Anwendung korrekt initialisieren und konfigurieren können, um eine reibungslose Funktionalität des Agent Chat Systems zu gewährleisten.
+
+# Logging
+
+In diesem Abschnitt des "Agent Chat System Handbook" wird die Konfiguration und Einrichtung des Loggings im Rahmen der FastAPI-Anwendung beschrieben. Eine ordnungsgemäße Protokollierung ist entscheidend für die Überwachung und Fehlerbehebung der Anwendung. Dieser Abschnitt ist in zwei Hauptunterabschnitte unterteilt: Initialisierung und Handler.
+
+## Initialisierung
+
+Die Initialisierung des Loggings ist ein wesentlicher Schritt, um sicherzustellen, dass alle Ereignisse innerhalb der Anwendung korrekt erfasst werden. Die Konfiguration des Loggings erfolgt in der Regel zu Beginn der Anwendung, um sicherzustellen, dass alle nachfolgenden Prozesse und Ereignisse protokolliert werden.
+
+### Beispiel für die Logging-Initialisierung
+
+```python
+import logging
+
+def initialize_logging():
+ logging.basicConfig(
+ level=logging.INFO,
+ format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
+ handlers=[
+ logging.FileHandler("app.log"),
+ logging.StreamHandler()
+ ]
+ )
+```
+
+In diesem Beispiel wird das Logging mit einem Basislevel von `INFO` konfiguriert. Die Formatierung der Log-Nachrichten umfasst das Datum und die Uhrzeit, den Namen des Loggers, die Log-Stufe und die eigentliche Nachricht. Zwei Handler werden eingerichtet: ein `FileHandler`, der die Logs in eine Datei schreibt, und ein `StreamHandler`, der die Logs auf der Konsole ausgibt.
+
+## Handler
+
+Handler sind ein wesentlicher Bestandteil des Loggings, da sie bestimmen, wohin die Log-Nachrichten gesendet werden. In der FastAPI-Anwendung können verschiedene Arten von Handlern eingerichtet werden, um die Protokollierung flexibel und anpassbar zu gestalten.
+
+### Einrichtung von Handlers
+
+1. **FileHandler**: Dieser Handler schreibt Log-Nachrichten in eine Datei. Er ist nützlich für die langfristige Speicherung von Logs und die spätere Analyse.
+
+ ```python
+ file_handler = logging.FileHandler('app.log')
+ file_handler.setLevel(logging.INFO)
+ ```
+
+2. **StreamHandler**: Dieser Handler gibt Log-Nachrichten auf der Konsole aus. Er ist besonders nützlich für die Echtzeitüberwachung während der Entwicklung und des Debuggings.
+
+ ```python
+ stream_handler = logging.StreamHandler()
+ stream_handler.setLevel(logging.DEBUG)
+ ```
+
+3. **Custom Handler**: Bei Bedarf können benutzerdefinierte Handler erstellt werden, um spezielle Anforderungen zu erfüllen, wie z.B. das Senden von Logs an externe Systeme oder Dienste.
+
+### Beispiel für die Handler-Konfiguration
+
+```python
+logger = logging.getLogger('agent_chat_system')
+logger.setLevel(logging.DEBUG)
+
+# Hinzufügen der Handler zum Logger
+logger.addHandler(file_handler)
+logger.addHandler(stream_handler)
+```
+
+In diesem Beispiel wird ein Logger mit dem Namen `agent_chat_system` erstellt und auf das Level `DEBUG` gesetzt. Die zuvor definierten `FileHandler` und `StreamHandler` werden dem Logger hinzugefügt, um die Log-Nachrichten sowohl in eine Datei als auch auf der Konsole auszugeben.
+
+Durch die sorgfältige Konfiguration von Logging und Handlers kann die FastAPI-Anwendung effektiv überwacht und gewartet werden, was zu einer verbesserten Stabilität und Fehlerbehebung führt.
+
+# Benutzerverwaltung
+
+In diesem Abschnitt des "Agent Chat System Handbook" wird die Benutzerverwaltung detailliert beschrieben. Die Benutzerverwaltung ist ein zentraler Bestandteil des Systems, da sie die Zuweisung von Rollen und Berechtigungen sowie die Authentifizierungsmechanismen umfasst. Diese Aspekte sind entscheidend für die Sicherheit und Effizienz des Systems.
+
+## Rollen und Berechtigungen
+
+### Benutzerrollen
+
+Benutzerrollen definieren die verschiedenen Zugriffsebenen und Verantwortlichkeiten innerhalb des Agent Chat Systems. Jede Rolle hat spezifische Berechtigungen, die den Zugriff auf bestimmte Funktionen und Daten steuern. Die Hauptrollen sind:
+
+- **Administrator**: Hat umfassende Berechtigungen, einschließlich der Verwaltung von Benutzern, Rollen und Systemeinstellungen.
+- **Agent**: Kann auf die Chat-Funktionalitäten zugreifen und mit Kunden interagieren.
+- **Supervisor**: Überwacht die Aktivitäten der Agenten und hat Zugriff auf Berichte und Analysen.
+
+### Berechtigungen
+
+Berechtigungen sind spezifische Rechte, die einer Rolle zugewiesen werden. Sie bestimmen, welche Aktionen ein Benutzer innerhalb des Systems ausführen kann. Beispiele für Berechtigungen sind:
+
+- Zugriff auf das Dashboard
+- Verwaltung von Benutzerkonten
+- Einsicht in Berichte und Statistiken
+- Konfiguration von Systemeinstellungen
+
+Die Zuweisung von Rollen und Berechtigungen erfolgt über die Administrationsoberfläche des Systems, wo Administratoren die Möglichkeit haben, Benutzerkonten zu erstellen und zu verwalten.
+
+## Authentifizierung
+
+### Authentifizierungsmechanismen
+
+Die Authentifizierung ist ein kritischer Sicherheitsaspekt des Agent Chat Systems. Sie stellt sicher, dass nur autorisierte Benutzer Zugriff auf das System erhalten. Die gängigen Authentifizierungsmechanismen umfassen:
+
+- **Passwortbasierte Authentifizierung**: Benutzer melden sich mit einem Benutzernamen und einem Passwort an. Es wird empfohlen, starke Passwörter zu verwenden und regelmäßige Passwortänderungen durchzuführen.
+
+- **Zwei-Faktor-Authentifizierung (2FA)**: Erhöht die Sicherheit, indem ein zusätzlicher Verifizierungsschritt hinzugefügt wird, z.B. ein einmaliger Code, der an das Mobiltelefon des Benutzers gesendet wird.
+
+- **OAuth 2.0**: Ermöglicht die Authentifizierung über Drittanbieter, wie Google oder Facebook, was den Anmeldeprozess für Benutzer vereinfacht und die Sicherheit erhöht.
+
+### Implementierung in FastAPI
+
+Die FastAPI-Anwendung implementiert diese Authentifizierungsmechanismen durch die Integration von Sicherheitsprotokollen und Middleware. Die Konfiguration erfolgt in der `app.py` Datei, wo die Authentifizierungslogik definiert ist. Hier ein Beispiel für die Implementierung der passwortbasierten Authentifizierung:
+
+```python
+from fastapi import FastAPI, Depends
+from fastapi.security import OAuth2PasswordBearer, OAuth2PasswordRequestForm
+
+app = FastAPI()
+
+oauth2_scheme = OAuth2PasswordBearer(tokenUrl="token")
+
+@app.post("/token")
+async def login(form_data: OAuth2PasswordRequestForm = Depends()):
+ # Authentifizierungslogik hier
+ return {"access_token": "token", "token_type": "bearer"}
+```
+
+Diese Struktur ermöglicht eine flexible und sichere Verwaltung der Benutzerzugriffe und gewährleistet, dass das System den aktuellen Sicherheitsstandards entspricht.
+
+Durch die sorgfältige Verwaltung von Rollen, Berechtigungen und Authentifizierungsmechanismen wird sichergestellt, dass das Agent Chat System sowohl sicher als auch effizient betrieben werden kann.
+
+# Mandate Management
+
+In diesem Abschnitt des "Agent Chat System Handbook" wird das Mandatsmanagement detailliert beschrieben. Das Mandatsmanagement ist ein wesentlicher Bestandteil des Systems, der die Erstellung und Verwaltung von Mandaten umfasst. Diese Funktionen sind entscheidend für die Organisation und den Betrieb des Agenten-Chat-Systems.
+
+## Mandate Creation
+
+### Erstellung von Mandaten
+
+Die Erstellung von Mandaten ist der erste Schritt im Mandatsmanagement. Ein Mandat definiert die spezifischen Aufgaben und Verantwortlichkeiten, die einem Agenten oder einer Gruppe von Agenten zugewiesen werden. Die Erstellung eines Mandats erfolgt in mehreren Schritten:
+
+1. **Initialisierung**: Beginnen Sie mit der Definition der grundlegenden Parameter des Mandats, einschließlich des Titels, der Beschreibung und der beteiligten Agenten.
+
+2. **Zuweisung von Aufgaben**: Weisen Sie spezifische Aufgaben oder Ziele zu, die im Rahmen des Mandats erreicht werden sollen. Diese Aufgaben sollten klar definiert und messbar sein.
+
+3. **Festlegung von Fristen**: Bestimmen Sie die zeitlichen Rahmenbedingungen für das Mandat, einschließlich Start- und Enddatum sowie Meilensteine.
+
+4. **Ressourcenzuweisung**: Stellen Sie sicher, dass die notwendigen Ressourcen, wie z.B. technische Tools oder Daten, den Agenten zur Verfügung stehen.
+
+5. **Genehmigung**: Das Mandat muss von einer autorisierten Person oder einem Gremium genehmigt werden, bevor es aktiv wird.
+
+## Mandate Lifecycle
+
+### Lebenszyklus eines Mandats
+
+Der Lebenszyklus eines Mandats umfasst mehrere Phasen, die sicherstellen, dass das Mandat effektiv verwaltet und abgeschlossen wird:
+
+1. **Initiierung**: Nach der Erstellung wird das Mandat offiziell gestartet. Alle beteiligten Parteien werden informiert und die notwendigen Ressourcen bereitgestellt.
+
+2. **Durchführung**: In dieser Phase arbeiten die Agenten an den zugewiesenen Aufgaben. Fortschritte werden regelmäßig überwacht und dokumentiert.
+
+3. **Überwachung und Anpassung**: Der Fortschritt des Mandats wird kontinuierlich überwacht. Bei Bedarf werden Anpassungen vorgenommen, um sicherzustellen, dass die Ziele erreicht werden.
+
+4. **Abschluss**: Nach Erreichen der Ziele oder dem Ende der Laufzeit wird das Mandat abgeschlossen. Eine abschließende Bewertung wird durchgeführt, um den Erfolg zu messen und Erkenntnisse für zukünftige Mandate zu gewinnen.
+
+5. **Archivierung**: Alle relevanten Dokumente und Berichte werden archiviert, um eine Nachverfolgbarkeit und Referenz für zukünftige Projekte zu gewährleisten.
+
+## Managing Mandates
+
+### Verwaltung von Mandaten
+
+Die Verwaltung von Mandaten erfordert eine kontinuierliche Überwachung und Anpassung, um sicherzustellen, dass die gesetzten Ziele erreicht werden. Zu den wichtigsten Verwaltungsaufgaben gehören:
+
+- **Statusüberprüfung**: Regelmäßige Überprüfung des Status und Fortschritts des Mandats.
+- **Kommunikation**: Sicherstellen, dass alle Beteiligten über den Fortschritt und etwaige Änderungen informiert sind.
+- **Risikomanagement**: Identifizierung und Management potenzieller Risiken, die den Erfolg des Mandats gefährden könnten.
+- **Berichterstattung**: Erstellung regelmäßiger Berichte, um den Fortschritt zu dokumentieren und Transparenz zu gewährleisten.
+
+Durch die sorgfältige Erstellung und Verwaltung von Mandaten wird sichergestellt, dass das Agenten-Chat-System effizient und effektiv arbeitet, um die gesteckten Ziele zu erreichen.
+
+# Attribute Handling
+
+In diesem Abschnitt des "Agent Chat System Handbook" wird die Handhabung von Attributen im System detailliert beschrieben. Attribute sind wesentliche Komponenten, die zur Verwaltung und Organisation von Daten innerhalb des Systems verwendet werden. Dieser Abschnitt behandelt die verschiedenen Typen von Attributen und die Operationen, die auf ihnen ausgeführt werden können.
+
+## Typen von Attributen
+
+Attribute im Agent Chat System sind in verschiedene Kategorien unterteilt, die jeweils spezifische Funktionen und Anwendungsbereiche haben. Die wichtigsten Attributtypen sind:
+
+### 1. Systemattribute
+Systemattribute sind vordefinierte Attribute, die für die grundlegende Funktionalität des Systems erforderlich sind. Sie werden automatisch vom System verwaltet und können nicht vom Benutzer geändert werden. Beispiele für Systemattribute sind Benutzer-ID, Erstellungsdatum und Änderungsdatum.
+
+### 2. Benutzerdefinierte Attribute
+Benutzerdefinierte Attribute werden von den Benutzern erstellt, um spezifische Anforderungen zu erfüllen. Diese Attribute bieten Flexibilität und Anpassungsfähigkeit, indem sie es den Benutzern ermöglichen, zusätzliche Informationen zu speichern, die nicht durch Systemattribute abgedeckt sind. Ein Beispiel könnte ein Attribut für die bevorzugte Sprache eines Benutzers sein.
+
+### 3. Temporäre Attribute
+Temporäre Attribute werden für kurzfristige Zwecke erstellt und haben eine begrenzte Lebensdauer. Sie werden häufig in Sitzungen oder für bestimmte Operationen verwendet, bei denen die Daten nicht dauerhaft gespeichert werden müssen. Ein Beispiel wäre ein Attribut, das während einer Chat-Sitzung verwendet wird, um den aktuellen Status eines Gesprächs zu verfolgen.
+
+## Operationen auf Attributen
+
+Die Verwaltung von Attributen umfasst eine Vielzahl von Operationen, die es ermöglichen, Attribute zu erstellen, zu ändern, zu löschen und zu analysieren. Die wichtigsten Operationen sind:
+
+### 1. Erstellung von Attributen
+Die Erstellung von Attributen erfolgt entweder automatisch durch das System (für Systemattribute) oder manuell durch den Benutzer (für benutzerdefinierte Attribute). Bei der Erstellung eines Attributs müssen der Attributtyp, der Name und der Datentyp spezifiziert werden.
+
+### 2. Aktualisierung von Attributen
+Attribute können aktualisiert werden, um Änderungen in den Daten widerzuspiegeln. Dies umfasst das Ändern von Attributwerten oder das Aktualisieren von Attributmetadaten. Beispielsweise kann ein Benutzer das Attribut "Telefonnummer" aktualisieren, um eine neue Nummer zu speichern.
+
+### 3. Löschung von Attributen
+Nicht mehr benötigte Attribute können gelöscht werden. Bei der Löschung von Attributen ist Vorsicht geboten, da dies irreversible Änderungen an den gespeicherten Daten zur Folge haben kann. Systemattribute können in der Regel nicht gelöscht werden, um die Integrität des Systems zu gewährleisten.
+
+### 4. Abfrage von Attributen
+Das System ermöglicht die Abfrage von Attributen, um Informationen zu extrahieren und Berichte zu erstellen. Dies ist besonders nützlich für die Analyse von Daten und die Generierung von Einblicken. Beispielsweise kann ein Administrator eine Abfrage durchführen, um alle Benutzer mit einem bestimmten Attributwert zu identifizieren.
+
+### 5. Validierung von Attributen
+Die Validierung von Attributen stellt sicher, dass die eingegebenen Daten den festgelegten Kriterien entsprechen. Dies umfasst die Überprüfung von Datentypen, Wertebereichen und anderen Einschränkungen. Eine korrekte Validierung ist entscheidend, um Datenintegrität und -konsistenz zu gewährleisten.
+
+Durch das Verständnis der verschiedenen Attributtypen und der auf ihnen ausführbaren Operationen können Benutzer das Agent Chat System effektiver nutzen und an ihre spezifischen Bedürfnisse anpassen.
+
+# Prompt Management
+
+In diesem Abschnitt des "Agent Chat System Handbook" wird die Verwaltung von Prompts behandelt. Prompts sind wesentliche Bestandteile des Agent Chat Systems, da sie die Interaktion zwischen Benutzern und dem System steuern. Dieser Abschnitt bietet eine detaillierte Anleitung zur Erstellung und Nutzung von Prompts.
+
+## Erstellen von Prompts
+
+Die Erstellung von Prompts ist ein zentraler Bestandteil der Systemkonfiguration und ermöglicht es, spezifische Anfragen oder Anweisungen für die Interaktion mit dem System zu definieren.
+
+### Schritte zur Erstellung von Prompts
+
+1. **Identifikation des Bedarfs**: Bestimmen Sie den spezifischen Bedarf oder das Szenario, für das ein Prompt erforderlich ist. Dies könnte eine häufig gestellte Frage oder eine spezifische Anweisung sein, die regelmäßig benötigt wird.
+
+2. **Definition des Inhalts**: Formulieren Sie den Inhalt des Prompts klar und präzise. Der Inhalt sollte direkt und verständlich sein, um Missverständnisse zu vermeiden.
+
+3. **Formatierung**: Achten Sie darauf, dass der Prompt in einem konsistenten Format erstellt wird, das mit den anderen Systemkomponenten kompatibel ist. Nutzen Sie Markdown oder andere unterstützte Formate, um die Lesbarkeit zu verbessern.
+
+4. **Implementierung im System**: Integrieren Sie den erstellten Prompt in das System. Dies kann durch die Anpassung der entsprechenden Konfigurationsdateien oder durch die Nutzung der API-Schnittstellen erfolgen.
+
+### Beispiel
+
+```json
+{
+ "prompt_id": "faq_shipping",
+ "content": "Wie lange dauert der Versand?",
+ "response": "Der Versand dauert in der Regel 3-5 Werktage."
+}
+```
+
+## Nutzung von Prompts
+
+Die Nutzung von Prompts ist entscheidend, um eine effiziente und konsistente Kommunikation innerhalb des Agent Chat Systems sicherzustellen.
+
+### Schritte zur Nutzung von Prompts
+
+1. **Abrufen von Prompts**: Verwenden Sie die API-Endpunkte, um verfügbare Prompts abzurufen. Dies ermöglicht es Agenten, schnell auf vorgefertigte Antworten zuzugreifen.
+
+2. **Anpassung an den Kontext**: Stellen Sie sicher, dass der ausgewählte Prompt dem aktuellen Kontext der Benutzeranfrage entspricht. Passen Sie den Inhalt gegebenenfalls an, um spezifische Details oder Variationen zu berücksichtigen.
+
+3. **Feedback und Optimierung**: Sammeln Sie regelmäßig Feedback zur Effektivität der Prompts und optimieren Sie diese basierend auf den Rückmeldungen. Dies kann durch die Analyse von Benutzerinteraktionen und die Anpassung der Inhalte erfolgen.
+
+### Beispiel
+
+Ein Agent erhält eine Anfrage zu den Versandzeiten. Anstatt die Antwort manuell zu formulieren, ruft der Agent den entsprechenden Prompt ab und liefert eine konsistente und schnelle Antwort.
+
+```json
+{
+ "user_query": "Wann kommt meine Bestellung an?",
+ "prompt_used": "faq_shipping",
+ "response": "Der Versand dauert in der Regel 3-5 Werktage."
+}
+```
+
+Durch die strukturierte Verwaltung und Nutzung von Prompts wird die Effizienz des Agent Chat Systems erheblich gesteigert, was zu einer verbesserten Benutzererfahrung führt.
+
+# File Operations
+
+In diesem Abschnitt des "Agent Chat System Handbook" werden die wesentlichen Aspekte der Dateiverwaltung und -speicherung im Rahmen des Agent Chat Systems behandelt. Diese Informationen sind entscheidend für die technische Verwaltung und den Betrieb des Systems. Der Abschnitt ist in zwei Hauptunterabschnitte unterteilt: "Handling Files" und "Storing Files".
+
+## Handling Files
+
+Die Handhabung von Dateien ist ein zentraler Bestandteil des Agent Chat Systems, da es die Interaktion mit verschiedenen Dateitypen ermöglicht, die für die Funktionalität des Systems erforderlich sind.
+
+### Dateiverwaltung
+
+- **Öffnen und Schließen von Dateien**: Dateien sollten mit geeigneten Methoden geöffnet und geschlossen werden, um Datenverlust oder -beschädigung zu vermeiden. In Python wird dies häufig mit dem `with`-Statement erreicht, das sicherstellt, dass Dateien nach der Verwendung ordnungsgemäß geschlossen werden.
+
+ ```python
+ with open('datei.txt', 'r') as file:
+ inhalt = file.read()
+ ```
+
+- **Lesen und Schreiben von Dateien**: Das System unterstützt sowohl das Lesen als auch das Schreiben von Dateien. Es ist wichtig, die korrekten Modi (`'r'` für Lesen, `'w'` für Schreiben, `'a'` für Anhängen) zu verwenden, um die Integrität der Daten zu gewährleisten.
+
+- **Fehlerbehandlung**: Beim Umgang mit Dateien können verschiedene Fehler auftreten, wie z.B. `FileNotFoundError` oder `IOError`. Eine robuste Fehlerbehandlung ist notwendig, um das System vor unerwarteten Abstürzen zu schützen.
+
+ ```python
+ try:
+ with open('datei.txt', 'r') as file:
+ inhalt = file.read()
+ except FileNotFoundError:
+ print("Die Datei wurde nicht gefunden.")
+ ```
+
+## Storing Files
+
+Die Speicherung von Dateien ist ein weiterer kritischer Aspekt, der sicherstellt, dass Daten sicher und effizient abgelegt werden.
+
+### Dateispeicherung
+
+- **Verzeichnisstruktur**: Eine gut organisierte Verzeichnisstruktur ist entscheidend für die effiziente Speicherung und den schnellen Zugriff auf Dateien. Es wird empfohlen, Dateien in thematisch geordneten Unterverzeichnissen zu speichern.
+
+- **Datenbankintegration**: In einigen Fällen kann es notwendig sein, Dateien in einer Datenbank zu speichern, insbesondere wenn Metadaten oder eine hohe Zugriffsgeschwindigkeit erforderlich sind. Das System kann Datenbanklösungen wie PostgreSQL oder MongoDB integrieren, um Dateien und ihre Metadaten zu verwalten.
+
+- **Sicherheitsaspekte**: Bei der Speicherung von Dateien müssen Sicherheitsaspekte berücksichtigt werden, insbesondere wenn es sich um sensible Daten handelt. Dies umfasst die Verschlüsselung von Dateien und die Implementierung von Zugriffsberechtigungen.
+
+- **Backup und Wiederherstellung**: Regelmäßige Backups sind unerlässlich, um Datenverlust zu vermeiden. Das System sollte über Mechanismen zur automatisierten Sicherung und Wiederherstellung von Dateien verfügen.
+
+Durch die Beachtung dieser Richtlinien und Praktiken wird sichergestellt, dass das Agent Chat System Dateien effizient und sicher handhabt und speichert, was zu einem reibungslosen Betrieb und einer hohen Zuverlässigkeit des Systems beiträgt.
+
+# Workflow Management
+
+In diesem Abschnitt des "Agent Chat System Handbook" wird das Workflow-Management detailliert beschrieben. Das Ziel ist es, den technischen Benutzern ein umfassendes Verständnis für die Erstellung und Ausführung von Workflows innerhalb des Agent Chat Systems zu vermitteln.
+
+## Inhaltsverzeichnis
+1. [Erstellung von Workflows](#erstellung-von-workflows)
+2. [Ausführung von Workflows](#ausführung-von-workflows)
+
+## Erstellung von Workflows
+
+Die Erstellung von Workflows ist ein zentraler Bestandteil des Workflow-Managements im Agent Chat System. Ein Workflow definiert eine Abfolge von Schritten, die automatisiert oder manuell ausgeführt werden können, um spezifische Aufgaben oder Prozesse zu steuern.
+
+### Schritte zur Erstellung eines Workflows
+
+1. **Identifikation der Anforderungen**: Bestimmen Sie die spezifischen Anforderungen und Ziele des Workflows. Dies könnte die Automatisierung von Kundenanfragen oder die Verwaltung von Support-Tickets umfassen.
+
+2. **Definition der Schritte**: Listen Sie die einzelnen Schritte auf, die zur Erreichung der Workflow-Ziele erforderlich sind. Jeder Schritt sollte klar definiert und in einer logischen Reihenfolge angeordnet sein.
+
+3. **Konfiguration der Aktionen**: Weisen Sie jedem Schritt spezifische Aktionen zu. Diese Aktionen könnten API-Aufrufe, Datenbankabfragen oder Benachrichtigungen umfassen.
+
+4. **Erstellung von Bedingungen**: Definieren Sie Bedingungen, die den Fluss des Workflows steuern. Bedingungen können auf Ereignissen, Datenwerten oder Benutzerinteraktionen basieren.
+
+5. **Testen des Workflows**: Vor der Implementierung sollte der Workflow in einer Testumgebung ausgeführt werden, um sicherzustellen, dass alle Schritte korrekt funktionieren.
+
+6. **Dokumentation**: Dokumentieren Sie den Workflow umfassend, einschließlich der Ziele, Schritte, Bedingungen und erwarteten Ergebnisse.
+
+### Beispiel
+
+```yaml
+- name: "Kundenanfrage-Workflow"
+ steps:
+ - step: "Anfrage erhalten"
+ action: "API-Aufruf"
+ - step: "Anfrage analysieren"
+ action: "AI-Analyse"
+ - step: "Antwort generieren"
+ action: "Textgenerierung"
+ - step: "Antwort senden"
+ action: "Benachrichtigung"
+ conditions:
+ - if: "Anfrage enthält 'dringend'"
+ then: "Priorität hochsetzen"
+```
+
+## Ausführung von Workflows
+
+Die Ausführung von Workflows ist der Prozess, bei dem die definierten Schritte eines Workflows in der Praxis umgesetzt werden. Dies kann manuell durch einen Benutzer oder automatisch durch das System erfolgen.
+
+### Schritte zur Ausführung eines Workflows
+
+1. **Initiierung**: Der Workflow wird entweder durch ein Ereignis, eine Benutzeraktion oder einen Zeitplan initiiert.
+
+2. **Verarbeitung der Schritte**: Jeder Schritt des Workflows wird in der festgelegten Reihenfolge ausgeführt. Das System überwacht den Fortschritt und stellt sicher, dass alle Bedingungen erfüllt sind, bevor zum nächsten Schritt übergegangen wird.
+
+3. **Überwachung und Protokollierung**: Während der Ausführung werden alle Aktionen und Ergebnisse protokolliert. Dies ermöglicht eine spätere Analyse und Fehlerbehebung.
+
+4. **Fehlerbehandlung**: Bei Auftreten eines Fehlers wird der Workflow entweder pausiert oder abgebrochen, je nach Konfiguration. Fehlerprotokolle werden erstellt, um die Ursache zu identifizieren und zu beheben.
+
+5. **Abschluss**: Nach erfolgreicher Ausführung aller Schritte wird der Workflow abgeschlossen und eine Zusammenfassung der Ergebnisse erstellt.
+
+### Beispiel
+
+```json
+{
+ "workflow_id": "12345",
+ "status": "in_progress",
+ "current_step": "Anfrage analysieren",
+ "logs": [
+ {"timestamp": "2023-10-01T10:00:00Z", "message": "Anfrage erhalten"},
+ {"timestamp": "2023-10-01T10:01:00Z", "message": "Anfrage analysieren gestartet"}
+ ]
+}
+```
+
+Durch die sorgfältige Erstellung und Ausführung von Workflows können Unternehmen die Effizienz und Genauigkeit ihrer Prozesse erheblich verbessern. Das Agent Chat System bietet die Flexibilität und Kontrolle, die erforderlich sind, um komplexe Workflows effektiv zu verwalten.
+
+# AI Integration
+
+In diesem Abschnitt wird die Integration von Künstlicher Intelligenz (KI) in das Agent Chat System detailliert beschrieben. Der Fokus liegt auf den verwendeten KI-Modellen und den Integrationspunkten innerhalb des Systems. Diese Informationen sind entscheidend für das Verständnis der technischen Architektur und der Funktionsweise der KI-Komponenten.
+
+## AI Models
+
+### Verwendete KI-Modelle
+
+Das Agent Chat System nutzt fortschrittliche KI-Modelle, um die Interaktion zwischen Agenten und Nutzern zu optimieren. Diese Modelle sind darauf ausgelegt, natürliche Sprachverarbeitung (NLP) zu unterstützen und kontextbezogene Antworten zu generieren. Die wichtigsten Modelle umfassen:
+
+- **GPT-3**: Ein leistungsstarkes Sprachmodell, das für die Generierung von menschenähnlichen Texten verwendet wird. Es ist in der Lage, komplexe Anfragen zu verstehen und relevante Antworten zu liefern.
+- **BERT**: Ein Modell, das für Aufgaben der Sprachverständnisoptimierung eingesetzt wird, insbesondere bei der Analyse von Benutzeranfragen und der Extraktion von Schlüsselinformationen.
+- **Custom Sentiment Analysis Model**: Ein speziell entwickeltes Modell zur Analyse der Stimmung in Benutzeranfragen, um die Reaktionen der Agenten entsprechend anzupassen.
+
+Diese Modelle werden kontinuierlich aktualisiert und optimiert, um die Effizienz und Genauigkeit der Interaktionen zu verbessern.
+
+## Integration
+
+### Integrationspunkte
+
+Die Integration der KI-Modelle erfolgt an mehreren strategischen Punkten innerhalb des Agent Chat Systems. Diese Integrationspunkte sind entscheidend für die nahtlose Funktionalität und umfassen:
+
+- **Anfrageverarbeitung**: Bei der Eingabe einer Benutzeranfrage wird diese zunächst durch das NLP-Modul geleitet, das die Anfrage analysiert und an das entsprechende KI-Modell weiterleitet.
+- **Antwortgenerierung**: Die generierten Antworten werden durch das GPT-3-Modell erstellt und anschließend durch das Sentiment Analysis Model überprüft, um sicherzustellen, dass die Antwort dem emotionalen Kontext des Benutzers entspricht.
+- **Datenanalyse**: Die gesammelten Daten aus den Interaktionen werden durch BERT analysiert, um Muster und Trends zu identifizieren, die zur Verbesserung der Systemleistung beitragen können.
+- **Feedback-Schleife**: Eine kontinuierliche Feedback-Schleife ermöglicht es, die Modelle basierend auf Benutzerinteraktionen und Agenten-Feedback zu verfeinern und anzupassen.
+
+### Technische Implementierung
+
+Die Implementierung der KI-Integration erfolgt über spezialisierte APIs, die in die FastAPI-Anwendung eingebettet sind. Diese APIs ermöglichen eine effiziente Kommunikation zwischen den verschiedenen Modulen und den KI-Modellen. Die Integration ist so gestaltet, dass sie skalierbar und erweiterbar ist, um zukünftige Anforderungen und technologische Fortschritte zu berücksichtigen.
+
+Durch die sorgfältige Auswahl und Integration dieser KI-Modelle wird sichergestellt, dass das Agent Chat System nicht nur effizient, sondern auch flexibel und anpassungsfähig bleibt, um den sich ständig ändernden Anforderungen der Benutzer gerecht zu werden.
+
+## Authentication
+
+```md
+# Authentication
+
+In diesem Abschnitt des "Agent Chat System Handbook" wird das Authentifizierungssystem detailliert beschrieben. Die Authentifizierung ist ein kritischer Bestandteil des Systems, der sicherstellt, dass nur autorisierte Benutzer Zugriff auf die Anwendung und ihre Funktionen haben. Wir werden die verschiedenen Authentifizierungsmethoden sowie die Sicherheitsmaßnahmen, die implementiert wurden, um die Integrität und Vertraulichkeit der Benutzerdaten zu gewährleisten, untersuchen.
+
+## Methoden
+
+### Authentifizierungsmethoden
+
+Das Agent Chat System unterstützt mehrere Authentifizierungsmethoden, um Flexibilität und Sicherheit zu bieten. Die wichtigsten Methoden sind:
+
+1. **Token-basierte Authentifizierung**:
+ - **Beschreibung**: Diese Methode verwendet JSON Web Tokens (JWT), um Benutzer zu authentifizieren. Nach erfolgreicher Anmeldung erhält der Benutzer ein Token, das bei jeder Anfrage an den Server gesendet wird.
+ - **Vorteile**: Erhöhte Sicherheit durch zeitlich begrenzte Token und die Möglichkeit, Token zu widerrufen.
+ - **Implementierung**: Der Token wird im Header der HTTP-Anfrage übermittelt und vom Server validiert.
+
+2. **OAuth 2.0**:
+ - **Beschreibung**: OAuth 2.0 ist ein weit verbreitetes Protokoll, das es Benutzern ermöglicht, sich mit ihren bestehenden Konten von Drittanbietern (z.B. Google, Facebook) anzumelden.
+ - **Vorteile**: Benutzerfreundlichkeit und erhöhte Sicherheit, da keine Passwörter direkt im System gespeichert werden müssen.
+ - **Implementierung**: Die Anwendung leitet den Benutzer zur Authentifizierungsseite des Drittanbieters weiter und erhält nach erfolgreicher Authentifizierung ein Zugriffstoken.
+
+3. **Zwei-Faktor-Authentifizierung (2FA)**:
+ - **Beschreibung**: Diese Methode fügt eine zusätzliche Sicherheitsebene hinzu, indem sie einen zweiten Authentifizierungsfaktor erfordert, z.B. einen SMS-Code oder eine Authentifizierungs-App.
+ - **Vorteile**: Erhöhte Sicherheit durch die Kombination von etwas, das der Benutzer kennt (Passwort) und etwas, das der Benutzer hat (zweiter Faktor).
+ - **Implementierung**: Nach der Eingabe des Passworts wird der Benutzer aufgefordert, den zweiten Faktor einzugeben, bevor der Zugriff gewährt wird.
+
+## Sicherheit
+
+### Sicherheitsmaßnahmen
+
+Um die Sicherheit der Authentifizierung im Agent Chat System zu gewährleisten, wurden mehrere Maßnahmen implementiert:
+
+1. **Datenverschlüsselung**:
+ - Alle sensiblen Daten, einschließlich Passwörter und Token, werden mit starken Verschlüsselungsalgorithmen gespeichert und übertragen. Dies schützt die Daten vor unbefugtem Zugriff und Manipulation.
+
+2. **Sichere Passwortspeicherung**:
+ - Passwörter werden nicht im Klartext gespeichert. Stattdessen werden sie mit einem sicheren Hashing-Algorithmus (z.B. bcrypt) gehasht, bevor sie in der Datenbank gespeichert werden.
+
+3. **Regelmäßige Sicherheitsüberprüfungen**:
+ - Das System wird regelmäßig auf Sicherheitslücken überprüft, und es werden Patches und Updates angewendet, um bekannte Schwachstellen zu beheben.
+
+4. **Sitzungsverwaltung**:
+ - Sitzungen werden überwacht und bei Inaktivität automatisch abgemeldet, um das Risiko von Sitzungsentführungen zu minimieren.
+
+5. **Protokollierung und Überwachung**:
+ - Alle Authentifizierungsversuche und sicherheitsrelevanten Ereignisse werden protokolliert und überwacht, um verdächtige Aktivitäten frühzeitig zu erkennen und darauf zu reagieren.
+
+Durch die Implementierung dieser Methoden und Sicherheitsmaßnahmen stellt das Agent Chat System sicher, dass die Authentifizierung sowohl benutzerfreundlich als auch sicher ist, und schützt die Integrität und Vertraulichkeit der Benutzerdaten effektiv.
+```
+
+
+## Conclusion
+
+```md
+## Fazit
+
+In diesem Handbuch zum "Agent Chat System" haben wir die wesentlichen Komponenten und Prozesse detailliert beschrieben, die für den erfolgreichen Einsatz und die Verwaltung eines Chat-Agenten-Systems erforderlich sind. Die behandelten Themen umfassen die Einrichtung der FastAPI-Anwendung, das Benutzer- und Mandatsmanagement, die Attributverwaltung, das Prompt-Management, Dateioperationen, das Workflow-Management, die Integration von Künstlicher Intelligenz sowie die Authentifizierung.
+
+### Zusammenfassung der Hauptpunkte
+
+1. **FastAPI Setup**: Wir haben die Schritte zur Einrichtung und Konfiguration der FastAPI-Anwendung erläutert, um eine stabile Grundlage für das Agentensystem zu schaffen.
+
+2. **Benutzerverwaltung**: Die Verwaltung von Benutzern und deren Rollen ist entscheidend für die Sicherheit und Effizienz des Systems. Wir haben die Methoden zur Erstellung, Aktualisierung und Löschung von Benutzerkonten behandelt.
+
+3. **AI-Integration**: Die Integration von KI-Technologien ermöglicht es dem System, intelligentere und kontextbezogene Antworten zu generieren. Wir haben die Implementierung und Optimierung dieser Funktionalität beschrieben.
+
+4. **Authentifizierung**: Sicherheit ist ein zentrales Element jeder Anwendung. Wir haben die Authentifizierungsmechanismen und deren Implementierung im System detailliert dargestellt.
+
+### Empfehlungen und nächste Schritte
+
+- **Regelmäßige Updates**: Stellen Sie sicher, dass alle Systemkomponenten regelmäßig aktualisiert werden, um Sicherheitslücken zu schließen und die Leistung zu optimieren.
+- **Erweiterung der AI-Funktionalitäten**: Erwägen Sie die Implementierung fortschrittlicherer KI-Modelle, um die Interaktionsqualität weiter zu verbessern.
+- **Benutzerfeedback einholen**: Nutzen Sie das Feedback der Benutzer, um kontinuierlich Verbesserungen am System vorzunehmen.
+
+### Bedeutung des Dokuments
+
+Dieses Handbuch dient als umfassende Ressource für technische Fachleute, die für die Implementierung und Wartung des Agent Chat Systems verantwortlich sind. Es bietet nicht nur eine detaillierte Anleitung zur Einrichtung und Verwaltung des Systems, sondern auch wertvolle Einblicke in die Optimierung der Benutzererfahrung und der Systemleistung. Mit diesem Wissen sind Sie bestens gerüstet, um ein effizientes und sicheres Chat-Agenten-System zu betreiben.
+
+Wir hoffen, dass dieses Handbuch Ihnen als wertvolle Referenz dient und Sie bei der erfolgreichen Implementierung und Verwaltung Ihres Agent Chat Systems unterstützt.
+```
diff --git a/routes/routeWorkflows.py b/routes/routeWorkflows.py
index 7cf56c72..622198b4 100644
--- a/routes/routeWorkflows.py
+++ b/routes/routeWorkflows.py
@@ -592,16 +592,16 @@ async def previewFile(
base64Encoded = not isText
if isText:
- # Convert to string and limit to 1000 chars for preview
+ # Convert to string without trim for preview
if isinstance(fileData, bytes):
try:
- filePreview = fileData.decode('utf-8')[:1000]
+ filePreview = fileData.decode('utf-8')
previewData = filePreview
except UnicodeDecodeError:
# Try other encodings
for encoding in ['latin-1', 'cp1252', 'iso-8859-1']:
try:
- filePreview = fileData.decode(encoding)[:1000]
+ filePreview = fileData.decode(encoding)
previewData = filePreview
break
except UnicodeDecodeError:
@@ -613,7 +613,7 @@ async def previewFile(
previewData = base64.b64encode(fileData).decode('utf-8')
base64Encoded = True
- # Return file metadata with limited preview and base64Encoded flag
+ # Return file metadata with preview and base64Encoded flag
return {
"id": fileId,
"name": file.get("name"),
diff --git a/static/100_first_77_primes.txt b/static/100_first_77_primes.txt
deleted file mode 100644
index 87725e78..00000000
--- a/static/100_first_77_primes.txt
+++ /dev/null
@@ -1,77 +0,0 @@
-2
-3
-5
-7
-11
-13
-17
-19
-23
-29
-31
-37
-41
-43
-47
-53
-59
-61
-67
-71
-73
-79
-83
-89
-97
-101
-103
-107
-109
-113
-127
-131
-137
-139
-149
-151
-157
-163
-167
-173
-179
-181
-191
-193
-197
-199
-211
-223
-227
-229
-233
-239
-241
-251
-257
-263
-269
-271
-277
-281
-283
-293
-307
-311
-313
-317
-331
-337
-347
-349
-353
-359
-367
-373
-379
-383
-389
\ No newline at end of file
diff --git a/static/101_generated_code.py b/static/101_generated_code.py
deleted file mode 100644
index c4ae0f1b..00000000
--- a/static/101_generated_code.py
+++ /dev/null
@@ -1,38 +0,0 @@
-inputFiles = [] # DO NOT CHANGE THIS LINE
-
-def is_prime(n):
- if n <= 1:
- return False
- if n <= 3:
- return True
- if n % 2 == 0 or n % 3 == 0:
- return False
- i = 5
- while i * i <= n:
- if n % i == 0 or n % (i + 2) == 0:
- return False
- i += 6
- return True
-
-def generate_primes(count):
- primes = []
- num = 2
- while len(primes) < count:
- if is_prime(num):
- primes.append(num)
- num += 1
- return primes
-
-primes = generate_primes(123)
-prime_numbers_content = "\n".join(map(str, primes))
-
-result = {
- "prime_numbers.txt": {
- "content": prime_numbers_content,
- "base64Encoded": False,
- "contentType": "text/plain"
- }
-}
-
-import json
-print(json.dumps(result))
\ No newline at end of file
diff --git a/static/102_execution_history.json b/static/102_execution_history.json
deleted file mode 100644
index 2336e057..00000000
--- a/static/102_execution_history.json
+++ /dev/null
@@ -1,19 +0,0 @@
-[
- {
- "attempt": 1,
- "code": "inputFiles = [] # DO NOT CHANGE THIS LINE\n\ndef is_prime(n):\n if n <= 1:\n return False\n if n <= 3:\n return True\n if n % 2 == 0 or n % 3 == 0:\n return False\n i = 5\n while i * i <= n:\n if n % i == 0 or n % (i + 2) == 0:\n return False\n i += 6\n return True\n\ndef generate_primes(count):\n primes = []\n num = 2\n while len(primes) < count:\n if is_prime(num):\n primes.append(num)\n num += 1\n return primes\n\nprimes = generate_primes(123)\nprime_numbers_content = \"\\n\".join(map(str, primes))\n\nresult = {\n \"prime_numbers.txt\": {\n \"content\": prime_numbers_content,\n \"base64Encoded\": False,\n \"contentType\": \"text/plain\"\n }\n}\n\nimport json\nprint(json.dumps(result))",
- "result": {
- "success": true,
- "output": "{\"prime_numbers.txt\": {\"content\": \"2\\n3\\n5\\n7\\n11\\n13\\n17\\n19\\n23\\n29\\n31\\n37\\n41\\n43\\n47\\n53\\n59\\n61\\n67\\n71\\n73\\n79\\n83\\n89\\n97\\n101\\n103\\n107\\n109\\n113\\n127\\n131\\n137\\n139\\n149\\n151\\n157\\n163\\n167\\n173\\n179\\n181\\n191\\n193\\n197\\n199\\n211\\n223\\n227\\n229\\n233\\n239\\n241\\n251\\n257\\n263\\n269\\n271\\n277\\n281\\n283\\n293\\n307\\n311\\n313\\n317\\n331\\n337\\n347\\n349\\n353\\n359\\n367\\n373\\n379\\n383\\n389\\n397\\n401\\n409\\n419\\n421\\n431\\n433\\n439\\n443\\n449\\n457\\n461\\n463\\n467\\n479\\n487\\n491\\n499\\n503\\n509\\n521\\n523\\n541\\n547\\n557\\n563\\n569\\n571\\n577\\n587\\n593\\n599\\n601\\n607\\n613\\n617\\n619\\n631\\n641\\n643\\n647\\n653\\n659\\n661\\n673\\n677\", \"base64Encoded\": false, \"contentType\": \"text/plain\"}}\n",
- "error": "",
- "result": {
- "prime_numbers.txt": {
- "content": "2\n3\n5\n7\n11\n13\n17\n19\n23\n29\n31\n37\n41\n43\n47\n53\n59\n61\n67\n71\n73\n79\n83\n89\n97\n101\n103\n107\n109\n113\n127\n131\n137\n139\n149\n151\n157\n163\n167\n173\n179\n181\n191\n193\n197\n199\n211\n223\n227\n229\n233\n239\n241\n251\n257\n263\n269\n271\n277\n281\n283\n293\n307\n311\n313\n317\n331\n337\n347\n349\n353\n359\n367\n373\n379\n383\n389\n397\n401\n409\n419\n421\n431\n433\n439\n443\n449\n457\n461\n463\n467\n479\n487\n491\n499\n503\n509\n521\n523\n541\n547\n557\n563\n569\n571\n577\n587\n593\n599\n601\n607\n613\n617\n619\n631\n641\n643\n647\n653\n659\n661\n673\n677",
- "base64Encoded": false,
- "contentType": "text/plain"
- }
- },
- "exitCode": 0
- }
- }
-]
\ No newline at end of file
diff --git a/static/103_generated_code.py b/static/103_generated_code.py
deleted file mode 100644
index b17aa80e..00000000
--- a/static/103_generated_code.py
+++ /dev/null
@@ -1,40 +0,0 @@
-inputFiles = [] # DO NOT CHANGE THIS LINE
-
-# REQUIREMENTS:
-
-def is_prime(n):
- if n <= 1:
- return False
- if n <= 3:
- return True
- if n % 2 == 0 or n % 3 == 0:
- return False
- i = 5
- while i * i <= n:
- if n % i == 0 or n % (i + 2) == 0:
- return False
- i += 6
- return True
-
-def generate_primes(count):
- primes = []
- num = 2
- while len(primes) < count:
- if is_prime(num):
- primes.append(num)
- num += 1
- return primes
-
-primes = generate_primes(202)
-prime_numbers_content = "\n".join(map(str, primes))
-
-result = {
- "prime_numbers.txt": {
- "content": prime_numbers_content,
- "base64Encoded": False,
- "contentType": "text/plain"
- }
-}
-
-import json
-print(json.dumps(result))
\ No newline at end of file
diff --git a/static/104_execution_history.json b/static/104_execution_history.json
deleted file mode 100644
index d8d0cc2c..00000000
--- a/static/104_execution_history.json
+++ /dev/null
@@ -1,19 +0,0 @@
-[
- {
- "attempt": 1,
- "code": "inputFiles = [] # DO NOT CHANGE THIS LINE\n\n# REQUIREMENTS: \n\ndef is_prime(n):\n if n <= 1:\n return False\n if n <= 3:\n return True\n if n % 2 == 0 or n % 3 == 0:\n return False\n i = 5\n while i * i <= n:\n if n % i == 0 or n % (i + 2) == 0:\n return False\n i += 6\n return True\n\ndef generate_primes(count):\n primes = []\n num = 2\n while len(primes) < count:\n if is_prime(num):\n primes.append(num)\n num += 1\n return primes\n\nprimes = generate_primes(202)\nprime_numbers_content = \"\\n\".join(map(str, primes))\n\nresult = {\n \"prime_numbers.txt\": {\n \"content\": prime_numbers_content,\n \"base64Encoded\": False,\n \"contentType\": \"text/plain\"\n }\n}\n\nimport json\nprint(json.dumps(result))",
- "result": {
- "success": true,
- "output": "{\"prime_numbers.txt\": {\"content\": \"2\\n3\\n5\\n7\\n11\\n13\\n17\\n19\\n23\\n29\\n31\\n37\\n41\\n43\\n47\\n53\\n59\\n61\\n67\\n71\\n73\\n79\\n83\\n89\\n97\\n101\\n103\\n107\\n109\\n113\\n127\\n131\\n137\\n139\\n149\\n151\\n157\\n163\\n167\\n173\\n179\\n181\\n191\\n193\\n197\\n199\\n211\\n223\\n227\\n229\\n233\\n239\\n241\\n251\\n257\\n263\\n269\\n271\\n277\\n281\\n283\\n293\\n307\\n311\\n313\\n317\\n331\\n337\\n347\\n349\\n353\\n359\\n367\\n373\\n379\\n383\\n389\\n397\\n401\\n409\\n419\\n421\\n431\\n433\\n439\\n443\\n449\\n457\\n461\\n463\\n467\\n479\\n487\\n491\\n499\\n503\\n509\\n521\\n523\\n541\\n547\\n557\\n563\\n569\\n571\\n577\\n587\\n593\\n599\\n601\\n607\\n613\\n617\\n619\\n631\\n641\\n643\\n647\\n653\\n659\\n661\\n673\\n677\\n683\\n691\\n701\\n709\\n719\\n727\\n733\\n739\\n743\\n751\\n757\\n761\\n769\\n773\\n787\\n797\\n809\\n811\\n821\\n823\\n827\\n829\\n839\\n853\\n857\\n859\\n863\\n877\\n881\\n883\\n887\\n907\\n911\\n919\\n929\\n937\\n941\\n947\\n953\\n967\\n971\\n977\\n983\\n991\\n997\\n1009\\n1013\\n1019\\n1021\\n1031\\n1033\\n1039\\n1049\\n1051\\n1061\\n1063\\n1069\\n1087\\n1091\\n1093\\n1097\\n1103\\n1109\\n1117\\n1123\\n1129\\n1151\\n1153\\n1163\\n1171\\n1181\\n1187\\n1193\\n1201\\n1213\\n1217\\n1223\\n1229\\n1231\", \"base64Encoded\": false, \"contentType\": \"text/plain\"}}\n",
- "error": "",
- "result": {
- "prime_numbers.txt": {
- "content": "2\n3\n5\n7\n11\n13\n17\n19\n23\n29\n31\n37\n41\n43\n47\n53\n59\n61\n67\n71\n73\n79\n83\n89\n97\n101\n103\n107\n109\n113\n127\n131\n137\n139\n149\n151\n157\n163\n167\n173\n179\n181\n191\n193\n197\n199\n211\n223\n227\n229\n233\n239\n241\n251\n257\n263\n269\n271\n277\n281\n283\n293\n307\n311\n313\n317\n331\n337\n347\n349\n353\n359\n367\n373\n379\n383\n389\n397\n401\n409\n419\n421\n431\n433\n439\n443\n449\n457\n461\n463\n467\n479\n487\n491\n499\n503\n509\n521\n523\n541\n547\n557\n563\n569\n571\n577\n587\n593\n599\n601\n607\n613\n617\n619\n631\n641\n643\n647\n653\n659\n661\n673\n677\n683\n691\n701\n709\n719\n727\n733\n739\n743\n751\n757\n761\n769\n773\n787\n797\n809\n811\n821\n823\n827\n829\n839\n853\n857\n859\n863\n877\n881\n883\n887\n907\n911\n919\n929\n937\n941\n947\n953\n967\n971\n977\n983\n991\n997\n1009\n1013\n1019\n1021\n1031\n1033\n1039\n1049\n1051\n1061\n1063\n1069\n1087\n1091\n1093\n1097\n1103\n1109\n1117\n1123\n1129\n1151\n1153\n1163\n1171\n1181\n1187\n1193\n1201\n1213\n1217\n1223\n1229\n1231",
- "base64Encoded": false,
- "contentType": "text/plain"
- }
- },
- "exitCode": 0
- }
- }
-]
\ No newline at end of file
diff --git a/static/105_prime_numbers.txt b/static/105_prime_numbers.txt
deleted file mode 100644
index 75099161..00000000
--- a/static/105_prime_numbers.txt
+++ /dev/null
@@ -1,202 +0,0 @@
-2
-3
-5
-7
-11
-13
-17
-19
-23
-29
-31
-37
-41
-43
-47
-53
-59
-61
-67
-71
-73
-79
-83
-89
-97
-101
-103
-107
-109
-113
-127
-131
-137
-139
-149
-151
-157
-163
-167
-173
-179
-181
-191
-193
-197
-199
-211
-223
-227
-229
-233
-239
-241
-251
-257
-263
-269
-271
-277
-281
-283
-293
-307
-311
-313
-317
-331
-337
-347
-349
-353
-359
-367
-373
-379
-383
-389
-397
-401
-409
-419
-421
-431
-433
-439
-443
-449
-457
-461
-463
-467
-479
-487
-491
-499
-503
-509
-521
-523
-541
-547
-557
-563
-569
-571
-577
-587
-593
-599
-601
-607
-613
-617
-619
-631
-641
-643
-647
-653
-659
-661
-673
-677
-683
-691
-701
-709
-719
-727
-733
-739
-743
-751
-757
-761
-769
-773
-787
-797
-809
-811
-821
-823
-827
-829
-839
-853
-857
-859
-863
-877
-881
-883
-887
-907
-911
-919
-929
-937
-941
-947
-953
-967
-971
-977
-983
-991
-997
-1009
-1013
-1019
-1021
-1031
-1033
-1039
-1049
-1051
-1061
-1063
-1069
-1087
-1091
-1093
-1097
-1103
-1109
-1117
-1123
-1129
-1151
-1153
-1163
-1171
-1181
-1187
-1193
-1201
-1213
-1217
-1223
-1229
-1231
\ No newline at end of file
diff --git a/static/106_generated_code.py b/static/106_generated_code.py
deleted file mode 100644
index 44c125b7..00000000
--- a/static/106_generated_code.py
+++ /dev/null
@@ -1,38 +0,0 @@
-inputFiles = [] # DO NOT CHANGE THIS LINE
-
-def is_prime(n):
- if n <= 1:
- return False
- if n <= 3:
- return True
- if n % 2 == 0 or n % 3 == 0:
- return False
- i = 5
- while i * i <= n:
- if n % i == 0 or n % (i + 2) == 0:
- return False
- i += 6
- return True
-
-def generate_primes(limit):
- primes = []
- num = 2
- while len(primes) < limit:
- if is_prime(num):
- primes.append(num)
- num += 1
- return primes
-
-primes = generate_primes(1000)
-primes_content = "\n".join(map(str, primes))
-
-result = {
- "prime_numbers.txt": {
- "content": primes_content,
- "base64Encoded": False,
- "contentType": "text/plain"
- }
-}
-
-import json
-print(json.dumps(result))
\ No newline at end of file
diff --git a/static/107_execution_history.json b/static/107_execution_history.json
deleted file mode 100644
index 6ff5f3f6..00000000
--- a/static/107_execution_history.json
+++ /dev/null
@@ -1,19 +0,0 @@
-[
- {
- "attempt": 1,
- "code": "inputFiles = [] # DO NOT CHANGE THIS LINE\n\ndef is_prime(n):\n if n <= 1:\n return False\n if n <= 3:\n return True\n if n % 2 == 0 or n % 3 == 0:\n return False\n i = 5\n while i * i <= n:\n if n % i == 0 or n % (i + 2) == 0:\n return False\n i += 6\n return True\n\ndef generate_primes(limit):\n primes = []\n num = 2\n while len(primes) < limit:\n if is_prime(num):\n primes.append(num)\n num += 1\n return primes\n\nprimes = generate_primes(1000)\nprimes_content = \"\\n\".join(map(str, primes))\n\nresult = {\n \"prime_numbers.txt\": {\n \"content\": primes_content,\n \"base64Encoded\": False,\n \"contentType\": \"text/plain\"\n }\n}\n\nimport json\nprint(json.dumps(result))",
- "result": {
- "success": true,
- "output": "{\"prime_numbers.txt\": {\"content\": \"2\\n3\\n5\\n7\\n11\\n13\\n17\\n19\\n23\\n29\\n31\\n37\\n41\\n43\\n47\\n53\\n59\\n61\\n67\\n71\\n73\\n79\\n83\\n89\\n97\\n101\\n103\\n107\\n109\\n113\\n127\\n131\\n137\\n139\\n149\\n151\\n157\\n163\\n167\\n173\\n179\\n181\\n191\\n193\\n197\\n199\\n211\\n223\\n227\\n229\\n233\\n239\\n241\\n251\\n257\\n263\\n269\\n271\\n277\\n281\\n283\\n293\\n307\\n311\\n313\\n317\\n331\\n337\\n347\\n349\\n353\\n359\\n367\\n373\\n379\\n383\\n389\\n397\\n401\\n409\\n419\\n421\\n431\\n433\\n439\\n443\\n449\\n457\\n461\\n463\\n467\\n479\\n487\\n491\\n499\\n503\\n509\\n521\\n523\\n541\\n547\\n557\\n563\\n569\\n571\\n577\\n587\\n593\\n599\\n601\\n607\\n613\\n617\\n619\\n631\\n641\\n643\\n647\\n653\\n659\\n661\\n673\\n677\\n683\\n691\\n701\\n709\\n719\\n727\\n733\\n739\\n743\\n751\\n757\\n761\\n769\\n773\\n787\\n797\\n809\\n811\\n821\\n823\\n827\\n829\\n839\\n853\\n857\\n859\\n863\\n877\\n881\\n883\\n887\\n907\\n911\\n919\\n929\\n937\\n941\\n947\\n953\\n967\\n971\\n977\\n983\\n991\\n997\\n1009\\n1013\\n1019\\n1021\\n1031\\n1033\\n1039\\n1049\\n1051\\n1061\\n1063\\n1069\\n1087\\n1091\\n1093\\n1097\\n1103\\n1109\\n1117\\n1123\\n1129\\n1151\\n1153\\n1163\\n1171\\n1181\\n1187\\n1193\\n1201\\n1213\\n1217\\n1223\\n1229\\n1231\\n1237\\n1249\\n1259\\n1277\\n1279\\n1283\\n1289\\n1291\\n1297\\n1301\\n1303\\n1307\\n1319\\n1321\\n1327\\n1361\\n1367\\n1373\\n1381\\n1399\\n1409\\n1423\\n1427\\n1429\\n1433\\n1439\\n1447\\n1451\\n1453\\n1459\\n1471\\n1481\\n1483\\n1487\\n1489\\n1493\\n1499\\n1511\\n1523\\n1531\\n1543\\n1549\\n1553\\n1559\\n1567\\n1571\\n1579\\n1583\\n1597\\n1601\\n1607\\n1609\\n1613\\n1619\\n1621\\n1627\\n1637\\n1657\\n1663\\n1667\\n1669\\n1693\\n1697\\n1699\\n1709\\n1721\\n1723\\n1733\\n1741\\n1747\\n1753\\n1759\\n1777\\n1783\\n1787\\n1789\\n1801\\n1811\\n1823\\n1831\\n1847\\n1861\\n1867\\n1871\\n1873\\n1877\\n1879\\n1889\\n1901\\n1907\\n1913\\n1931\\n1933\\n1949\\n1951\\n1973\\n1979\\n1987\\n1993\\n1997\\n1999\\n2003\\n2011\\n2017\\n20
27\\n2029\\n2039\\n2053\\n2063\\n2069\\n2081\\n2083\\n2087\\n2089\\n2099\\n2111\\n2113\\n2129\\n2131\\n2137\\n2141\\n2143\\n2153\\n2161\\n2179\\n2203\\n2207\\n2213\\n2221\\n2237\\n2239\\n2243\\n2251\\n2267\\n2269\\n2273\\n2281\\n2287\\n2293\\n2297\\n2309\\n2311\\n2333\\n2339\\n2341\\n2347\\n2351\\n2357\\n2371\\n2377\\n2381\\n2383\\n2389\\n2393\\n2399\\n2411\\n2417\\n2423\\n2437\\n2441\\n2447\\n2459\\n2467\\n2473\\n2477\\n2503\\n2521\\n2531\\n2539\\n2543\\n2549\\n2551\\n2557\\n2579\\n2591\\n2593\\n2609\\n2617\\n2621\\n2633\\n2647\\n2657\\n2659\\n2663\\n2671\\n2677\\n2683\\n2687\\n2689\\n2693\\n2699\\n2707\\n2711\\n2713\\n2719\\n2729\\n2731\\n2741\\n2749\\n2753\\n2767\\n2777\\n2789\\n2791\\n2797\\n2801\\n2803\\n2819\\n2833\\n2837\\n2843\\n2851\\n2857\\n2861\\n2879\\n2887\\n2897\\n2903\\n2909\\n2917\\n2927\\n2939\\n2953\\n2957\\n2963\\n2969\\n2971\\n2999\\n3001\\n3011\\n3019\\n3023\\n3037\\n3041\\n3049\\n3061\\n3067\\n3079\\n3083\\n3089\\n3109\\n3119\\n3121\\n3137\\n3163\\n3167\\n3169\\n3181\\n3187\\n3191\\n3203\\n3209\\n3217\\n3221\\n3229\\n3251\\n3253\\n3257\\n3259\\n3271\\n3299\\n3301\\n3307\\n3313\\n3319\\n3323\\n3329\\n3331\\n3343\\n3347\\n3359\\n3361\\n3371\\n3373\\n3389\\n3391\\n3407\\n3413\\n3433\\n3449\\n3457\\n3461\\n3463\\n3467\\n3469\\n3491\\n3499\\n3511\\n3517\\n3527\\n3529\\n3533\\n3539\\n3541\\n3547\\n3557\\n3559\\n3571\\n3581\\n3583\\n3593\\n3607\\n3613\\n3617\\n3623\\n3631\\n3637\\n3643\\n3659\\n3671\\n3673\\n3677\\n3691\\n3697\\n3701\\n3709\\n3719\\n3727\\n3733\\n3739\\n3761\\n3767\\n3769\\n3779\\n3793\\n3797\\n3803\\n3821\\n3823\\n3833\\n3847\\n3851\\n3853\\n3863\\n3877\\n3881\\n3889\\n3907\\n3911\\n3917\\n3919\\n3923\\n3929\\n3931\\n3943\\n3947\\n3967\\n3989\\n4001\\n4003\\n4007\\n4013\\n4019\\n4021\\n4027\\n4049\\n4051\\n4057\\n4073\\n4079\\n4091\\n4093\\n4099\\n4111\\n4127\\n4129\\n4133\\n4139\\n4153\\n4157\\n4159\\n4177\\n4201\\n4211\\n4217\\n4219\\n4229\\n4231\\n4241\\n4243\\n4253\\n4259\\n4261\\n4271\\n4273\\n4283\\n4289\\n4297\\n4327\\n4337\\n
4339\\n4349\\n4357\\n4363\\n4373\\n4391\\n4397\\n4409\\n4421\\n4423\\n4441\\n4447\\n4451\\n4457\\n4463\\n4481\\n4483\\n4493\\n4507\\n4513\\n4517\\n4519\\n4523\\n4547\\n4549\\n4561\\n4567\\n4583\\n4591\\n4597\\n4603\\n4621\\n4637\\n4639\\n4643\\n4649\\n4651\\n4657\\n4663\\n4673\\n4679\\n4691\\n4703\\n4721\\n4723\\n4729\\n4733\\n4751\\n4759\\n4783\\n4787\\n4789\\n4793\\n4799\\n4801\\n4813\\n4817\\n4831\\n4861\\n4871\\n4877\\n4889\\n4903\\n4909\\n4919\\n4931\\n4933\\n4937\\n4943\\n4951\\n4957\\n4967\\n4969\\n4973\\n4987\\n4993\\n4999\\n5003\\n5009\\n5011\\n5021\\n5023\\n5039\\n5051\\n5059\\n5077\\n5081\\n5087\\n5099\\n5101\\n5107\\n5113\\n5119\\n5147\\n5153\\n5167\\n5171\\n5179\\n5189\\n5197\\n5209\\n5227\\n5231\\n5233\\n5237\\n5261\\n5273\\n5279\\n5281\\n5297\\n5303\\n5309\\n5323\\n5333\\n5347\\n5351\\n5381\\n5387\\n5393\\n5399\\n5407\\n5413\\n5417\\n5419\\n5431\\n5437\\n5441\\n5443\\n5449\\n5471\\n5477\\n5479\\n5483\\n5501\\n5503\\n5507\\n5519\\n5521\\n5527\\n5531\\n5557\\n5563\\n5569\\n5573\\n5581\\n5591\\n5623\\n5639\\n5641\\n5647\\n5651\\n5653\\n5657\\n5659\\n5669\\n5683\\n5689\\n5693\\n5701\\n5711\\n5717\\n5737\\n5741\\n5743\\n5749\\n5779\\n5783\\n5791\\n5801\\n5807\\n5813\\n5821\\n5827\\n5839\\n5843\\n5849\\n5851\\n5857\\n5861\\n5867\\n5869\\n5879\\n5881\\n5897\\n5903\\n5923\\n5927\\n5939\\n5953\\n5981\\n5987\\n6007\\n6011\\n6029\\n6037\\n6043\\n6047\\n6053\\n6067\\n6073\\n6079\\n6089\\n6091\\n6101\\n6113\\n6121\\n6131\\n6133\\n6143\\n6151\\n6163\\n6173\\n6197\\n6199\\n6203\\n6211\\n6217\\n6221\\n6229\\n6247\\n6257\\n6263\\n6269\\n6271\\n6277\\n6287\\n6299\\n6301\\n6311\\n6317\\n6323\\n6329\\n6337\\n6343\\n6353\\n6359\\n6361\\n6367\\n6373\\n6379\\n6389\\n6397\\n6421\\n6427\\n6449\\n6451\\n6469\\n6473\\n6481\\n6491\\n6521\\n6529\\n6547\\n6551\\n6553\\n6563\\n6569\\n6571\\n6577\\n6581\\n6599\\n6607\\n6619\\n6637\\n6653\\n6659\\n6661\\n6673\\n6679\\n6689\\n6691\\n6701\\n6703\\n6709\\n6719\\n6733\\n6737\\n6761\\n6763\\n6779\\n6781\\n6791\\n6793\\n6803\\n6823\\n6827\
\n6829\\n6833\\n6841\\n6857\\n6863\\n6869\\n6871\\n6883\\n6899\\n6907\\n6911\\n6917\\n6947\\n6949\\n6959\\n6961\\n6967\\n6971\\n6977\\n6983\\n6991\\n6997\\n7001\\n7013\\n7019\\n7027\\n7039\\n7043\\n7057\\n7069\\n7079\\n7103\\n7109\\n7121\\n7127\\n7129\\n7151\\n7159\\n7177\\n7187\\n7193\\n7207\\n7211\\n7213\\n7219\\n7229\\n7237\\n7243\\n7247\\n7253\\n7283\\n7297\\n7307\\n7309\\n7321\\n7331\\n7333\\n7349\\n7351\\n7369\\n7393\\n7411\\n7417\\n7433\\n7451\\n7457\\n7459\\n7477\\n7481\\n7487\\n7489\\n7499\\n7507\\n7517\\n7523\\n7529\\n7537\\n7541\\n7547\\n7549\\n7559\\n7561\\n7573\\n7577\\n7583\\n7589\\n7591\\n7603\\n7607\\n7621\\n7639\\n7643\\n7649\\n7669\\n7673\\n7681\\n7687\\n7691\\n7699\\n7703\\n7717\\n7723\\n7727\\n7741\\n7753\\n7757\\n7759\\n7789\\n7793\\n7817\\n7823\\n7829\\n7841\\n7853\\n7867\\n7873\\n7877\\n7879\\n7883\\n7901\\n7907\\n7919\", \"base64Encoded\": false, \"contentType\": \"text/plain\"}}\n",
- "error": "",
- "result": {
- "prime_numbers.txt": {
- "content": "2\n3\n5\n7\n11\n13\n17\n19\n23\n29\n31\n37\n41\n43\n47\n53\n59\n61\n67\n71\n73\n79\n83\n89\n97\n101\n103\n107\n109\n113\n127\n131\n137\n139\n149\n151\n157\n163\n167\n173\n179\n181\n191\n193\n197\n199\n211\n223\n227\n229\n233\n239\n241\n251\n257\n263\n269\n271\n277\n281\n283\n293\n307\n311\n313\n317\n331\n337\n347\n349\n353\n359\n367\n373\n379\n383\n389\n397\n401\n409\n419\n421\n431\n433\n439\n443\n449\n457\n461\n463\n467\n479\n487\n491\n499\n503\n509\n521\n523\n541\n547\n557\n563\n569\n571\n577\n587\n593\n599\n601\n607\n613\n617\n619\n631\n641\n643\n647\n653\n659\n661\n673\n677\n683\n691\n701\n709\n719\n727\n733\n739\n743\n751\n757\n761\n769\n773\n787\n797\n809\n811\n821\n823\n827\n829\n839\n853\n857\n859\n863\n877\n881\n883\n887\n907\n911\n919\n929\n937\n941\n947\n953\n967\n971\n977\n983\n991\n997\n1009\n1013\n1019\n1021\n1031\n1033\n1039\n1049\n1051\n1061\n1063\n1069\n1087\n1091\n1093\n1097\n1103\n1109\n1117\n1123\n1129\n1151\n1153\n1163\n1171\n1181\n1187\n1193\n1201\n1213\n1217\n1223\n1229\n1231\n1237\n1249\n1259\n1277\n1279\n1283\n1289\n1291\n1297\n1301\n1303\n1307\n1319\n1321\n1327\n1361\n1367\n1373\n1381\n1399\n1409\n1423\n1427\n1429\n1433\n1439\n1447\n1451\n1453\n1459\n1471\n1481\n1483\n1487\n1489\n1493\n1499\n1511\n1523\n1531\n1543\n1549\n1553\n1559\n1567\n1571\n1579\n1583\n1597\n1601\n1607\n1609\n1613\n1619\n1621\n1627\n1637\n1657\n1663\n1667\n1669\n1693\n1697\n1699\n1709\n1721\n1723\n1733\n1741\n1747\n1753\n1759\n1777\n1783\n1787\n1789\n1801\n1811\n1823\n1831\n1847\n1861\n1867\n1871\n1873\n1877\n1879\n1889\n1901\n1907\n1913\n1931\n1933\n1949\n1951\n1973\n1979\n1987\n1993\n1997\n1999\n2003\n2011\n2017\n2027\n2029\n2039\n2053\n2063\n2069\n2081\n2083\n2087\n2089\n2099\n2111\n2113\n2129\n2131\n2137\n2141\n2143\n2153\n2161\n2179\n2203\n2207\n2213\n2221\n2237\n2239\n2243\n2251\n2267\n2269\n2273\n2281\n2287\n2293\n2297\n2309\n2311\n2333\n2339\n2341\n2347\n2351\n2357\n2371\n2377\n2381\n2383\n2389\n2393\n2399\n2411\n2417\n2423\n2437\n2441\n2447\n2459\
n2467\n2473\n2477\n2503\n2521\n2531\n2539\n2543\n2549\n2551\n2557\n2579\n2591\n2593\n2609\n2617\n2621\n2633\n2647\n2657\n2659\n2663\n2671\n2677\n2683\n2687\n2689\n2693\n2699\n2707\n2711\n2713\n2719\n2729\n2731\n2741\n2749\n2753\n2767\n2777\n2789\n2791\n2797\n2801\n2803\n2819\n2833\n2837\n2843\n2851\n2857\n2861\n2879\n2887\n2897\n2903\n2909\n2917\n2927\n2939\n2953\n2957\n2963\n2969\n2971\n2999\n3001\n3011\n3019\n3023\n3037\n3041\n3049\n3061\n3067\n3079\n3083\n3089\n3109\n3119\n3121\n3137\n3163\n3167\n3169\n3181\n3187\n3191\n3203\n3209\n3217\n3221\n3229\n3251\n3253\n3257\n3259\n3271\n3299\n3301\n3307\n3313\n3319\n3323\n3329\n3331\n3343\n3347\n3359\n3361\n3371\n3373\n3389\n3391\n3407\n3413\n3433\n3449\n3457\n3461\n3463\n3467\n3469\n3491\n3499\n3511\n3517\n3527\n3529\n3533\n3539\n3541\n3547\n3557\n3559\n3571\n3581\n3583\n3593\n3607\n3613\n3617\n3623\n3631\n3637\n3643\n3659\n3671\n3673\n3677\n3691\n3697\n3701\n3709\n3719\n3727\n3733\n3739\n3761\n3767\n3769\n3779\n3793\n3797\n3803\n3821\n3823\n3833\n3847\n3851\n3853\n3863\n3877\n3881\n3889\n3907\n3911\n3917\n3919\n3923\n3929\n3931\n3943\n3947\n3967\n3989\n4001\n4003\n4007\n4013\n4019\n4021\n4027\n4049\n4051\n4057\n4073\n4079\n4091\n4093\n4099\n4111\n4127\n4129\n4133\n4139\n4153\n4157\n4159\n4177\n4201\n4211\n4217\n4219\n4229\n4231\n4241\n4243\n4253\n4259\n4261\n4271\n4273\n4283\n4289\n4297\n4327\n4337\n4339\n4349\n4357\n4363\n4373\n4391\n4397\n4409\n4421\n4423\n4441\n4447\n4451\n4457\n4463\n4481\n4483\n4493\n4507\n4513\n4517\n4519\n4523\n4547\n4549\n4561\n4567\n4583\n4591\n4597\n4603\n4621\n4637\n4639\n4643\n4649\n4651\n4657\n4663\n4673\n4679\n4691\n4703\n4721\n4723\n4729\n4733\n4751\n4759\n4783\n4787\n4789\n4793\n4799\n4801\n4813\n4817\n4831\n4861\n4871\n4877\n4889\n4903\n4909\n4919\n4931\n4933\n4937\n4943\n4951\n4957\n4967\n4969\n4973\n4987\n4993\n4999\n5003\n5009\n5011\n5021\n5023\n5039\n5051\n5059\n5077\n5081\n5087\n5099\n5101\n5107\n5113\n5119\n5147\n5153\n5167\n5171\n5179\n5189\n5197\n5209\n5227\n5231\n5233\n5237\n5
261\n5273\n5279\n5281\n5297\n5303\n5309\n5323\n5333\n5347\n5351\n5381\n5387\n5393\n5399\n5407\n5413\n5417\n5419\n5431\n5437\n5441\n5443\n5449\n5471\n5477\n5479\n5483\n5501\n5503\n5507\n5519\n5521\n5527\n5531\n5557\n5563\n5569\n5573\n5581\n5591\n5623\n5639\n5641\n5647\n5651\n5653\n5657\n5659\n5669\n5683\n5689\n5693\n5701\n5711\n5717\n5737\n5741\n5743\n5749\n5779\n5783\n5791\n5801\n5807\n5813\n5821\n5827\n5839\n5843\n5849\n5851\n5857\n5861\n5867\n5869\n5879\n5881\n5897\n5903\n5923\n5927\n5939\n5953\n5981\n5987\n6007\n6011\n6029\n6037\n6043\n6047\n6053\n6067\n6073\n6079\n6089\n6091\n6101\n6113\n6121\n6131\n6133\n6143\n6151\n6163\n6173\n6197\n6199\n6203\n6211\n6217\n6221\n6229\n6247\n6257\n6263\n6269\n6271\n6277\n6287\n6299\n6301\n6311\n6317\n6323\n6329\n6337\n6343\n6353\n6359\n6361\n6367\n6373\n6379\n6389\n6397\n6421\n6427\n6449\n6451\n6469\n6473\n6481\n6491\n6521\n6529\n6547\n6551\n6553\n6563\n6569\n6571\n6577\n6581\n6599\n6607\n6619\n6637\n6653\n6659\n6661\n6673\n6679\n6689\n6691\n6701\n6703\n6709\n6719\n6733\n6737\n6761\n6763\n6779\n6781\n6791\n6793\n6803\n6823\n6827\n6829\n6833\n6841\n6857\n6863\n6869\n6871\n6883\n6899\n6907\n6911\n6917\n6947\n6949\n6959\n6961\n6967\n6971\n6977\n6983\n6991\n6997\n7001\n7013\n7019\n7027\n7039\n7043\n7057\n7069\n7079\n7103\n7109\n7121\n7127\n7129\n7151\n7159\n7177\n7187\n7193\n7207\n7211\n7213\n7219\n7229\n7237\n7243\n7247\n7253\n7283\n7297\n7307\n7309\n7321\n7331\n7333\n7349\n7351\n7369\n7393\n7411\n7417\n7433\n7451\n7457\n7459\n7477\n7481\n7487\n7489\n7499\n7507\n7517\n7523\n7529\n7537\n7541\n7547\n7549\n7559\n7561\n7573\n7577\n7583\n7589\n7591\n7603\n7607\n7621\n7639\n7643\n7649\n7669\n7673\n7681\n7687\n7691\n7699\n7703\n7717\n7723\n7727\n7741\n7753\n7757\n7759\n7789\n7793\n7817\n7823\n7829\n7841\n7853\n7867\n7873\n7877\n7879\n7883\n7901\n7907\n7919",
- "base64Encoded": false,
- "contentType": "text/plain"
- }
- },
- "exitCode": 0
- }
- }
-]
\ No newline at end of file
diff --git a/static/108_generated_code.py b/static/108_generated_code.py
deleted file mode 100644
index a0f1a176..00000000
--- a/static/108_generated_code.py
+++ /dev/null
@@ -1,38 +0,0 @@
-inputFiles = [] # DO NOT CHANGE THIS LINE
-
-def is_prime(n):
- if n <= 1:
- return False
- if n <= 3:
- return True
- if n % 2 == 0 or n % 3 == 0:
- return False
- i = 5
- while i * i <= n:
- if n % i == 0 or n % (i + 2) == 0:
- return False
- i += 6
- return True
-
-def generate_primes(count):
- primes = []
- num = 2
- while len(primes) < count:
- if is_prime(num):
- primes.append(num)
- num += 1
- return primes
-
-primes = generate_primes(999)
-prime_numbers_content = "\n".join(map(str, primes))
-
-result = {
- "prime_numbers.txt": {
- "content": prime_numbers_content,
- "base64Encoded": False,
- "contentType": "text/plain"
- }
-}
-
-import json
-print(json.dumps(result))
\ No newline at end of file
diff --git a/static/109_execution_history.json b/static/109_execution_history.json
deleted file mode 100644
index e6816758..00000000
--- a/static/109_execution_history.json
+++ /dev/null
@@ -1,19 +0,0 @@
-[
- {
- "attempt": 1,
- "code": "inputFiles = [] # DO NOT CHANGE THIS LINE\n\ndef is_prime(n):\n if n <= 1:\n return False\n if n <= 3:\n return True\n if n % 2 == 0 or n % 3 == 0:\n return False\n i = 5\n while i * i <= n:\n if n % i == 0 or n % (i + 2) == 0:\n return False\n i += 6\n return True\n\ndef generate_primes(count):\n primes = []\n num = 2\n while len(primes) < count:\n if is_prime(num):\n primes.append(num)\n num += 1\n return primes\n\nprimes = generate_primes(999)\nprime_numbers_content = \"\\n\".join(map(str, primes))\n\nresult = {\n \"prime_numbers.txt\": {\n \"content\": prime_numbers_content,\n \"base64Encoded\": False,\n \"contentType\": \"text/plain\"\n }\n}\n\nimport json\nprint(json.dumps(result))",
- "result": {
- "success": true,
- "output": "{\"prime_numbers.txt\": {\"content\": \"2\\n3\\n5\\n7\\n11\\n13\\n17\\n19\\n23\\n29\\n31\\n37\\n41\\n43\\n47\\n53\\n59\\n61\\n67\\n71\\n73\\n79\\n83\\n89\\n97\\n101\\n103\\n107\\n109\\n113\\n127\\n131\\n137\\n139\\n149\\n151\\n157\\n163\\n167\\n173\\n179\\n181\\n191\\n193\\n197\\n199\\n211\\n223\\n227\\n229\\n233\\n239\\n241\\n251\\n257\\n263\\n269\\n271\\n277\\n281\\n283\\n293\\n307\\n311\\n313\\n317\\n331\\n337\\n347\\n349\\n353\\n359\\n367\\n373\\n379\\n383\\n389\\n397\\n401\\n409\\n419\\n421\\n431\\n433\\n439\\n443\\n449\\n457\\n461\\n463\\n467\\n479\\n487\\n491\\n499\\n503\\n509\\n521\\n523\\n541\\n547\\n557\\n563\\n569\\n571\\n577\\n587\\n593\\n599\\n601\\n607\\n613\\n617\\n619\\n631\\n641\\n643\\n647\\n653\\n659\\n661\\n673\\n677\\n683\\n691\\n701\\n709\\n719\\n727\\n733\\n739\\n743\\n751\\n757\\n761\\n769\\n773\\n787\\n797\\n809\\n811\\n821\\n823\\n827\\n829\\n839\\n853\\n857\\n859\\n863\\n877\\n881\\n883\\n887\\n907\\n911\\n919\\n929\\n937\\n941\\n947\\n953\\n967\\n971\\n977\\n983\\n991\\n997\\n1009\\n1013\\n1019\\n1021\\n1031\\n1033\\n1039\\n1049\\n1051\\n1061\\n1063\\n1069\\n1087\\n1091\\n1093\\n1097\\n1103\\n1109\\n1117\\n1123\\n1129\\n1151\\n1153\\n1163\\n1171\\n1181\\n1187\\n1193\\n1201\\n1213\\n1217\\n1223\\n1229\\n1231\\n1237\\n1249\\n1259\\n1277\\n1279\\n1283\\n1289\\n1291\\n1297\\n1301\\n1303\\n1307\\n1319\\n1321\\n1327\\n1361\\n1367\\n1373\\n1381\\n1399\\n1409\\n1423\\n1427\\n1429\\n1433\\n1439\\n1447\\n1451\\n1453\\n1459\\n1471\\n1481\\n1483\\n1487\\n1489\\n1493\\n1499\\n1511\\n1523\\n1531\\n1543\\n1549\\n1553\\n1559\\n1567\\n1571\\n1579\\n1583\\n1597\\n1601\\n1607\\n1609\\n1613\\n1619\\n1621\\n1627\\n1637\\n1657\\n1663\\n1667\\n1669\\n1693\\n1697\\n1699\\n1709\\n1721\\n1723\\n1733\\n1741\\n1747\\n1753\\n1759\\n1777\\n1783\\n1787\\n1789\\n1801\\n1811\\n1823\\n1831\\n1847\\n1861\\n1867\\n1871\\n1873\\n1877\\n1879\\n1889\\n1901\\n1907\\n1913\\n1931\\n1933\\n1949\\n1951\\n1973\\n1979\\n1987\\n1993\\n1997\\n1999\\n2003\\n2011\\n2017\\n20
27\\n2029\\n2039\\n2053\\n2063\\n2069\\n2081\\n2083\\n2087\\n2089\\n2099\\n2111\\n2113\\n2129\\n2131\\n2137\\n2141\\n2143\\n2153\\n2161\\n2179\\n2203\\n2207\\n2213\\n2221\\n2237\\n2239\\n2243\\n2251\\n2267\\n2269\\n2273\\n2281\\n2287\\n2293\\n2297\\n2309\\n2311\\n2333\\n2339\\n2341\\n2347\\n2351\\n2357\\n2371\\n2377\\n2381\\n2383\\n2389\\n2393\\n2399\\n2411\\n2417\\n2423\\n2437\\n2441\\n2447\\n2459\\n2467\\n2473\\n2477\\n2503\\n2521\\n2531\\n2539\\n2543\\n2549\\n2551\\n2557\\n2579\\n2591\\n2593\\n2609\\n2617\\n2621\\n2633\\n2647\\n2657\\n2659\\n2663\\n2671\\n2677\\n2683\\n2687\\n2689\\n2693\\n2699\\n2707\\n2711\\n2713\\n2719\\n2729\\n2731\\n2741\\n2749\\n2753\\n2767\\n2777\\n2789\\n2791\\n2797\\n2801\\n2803\\n2819\\n2833\\n2837\\n2843\\n2851\\n2857\\n2861\\n2879\\n2887\\n2897\\n2903\\n2909\\n2917\\n2927\\n2939\\n2953\\n2957\\n2963\\n2969\\n2971\\n2999\\n3001\\n3011\\n3019\\n3023\\n3037\\n3041\\n3049\\n3061\\n3067\\n3079\\n3083\\n3089\\n3109\\n3119\\n3121\\n3137\\n3163\\n3167\\n3169\\n3181\\n3187\\n3191\\n3203\\n3209\\n3217\\n3221\\n3229\\n3251\\n3253\\n3257\\n3259\\n3271\\n3299\\n3301\\n3307\\n3313\\n3319\\n3323\\n3329\\n3331\\n3343\\n3347\\n3359\\n3361\\n3371\\n3373\\n3389\\n3391\\n3407\\n3413\\n3433\\n3449\\n3457\\n3461\\n3463\\n3467\\n3469\\n3491\\n3499\\n3511\\n3517\\n3527\\n3529\\n3533\\n3539\\n3541\\n3547\\n3557\\n3559\\n3571\\n3581\\n3583\\n3593\\n3607\\n3613\\n3617\\n3623\\n3631\\n3637\\n3643\\n3659\\n3671\\n3673\\n3677\\n3691\\n3697\\n3701\\n3709\\n3719\\n3727\\n3733\\n3739\\n3761\\n3767\\n3769\\n3779\\n3793\\n3797\\n3803\\n3821\\n3823\\n3833\\n3847\\n3851\\n3853\\n3863\\n3877\\n3881\\n3889\\n3907\\n3911\\n3917\\n3919\\n3923\\n3929\\n3931\\n3943\\n3947\\n3967\\n3989\\n4001\\n4003\\n4007\\n4013\\n4019\\n4021\\n4027\\n4049\\n4051\\n4057\\n4073\\n4079\\n4091\\n4093\\n4099\\n4111\\n4127\\n4129\\n4133\\n4139\\n4153\\n4157\\n4159\\n4177\\n4201\\n4211\\n4217\\n4219\\n4229\\n4231\\n4241\\n4243\\n4253\\n4259\\n4261\\n4271\\n4273\\n4283\\n4289\\n4297\\n4327\\n4337\\n
4339\\n4349\\n4357\\n4363\\n4373\\n4391\\n4397\\n4409\\n4421\\n4423\\n4441\\n4447\\n4451\\n4457\\n4463\\n4481\\n4483\\n4493\\n4507\\n4513\\n4517\\n4519\\n4523\\n4547\\n4549\\n4561\\n4567\\n4583\\n4591\\n4597\\n4603\\n4621\\n4637\\n4639\\n4643\\n4649\\n4651\\n4657\\n4663\\n4673\\n4679\\n4691\\n4703\\n4721\\n4723\\n4729\\n4733\\n4751\\n4759\\n4783\\n4787\\n4789\\n4793\\n4799\\n4801\\n4813\\n4817\\n4831\\n4861\\n4871\\n4877\\n4889\\n4903\\n4909\\n4919\\n4931\\n4933\\n4937\\n4943\\n4951\\n4957\\n4967\\n4969\\n4973\\n4987\\n4993\\n4999\\n5003\\n5009\\n5011\\n5021\\n5023\\n5039\\n5051\\n5059\\n5077\\n5081\\n5087\\n5099\\n5101\\n5107\\n5113\\n5119\\n5147\\n5153\\n5167\\n5171\\n5179\\n5189\\n5197\\n5209\\n5227\\n5231\\n5233\\n5237\\n5261\\n5273\\n5279\\n5281\\n5297\\n5303\\n5309\\n5323\\n5333\\n5347\\n5351\\n5381\\n5387\\n5393\\n5399\\n5407\\n5413\\n5417\\n5419\\n5431\\n5437\\n5441\\n5443\\n5449\\n5471\\n5477\\n5479\\n5483\\n5501\\n5503\\n5507\\n5519\\n5521\\n5527\\n5531\\n5557\\n5563\\n5569\\n5573\\n5581\\n5591\\n5623\\n5639\\n5641\\n5647\\n5651\\n5653\\n5657\\n5659\\n5669\\n5683\\n5689\\n5693\\n5701\\n5711\\n5717\\n5737\\n5741\\n5743\\n5749\\n5779\\n5783\\n5791\\n5801\\n5807\\n5813\\n5821\\n5827\\n5839\\n5843\\n5849\\n5851\\n5857\\n5861\\n5867\\n5869\\n5879\\n5881\\n5897\\n5903\\n5923\\n5927\\n5939\\n5953\\n5981\\n5987\\n6007\\n6011\\n6029\\n6037\\n6043\\n6047\\n6053\\n6067\\n6073\\n6079\\n6089\\n6091\\n6101\\n6113\\n6121\\n6131\\n6133\\n6143\\n6151\\n6163\\n6173\\n6197\\n6199\\n6203\\n6211\\n6217\\n6221\\n6229\\n6247\\n6257\\n6263\\n6269\\n6271\\n6277\\n6287\\n6299\\n6301\\n6311\\n6317\\n6323\\n6329\\n6337\\n6343\\n6353\\n6359\\n6361\\n6367\\n6373\\n6379\\n6389\\n6397\\n6421\\n6427\\n6449\\n6451\\n6469\\n6473\\n6481\\n6491\\n6521\\n6529\\n6547\\n6551\\n6553\\n6563\\n6569\\n6571\\n6577\\n6581\\n6599\\n6607\\n6619\\n6637\\n6653\\n6659\\n6661\\n6673\\n6679\\n6689\\n6691\\n6701\\n6703\\n6709\\n6719\\n6733\\n6737\\n6761\\n6763\\n6779\\n6781\\n6791\\n6793\\n6803\\n6823\\n6827\
\n6829\\n6833\\n6841\\n6857\\n6863\\n6869\\n6871\\n6883\\n6899\\n6907\\n6911\\n6917\\n6947\\n6949\\n6959\\n6961\\n6967\\n6971\\n6977\\n6983\\n6991\\n6997\\n7001\\n7013\\n7019\\n7027\\n7039\\n7043\\n7057\\n7069\\n7079\\n7103\\n7109\\n7121\\n7127\\n7129\\n7151\\n7159\\n7177\\n7187\\n7193\\n7207\\n7211\\n7213\\n7219\\n7229\\n7237\\n7243\\n7247\\n7253\\n7283\\n7297\\n7307\\n7309\\n7321\\n7331\\n7333\\n7349\\n7351\\n7369\\n7393\\n7411\\n7417\\n7433\\n7451\\n7457\\n7459\\n7477\\n7481\\n7487\\n7489\\n7499\\n7507\\n7517\\n7523\\n7529\\n7537\\n7541\\n7547\\n7549\\n7559\\n7561\\n7573\\n7577\\n7583\\n7589\\n7591\\n7603\\n7607\\n7621\\n7639\\n7643\\n7649\\n7669\\n7673\\n7681\\n7687\\n7691\\n7699\\n7703\\n7717\\n7723\\n7727\\n7741\\n7753\\n7757\\n7759\\n7789\\n7793\\n7817\\n7823\\n7829\\n7841\\n7853\\n7867\\n7873\\n7877\\n7879\\n7883\\n7901\\n7907\", \"base64Encoded\": false, \"contentType\": \"text/plain\"}}\n",
- "error": "",
- "result": {
- "prime_numbers.txt": {
- "content": "2\n3\n5\n7\n11\n13\n17\n19\n23\n29\n31\n37\n41\n43\n47\n53\n59\n61\n67\n71\n73\n79\n83\n89\n97\n101\n103\n107\n109\n113\n127\n131\n137\n139\n149\n151\n157\n163\n167\n173\n179\n181\n191\n193\n197\n199\n211\n223\n227\n229\n233\n239\n241\n251\n257\n263\n269\n271\n277\n281\n283\n293\n307\n311\n313\n317\n331\n337\n347\n349\n353\n359\n367\n373\n379\n383\n389\n397\n401\n409\n419\n421\n431\n433\n439\n443\n449\n457\n461\n463\n467\n479\n487\n491\n499\n503\n509\n521\n523\n541\n547\n557\n563\n569\n571\n577\n587\n593\n599\n601\n607\n613\n617\n619\n631\n641\n643\n647\n653\n659\n661\n673\n677\n683\n691\n701\n709\n719\n727\n733\n739\n743\n751\n757\n761\n769\n773\n787\n797\n809\n811\n821\n823\n827\n829\n839\n853\n857\n859\n863\n877\n881\n883\n887\n907\n911\n919\n929\n937\n941\n947\n953\n967\n971\n977\n983\n991\n997\n1009\n1013\n1019\n1021\n1031\n1033\n1039\n1049\n1051\n1061\n1063\n1069\n1087\n1091\n1093\n1097\n1103\n1109\n1117\n1123\n1129\n1151\n1153\n1163\n1171\n1181\n1187\n1193\n1201\n1213\n1217\n1223\n1229\n1231\n1237\n1249\n1259\n1277\n1279\n1283\n1289\n1291\n1297\n1301\n1303\n1307\n1319\n1321\n1327\n1361\n1367\n1373\n1381\n1399\n1409\n1423\n1427\n1429\n1433\n1439\n1447\n1451\n1453\n1459\n1471\n1481\n1483\n1487\n1489\n1493\n1499\n1511\n1523\n1531\n1543\n1549\n1553\n1559\n1567\n1571\n1579\n1583\n1597\n1601\n1607\n1609\n1613\n1619\n1621\n1627\n1637\n1657\n1663\n1667\n1669\n1693\n1697\n1699\n1709\n1721\n1723\n1733\n1741\n1747\n1753\n1759\n1777\n1783\n1787\n1789\n1801\n1811\n1823\n1831\n1847\n1861\n1867\n1871\n1873\n1877\n1879\n1889\n1901\n1907\n1913\n1931\n1933\n1949\n1951\n1973\n1979\n1987\n1993\n1997\n1999\n2003\n2011\n2017\n2027\n2029\n2039\n2053\n2063\n2069\n2081\n2083\n2087\n2089\n2099\n2111\n2113\n2129\n2131\n2137\n2141\n2143\n2153\n2161\n2179\n2203\n2207\n2213\n2221\n2237\n2239\n2243\n2251\n2267\n2269\n2273\n2281\n2287\n2293\n2297\n2309\n2311\n2333\n2339\n2341\n2347\n2351\n2357\n2371\n2377\n2381\n2383\n2389\n2393\n2399\n2411\n2417\n2423\n2437\n2441\n2447\n2459\
n2467\n2473\n2477\n2503\n2521\n2531\n2539\n2543\n2549\n2551\n2557\n2579\n2591\n2593\n2609\n2617\n2621\n2633\n2647\n2657\n2659\n2663\n2671\n2677\n2683\n2687\n2689\n2693\n2699\n2707\n2711\n2713\n2719\n2729\n2731\n2741\n2749\n2753\n2767\n2777\n2789\n2791\n2797\n2801\n2803\n2819\n2833\n2837\n2843\n2851\n2857\n2861\n2879\n2887\n2897\n2903\n2909\n2917\n2927\n2939\n2953\n2957\n2963\n2969\n2971\n2999\n3001\n3011\n3019\n3023\n3037\n3041\n3049\n3061\n3067\n3079\n3083\n3089\n3109\n3119\n3121\n3137\n3163\n3167\n3169\n3181\n3187\n3191\n3203\n3209\n3217\n3221\n3229\n3251\n3253\n3257\n3259\n3271\n3299\n3301\n3307\n3313\n3319\n3323\n3329\n3331\n3343\n3347\n3359\n3361\n3371\n3373\n3389\n3391\n3407\n3413\n3433\n3449\n3457\n3461\n3463\n3467\n3469\n3491\n3499\n3511\n3517\n3527\n3529\n3533\n3539\n3541\n3547\n3557\n3559\n3571\n3581\n3583\n3593\n3607\n3613\n3617\n3623\n3631\n3637\n3643\n3659\n3671\n3673\n3677\n3691\n3697\n3701\n3709\n3719\n3727\n3733\n3739\n3761\n3767\n3769\n3779\n3793\n3797\n3803\n3821\n3823\n3833\n3847\n3851\n3853\n3863\n3877\n3881\n3889\n3907\n3911\n3917\n3919\n3923\n3929\n3931\n3943\n3947\n3967\n3989\n4001\n4003\n4007\n4013\n4019\n4021\n4027\n4049\n4051\n4057\n4073\n4079\n4091\n4093\n4099\n4111\n4127\n4129\n4133\n4139\n4153\n4157\n4159\n4177\n4201\n4211\n4217\n4219\n4229\n4231\n4241\n4243\n4253\n4259\n4261\n4271\n4273\n4283\n4289\n4297\n4327\n4337\n4339\n4349\n4357\n4363\n4373\n4391\n4397\n4409\n4421\n4423\n4441\n4447\n4451\n4457\n4463\n4481\n4483\n4493\n4507\n4513\n4517\n4519\n4523\n4547\n4549\n4561\n4567\n4583\n4591\n4597\n4603\n4621\n4637\n4639\n4643\n4649\n4651\n4657\n4663\n4673\n4679\n4691\n4703\n4721\n4723\n4729\n4733\n4751\n4759\n4783\n4787\n4789\n4793\n4799\n4801\n4813\n4817\n4831\n4861\n4871\n4877\n4889\n4903\n4909\n4919\n4931\n4933\n4937\n4943\n4951\n4957\n4967\n4969\n4973\n4987\n4993\n4999\n5003\n5009\n5011\n5021\n5023\n5039\n5051\n5059\n5077\n5081\n5087\n5099\n5101\n5107\n5113\n5119\n5147\n5153\n5167\n5171\n5179\n5189\n5197\n5209\n5227\n5231\n5233\n5237\n5
261\n5273\n5279\n5281\n5297\n5303\n5309\n5323\n5333\n5347\n5351\n5381\n5387\n5393\n5399\n5407\n5413\n5417\n5419\n5431\n5437\n5441\n5443\n5449\n5471\n5477\n5479\n5483\n5501\n5503\n5507\n5519\n5521\n5527\n5531\n5557\n5563\n5569\n5573\n5581\n5591\n5623\n5639\n5641\n5647\n5651\n5653\n5657\n5659\n5669\n5683\n5689\n5693\n5701\n5711\n5717\n5737\n5741\n5743\n5749\n5779\n5783\n5791\n5801\n5807\n5813\n5821\n5827\n5839\n5843\n5849\n5851\n5857\n5861\n5867\n5869\n5879\n5881\n5897\n5903\n5923\n5927\n5939\n5953\n5981\n5987\n6007\n6011\n6029\n6037\n6043\n6047\n6053\n6067\n6073\n6079\n6089\n6091\n6101\n6113\n6121\n6131\n6133\n6143\n6151\n6163\n6173\n6197\n6199\n6203\n6211\n6217\n6221\n6229\n6247\n6257\n6263\n6269\n6271\n6277\n6287\n6299\n6301\n6311\n6317\n6323\n6329\n6337\n6343\n6353\n6359\n6361\n6367\n6373\n6379\n6389\n6397\n6421\n6427\n6449\n6451\n6469\n6473\n6481\n6491\n6521\n6529\n6547\n6551\n6553\n6563\n6569\n6571\n6577\n6581\n6599\n6607\n6619\n6637\n6653\n6659\n6661\n6673\n6679\n6689\n6691\n6701\n6703\n6709\n6719\n6733\n6737\n6761\n6763\n6779\n6781\n6791\n6793\n6803\n6823\n6827\n6829\n6833\n6841\n6857\n6863\n6869\n6871\n6883\n6899\n6907\n6911\n6917\n6947\n6949\n6959\n6961\n6967\n6971\n6977\n6983\n6991\n6997\n7001\n7013\n7019\n7027\n7039\n7043\n7057\n7069\n7079\n7103\n7109\n7121\n7127\n7129\n7151\n7159\n7177\n7187\n7193\n7207\n7211\n7213\n7219\n7229\n7237\n7243\n7247\n7253\n7283\n7297\n7307\n7309\n7321\n7331\n7333\n7349\n7351\n7369\n7393\n7411\n7417\n7433\n7451\n7457\n7459\n7477\n7481\n7487\n7489\n7499\n7507\n7517\n7523\n7529\n7537\n7541\n7547\n7549\n7559\n7561\n7573\n7577\n7583\n7589\n7591\n7603\n7607\n7621\n7639\n7643\n7649\n7669\n7673\n7681\n7687\n7691\n7699\n7703\n7717\n7723\n7727\n7741\n7753\n7757\n7759\n7789\n7793\n7817\n7823\n7829\n7841\n7853\n7867\n7873\n7877\n7879\n7883\n7901\n7907",
- "base64Encoded": false,
- "contentType": "text/plain"
- }
- },
- "exitCode": 0
- }
- }
-]
\ No newline at end of file
diff --git a/static/110_prime_numbers.txt b/static/110_prime_numbers.txt
deleted file mode 100644
index 345ba9b1..00000000
--- a/static/110_prime_numbers.txt
+++ /dev/null
@@ -1,999 +0,0 @@
-2
-3
-5
-7
-11
-13
-17
-19
-23
-29
-31
-37
-41
-43
-47
-53
-59
-61
-67
-71
-73
-79
-83
-89
-97
-101
-103
-107
-109
-113
-127
-131
-137
-139
-149
-151
-157
-163
-167
-173
-179
-181
-191
-193
-197
-199
-211
-223
-227
-229
-233
-239
-241
-251
-257
-263
-269
-271
-277
-281
-283
-293
-307
-311
-313
-317
-331
-337
-347
-349
-353
-359
-367
-373
-379
-383
-389
-397
-401
-409
-419
-421
-431
-433
-439
-443
-449
-457
-461
-463
-467
-479
-487
-491
-499
-503
-509
-521
-523
-541
-547
-557
-563
-569
-571
-577
-587
-593
-599
-601
-607
-613
-617
-619
-631
-641
-643
-647
-653
-659
-661
-673
-677
-683
-691
-701
-709
-719
-727
-733
-739
-743
-751
-757
-761
-769
-773
-787
-797
-809
-811
-821
-823
-827
-829
-839
-853
-857
-859
-863
-877
-881
-883
-887
-907
-911
-919
-929
-937
-941
-947
-953
-967
-971
-977
-983
-991
-997
-1009
-1013
-1019
-1021
-1031
-1033
-1039
-1049
-1051
-1061
-1063
-1069
-1087
-1091
-1093
-1097
-1103
-1109
-1117
-1123
-1129
-1151
-1153
-1163
-1171
-1181
-1187
-1193
-1201
-1213
-1217
-1223
-1229
-1231
-1237
-1249
-1259
-1277
-1279
-1283
-1289
-1291
-1297
-1301
-1303
-1307
-1319
-1321
-1327
-1361
-1367
-1373
-1381
-1399
-1409
-1423
-1427
-1429
-1433
-1439
-1447
-1451
-1453
-1459
-1471
-1481
-1483
-1487
-1489
-1493
-1499
-1511
-1523
-1531
-1543
-1549
-1553
-1559
-1567
-1571
-1579
-1583
-1597
-1601
-1607
-1609
-1613
-1619
-1621
-1627
-1637
-1657
-1663
-1667
-1669
-1693
-1697
-1699
-1709
-1721
-1723
-1733
-1741
-1747
-1753
-1759
-1777
-1783
-1787
-1789
-1801
-1811
-1823
-1831
-1847
-1861
-1867
-1871
-1873
-1877
-1879
-1889
-1901
-1907
-1913
-1931
-1933
-1949
-1951
-1973
-1979
-1987
-1993
-1997
-1999
-2003
-2011
-2017
-2027
-2029
-2039
-2053
-2063
-2069
-2081
-2083
-2087
-2089
-2099
-2111
-2113
-2129
-2131
-2137
-2141
-2143
-2153
-2161
-2179
-2203
-2207
-2213
-2221
-2237
-2239
-2243
-2251
-2267
-2269
-2273
-2281
-2287
-2293
-2297
-2309
-2311
-2333
-2339
-2341
-2347
-2351
-2357
-2371
-2377
-2381
-2383
-2389
-2393
-2399
-2411
-2417
-2423
-2437
-2441
-2447
-2459
-2467
-2473
-2477
-2503
-2521
-2531
-2539
-2543
-2549
-2551
-2557
-2579
-2591
-2593
-2609
-2617
-2621
-2633
-2647
-2657
-2659
-2663
-2671
-2677
-2683
-2687
-2689
-2693
-2699
-2707
-2711
-2713
-2719
-2729
-2731
-2741
-2749
-2753
-2767
-2777
-2789
-2791
-2797
-2801
-2803
-2819
-2833
-2837
-2843
-2851
-2857
-2861
-2879
-2887
-2897
-2903
-2909
-2917
-2927
-2939
-2953
-2957
-2963
-2969
-2971
-2999
-3001
-3011
-3019
-3023
-3037
-3041
-3049
-3061
-3067
-3079
-3083
-3089
-3109
-3119
-3121
-3137
-3163
-3167
-3169
-3181
-3187
-3191
-3203
-3209
-3217
-3221
-3229
-3251
-3253
-3257
-3259
-3271
-3299
-3301
-3307
-3313
-3319
-3323
-3329
-3331
-3343
-3347
-3359
-3361
-3371
-3373
-3389
-3391
-3407
-3413
-3433
-3449
-3457
-3461
-3463
-3467
-3469
-3491
-3499
-3511
-3517
-3527
-3529
-3533
-3539
-3541
-3547
-3557
-3559
-3571
-3581
-3583
-3593
-3607
-3613
-3617
-3623
-3631
-3637
-3643
-3659
-3671
-3673
-3677
-3691
-3697
-3701
-3709
-3719
-3727
-3733
-3739
-3761
-3767
-3769
-3779
-3793
-3797
-3803
-3821
-3823
-3833
-3847
-3851
-3853
-3863
-3877
-3881
-3889
-3907
-3911
-3917
-3919
-3923
-3929
-3931
-3943
-3947
-3967
-3989
-4001
-4003
-4007
-4013
-4019
-4021
-4027
-4049
-4051
-4057
-4073
-4079
-4091
-4093
-4099
-4111
-4127
-4129
-4133
-4139
-4153
-4157
-4159
-4177
-4201
-4211
-4217
-4219
-4229
-4231
-4241
-4243
-4253
-4259
-4261
-4271
-4273
-4283
-4289
-4297
-4327
-4337
-4339
-4349
-4357
-4363
-4373
-4391
-4397
-4409
-4421
-4423
-4441
-4447
-4451
-4457
-4463
-4481
-4483
-4493
-4507
-4513
-4517
-4519
-4523
-4547
-4549
-4561
-4567
-4583
-4591
-4597
-4603
-4621
-4637
-4639
-4643
-4649
-4651
-4657
-4663
-4673
-4679
-4691
-4703
-4721
-4723
-4729
-4733
-4751
-4759
-4783
-4787
-4789
-4793
-4799
-4801
-4813
-4817
-4831
-4861
-4871
-4877
-4889
-4903
-4909
-4919
-4931
-4933
-4937
-4943
-4951
-4957
-4967
-4969
-4973
-4987
-4993
-4999
-5003
-5009
-5011
-5021
-5023
-5039
-5051
-5059
-5077
-5081
-5087
-5099
-5101
-5107
-5113
-5119
-5147
-5153
-5167
-5171
-5179
-5189
-5197
-5209
-5227
-5231
-5233
-5237
-5261
-5273
-5279
-5281
-5297
-5303
-5309
-5323
-5333
-5347
-5351
-5381
-5387
-5393
-5399
-5407
-5413
-5417
-5419
-5431
-5437
-5441
-5443
-5449
-5471
-5477
-5479
-5483
-5501
-5503
-5507
-5519
-5521
-5527
-5531
-5557
-5563
-5569
-5573
-5581
-5591
-5623
-5639
-5641
-5647
-5651
-5653
-5657
-5659
-5669
-5683
-5689
-5693
-5701
-5711
-5717
-5737
-5741
-5743
-5749
-5779
-5783
-5791
-5801
-5807
-5813
-5821
-5827
-5839
-5843
-5849
-5851
-5857
-5861
-5867
-5869
-5879
-5881
-5897
-5903
-5923
-5927
-5939
-5953
-5981
-5987
-6007
-6011
-6029
-6037
-6043
-6047
-6053
-6067
-6073
-6079
-6089
-6091
-6101
-6113
-6121
-6131
-6133
-6143
-6151
-6163
-6173
-6197
-6199
-6203
-6211
-6217
-6221
-6229
-6247
-6257
-6263
-6269
-6271
-6277
-6287
-6299
-6301
-6311
-6317
-6323
-6329
-6337
-6343
-6353
-6359
-6361
-6367
-6373
-6379
-6389
-6397
-6421
-6427
-6449
-6451
-6469
-6473
-6481
-6491
-6521
-6529
-6547
-6551
-6553
-6563
-6569
-6571
-6577
-6581
-6599
-6607
-6619
-6637
-6653
-6659
-6661
-6673
-6679
-6689
-6691
-6701
-6703
-6709
-6719
-6733
-6737
-6761
-6763
-6779
-6781
-6791
-6793
-6803
-6823
-6827
-6829
-6833
-6841
-6857
-6863
-6869
-6871
-6883
-6899
-6907
-6911
-6917
-6947
-6949
-6959
-6961
-6967
-6971
-6977
-6983
-6991
-6997
-7001
-7013
-7019
-7027
-7039
-7043
-7057
-7069
-7079
-7103
-7109
-7121
-7127
-7129
-7151
-7159
-7177
-7187
-7193
-7207
-7211
-7213
-7219
-7229
-7237
-7243
-7247
-7253
-7283
-7297
-7307
-7309
-7321
-7331
-7333
-7349
-7351
-7369
-7393
-7411
-7417
-7433
-7451
-7457
-7459
-7477
-7481
-7487
-7489
-7499
-7507
-7517
-7523
-7529
-7537
-7541
-7547
-7549
-7559
-7561
-7573
-7577
-7583
-7589
-7591
-7603
-7607
-7621
-7639
-7643
-7649
-7669
-7673
-7681
-7687
-7691
-7699
-7703
-7717
-7723
-7727
-7741
-7753
-7757
-7759
-7789
-7793
-7817
-7823
-7829
-7841
-7853
-7867
-7873
-7877
-7879
-7883
-7901
-7907
\ No newline at end of file
diff --git a/static/111_generated_code.py b/static/111_generated_code.py
deleted file mode 100644
index d1bf4973..00000000
--- a/static/111_generated_code.py
+++ /dev/null
@@ -1,39 +0,0 @@
-inputFiles = [] # DO NOT CHANGE THIS LINE
-
-import json
-
-def is_prime(n):
- if n <= 1:
- return False
- if n <= 3:
- return True
- if n % 2 == 0 or n % 3 == 0:
- return False
- i = 5
- while i * i <= n:
- if n % i == 0 or n % (i + 2) == 0:
- return False
- i += 6
- return True
-
-def generate_primes(count):
- primes = []
- num = 2
- while len(primes) < count:
- if is_prime(num):
- primes.append(num)
- num += 1
- return primes
-
-primes = generate_primes(779)
-prime_numbers_content = "\n".join(map(str, primes))
-
-result = {
- "prime_numbers.txt": {
- "content": prime_numbers_content,
- "base64Encoded": False,
- "contentType": "text/plain"
- }
-}
-
-print(json.dumps(result))
\ No newline at end of file
diff --git a/static/112_execution_history.json b/static/112_execution_history.json
deleted file mode 100644
index c419a1c2..00000000
--- a/static/112_execution_history.json
+++ /dev/null
@@ -1,19 +0,0 @@
-[
- {
- "attempt": 1,
- "code": "inputFiles = [] # DO NOT CHANGE THIS LINE\n\nimport json\n\ndef is_prime(n):\n if n <= 1:\n return False\n if n <= 3:\n return True\n if n % 2 == 0 or n % 3 == 0:\n return False\n i = 5\n while i * i <= n:\n if n % i == 0 or n % (i + 2) == 0:\n return False\n i += 6\n return True\n\ndef generate_primes(count):\n primes = []\n num = 2\n while len(primes) < count:\n if is_prime(num):\n primes.append(num)\n num += 1\n return primes\n\nprimes = generate_primes(779)\nprime_numbers_content = \"\\n\".join(map(str, primes))\n\nresult = {\n \"prime_numbers.txt\": {\n \"content\": prime_numbers_content,\n \"base64Encoded\": False,\n \"contentType\": \"text/plain\"\n }\n}\n\nprint(json.dumps(result))",
- "result": {
- "success": true,
- "output": "{\"prime_numbers.txt\": {\"content\": \"2\\n3\\n5\\n7\\n11\\n13\\n17\\n19\\n23\\n29\\n31\\n37\\n41\\n43\\n47\\n53\\n59\\n61\\n67\\n71\\n73\\n79\\n83\\n89\\n97\\n101\\n103\\n107\\n109\\n113\\n127\\n131\\n137\\n139\\n149\\n151\\n157\\n163\\n167\\n173\\n179\\n181\\n191\\n193\\n197\\n199\\n211\\n223\\n227\\n229\\n233\\n239\\n241\\n251\\n257\\n263\\n269\\n271\\n277\\n281\\n283\\n293\\n307\\n311\\n313\\n317\\n331\\n337\\n347\\n349\\n353\\n359\\n367\\n373\\n379\\n383\\n389\\n397\\n401\\n409\\n419\\n421\\n431\\n433\\n439\\n443\\n449\\n457\\n461\\n463\\n467\\n479\\n487\\n491\\n499\\n503\\n509\\n521\\n523\\n541\\n547\\n557\\n563\\n569\\n571\\n577\\n587\\n593\\n599\\n601\\n607\\n613\\n617\\n619\\n631\\n641\\n643\\n647\\n653\\n659\\n661\\n673\\n677\\n683\\n691\\n701\\n709\\n719\\n727\\n733\\n739\\n743\\n751\\n757\\n761\\n769\\n773\\n787\\n797\\n809\\n811\\n821\\n823\\n827\\n829\\n839\\n853\\n857\\n859\\n863\\n877\\n881\\n883\\n887\\n907\\n911\\n919\\n929\\n937\\n941\\n947\\n953\\n967\\n971\\n977\\n983\\n991\\n997\\n1009\\n1013\\n1019\\n1021\\n1031\\n1033\\n1039\\n1049\\n1051\\n1061\\n1063\\n1069\\n1087\\n1091\\n1093\\n1097\\n1103\\n1109\\n1117\\n1123\\n1129\\n1151\\n1153\\n1163\\n1171\\n1181\\n1187\\n1193\\n1201\\n1213\\n1217\\n1223\\n1229\\n1231\\n1237\\n1249\\n1259\\n1277\\n1279\\n1283\\n1289\\n1291\\n1297\\n1301\\n1303\\n1307\\n1319\\n1321\\n1327\\n1361\\n1367\\n1373\\n1381\\n1399\\n1409\\n1423\\n1427\\n1429\\n1433\\n1439\\n1447\\n1451\\n1453\\n1459\\n1471\\n1481\\n1483\\n1487\\n1489\\n1493\\n1499\\n1511\\n1523\\n1531\\n1543\\n1549\\n1553\\n1559\\n1567\\n1571\\n1579\\n1583\\n1597\\n1601\\n1607\\n1609\\n1613\\n1619\\n1621\\n1627\\n1637\\n1657\\n1663\\n1667\\n1669\\n1693\\n1697\\n1699\\n1709\\n1721\\n1723\\n1733\\n1741\\n1747\\n1753\\n1759\\n1777\\n1783\\n1787\\n1789\\n1801\\n1811\\n1823\\n1831\\n1847\\n1861\\n1867\\n1871\\n1873\\n1877\\n1879\\n1889\\n1901\\n1907\\n1913\\n1931\\n1933\\n1949\\n1951\\n1973\\n1979\\n1987\\n1993\\n1997\\n1999\\n2003\\n2011\\n2017\\n20
27\\n2029\\n2039\\n2053\\n2063\\n2069\\n2081\\n2083\\n2087\\n2089\\n2099\\n2111\\n2113\\n2129\\n2131\\n2137\\n2141\\n2143\\n2153\\n2161\\n2179\\n2203\\n2207\\n2213\\n2221\\n2237\\n2239\\n2243\\n2251\\n2267\\n2269\\n2273\\n2281\\n2287\\n2293\\n2297\\n2309\\n2311\\n2333\\n2339\\n2341\\n2347\\n2351\\n2357\\n2371\\n2377\\n2381\\n2383\\n2389\\n2393\\n2399\\n2411\\n2417\\n2423\\n2437\\n2441\\n2447\\n2459\\n2467\\n2473\\n2477\\n2503\\n2521\\n2531\\n2539\\n2543\\n2549\\n2551\\n2557\\n2579\\n2591\\n2593\\n2609\\n2617\\n2621\\n2633\\n2647\\n2657\\n2659\\n2663\\n2671\\n2677\\n2683\\n2687\\n2689\\n2693\\n2699\\n2707\\n2711\\n2713\\n2719\\n2729\\n2731\\n2741\\n2749\\n2753\\n2767\\n2777\\n2789\\n2791\\n2797\\n2801\\n2803\\n2819\\n2833\\n2837\\n2843\\n2851\\n2857\\n2861\\n2879\\n2887\\n2897\\n2903\\n2909\\n2917\\n2927\\n2939\\n2953\\n2957\\n2963\\n2969\\n2971\\n2999\\n3001\\n3011\\n3019\\n3023\\n3037\\n3041\\n3049\\n3061\\n3067\\n3079\\n3083\\n3089\\n3109\\n3119\\n3121\\n3137\\n3163\\n3167\\n3169\\n3181\\n3187\\n3191\\n3203\\n3209\\n3217\\n3221\\n3229\\n3251\\n3253\\n3257\\n3259\\n3271\\n3299\\n3301\\n3307\\n3313\\n3319\\n3323\\n3329\\n3331\\n3343\\n3347\\n3359\\n3361\\n3371\\n3373\\n3389\\n3391\\n3407\\n3413\\n3433\\n3449\\n3457\\n3461\\n3463\\n3467\\n3469\\n3491\\n3499\\n3511\\n3517\\n3527\\n3529\\n3533\\n3539\\n3541\\n3547\\n3557\\n3559\\n3571\\n3581\\n3583\\n3593\\n3607\\n3613\\n3617\\n3623\\n3631\\n3637\\n3643\\n3659\\n3671\\n3673\\n3677\\n3691\\n3697\\n3701\\n3709\\n3719\\n3727\\n3733\\n3739\\n3761\\n3767\\n3769\\n3779\\n3793\\n3797\\n3803\\n3821\\n3823\\n3833\\n3847\\n3851\\n3853\\n3863\\n3877\\n3881\\n3889\\n3907\\n3911\\n3917\\n3919\\n3923\\n3929\\n3931\\n3943\\n3947\\n3967\\n3989\\n4001\\n4003\\n4007\\n4013\\n4019\\n4021\\n4027\\n4049\\n4051\\n4057\\n4073\\n4079\\n4091\\n4093\\n4099\\n4111\\n4127\\n4129\\n4133\\n4139\\n4153\\n4157\\n4159\\n4177\\n4201\\n4211\\n4217\\n4219\\n4229\\n4231\\n4241\\n4243\\n4253\\n4259\\n4261\\n4271\\n4273\\n4283\\n4289\\n4297\\n4327\\n4337\\n
4339\\n4349\\n4357\\n4363\\n4373\\n4391\\n4397\\n4409\\n4421\\n4423\\n4441\\n4447\\n4451\\n4457\\n4463\\n4481\\n4483\\n4493\\n4507\\n4513\\n4517\\n4519\\n4523\\n4547\\n4549\\n4561\\n4567\\n4583\\n4591\\n4597\\n4603\\n4621\\n4637\\n4639\\n4643\\n4649\\n4651\\n4657\\n4663\\n4673\\n4679\\n4691\\n4703\\n4721\\n4723\\n4729\\n4733\\n4751\\n4759\\n4783\\n4787\\n4789\\n4793\\n4799\\n4801\\n4813\\n4817\\n4831\\n4861\\n4871\\n4877\\n4889\\n4903\\n4909\\n4919\\n4931\\n4933\\n4937\\n4943\\n4951\\n4957\\n4967\\n4969\\n4973\\n4987\\n4993\\n4999\\n5003\\n5009\\n5011\\n5021\\n5023\\n5039\\n5051\\n5059\\n5077\\n5081\\n5087\\n5099\\n5101\\n5107\\n5113\\n5119\\n5147\\n5153\\n5167\\n5171\\n5179\\n5189\\n5197\\n5209\\n5227\\n5231\\n5233\\n5237\\n5261\\n5273\\n5279\\n5281\\n5297\\n5303\\n5309\\n5323\\n5333\\n5347\\n5351\\n5381\\n5387\\n5393\\n5399\\n5407\\n5413\\n5417\\n5419\\n5431\\n5437\\n5441\\n5443\\n5449\\n5471\\n5477\\n5479\\n5483\\n5501\\n5503\\n5507\\n5519\\n5521\\n5527\\n5531\\n5557\\n5563\\n5569\\n5573\\n5581\\n5591\\n5623\\n5639\\n5641\\n5647\\n5651\\n5653\\n5657\\n5659\\n5669\\n5683\\n5689\\n5693\\n5701\\n5711\\n5717\\n5737\\n5741\\n5743\\n5749\\n5779\\n5783\\n5791\\n5801\\n5807\\n5813\\n5821\\n5827\\n5839\\n5843\\n5849\\n5851\\n5857\\n5861\\n5867\\n5869\\n5879\\n5881\\n5897\\n5903\\n5923\\n5927\", \"base64Encoded\": false, \"contentType\": \"text/plain\"}}\n",
- "error": "",
- "result": {
- "prime_numbers.txt": {
- "content": "2\n3\n5\n7\n11\n13\n17\n19\n23\n29\n31\n37\n41\n43\n47\n53\n59\n61\n67\n71\n73\n79\n83\n89\n97\n101\n103\n107\n109\n113\n127\n131\n137\n139\n149\n151\n157\n163\n167\n173\n179\n181\n191\n193\n197\n199\n211\n223\n227\n229\n233\n239\n241\n251\n257\n263\n269\n271\n277\n281\n283\n293\n307\n311\n313\n317\n331\n337\n347\n349\n353\n359\n367\n373\n379\n383\n389\n397\n401\n409\n419\n421\n431\n433\n439\n443\n449\n457\n461\n463\n467\n479\n487\n491\n499\n503\n509\n521\n523\n541\n547\n557\n563\n569\n571\n577\n587\n593\n599\n601\n607\n613\n617\n619\n631\n641\n643\n647\n653\n659\n661\n673\n677\n683\n691\n701\n709\n719\n727\n733\n739\n743\n751\n757\n761\n769\n773\n787\n797\n809\n811\n821\n823\n827\n829\n839\n853\n857\n859\n863\n877\n881\n883\n887\n907\n911\n919\n929\n937\n941\n947\n953\n967\n971\n977\n983\n991\n997\n1009\n1013\n1019\n1021\n1031\n1033\n1039\n1049\n1051\n1061\n1063\n1069\n1087\n1091\n1093\n1097\n1103\n1109\n1117\n1123\n1129\n1151\n1153\n1163\n1171\n1181\n1187\n1193\n1201\n1213\n1217\n1223\n1229\n1231\n1237\n1249\n1259\n1277\n1279\n1283\n1289\n1291\n1297\n1301\n1303\n1307\n1319\n1321\n1327\n1361\n1367\n1373\n1381\n1399\n1409\n1423\n1427\n1429\n1433\n1439\n1447\n1451\n1453\n1459\n1471\n1481\n1483\n1487\n1489\n1493\n1499\n1511\n1523\n1531\n1543\n1549\n1553\n1559\n1567\n1571\n1579\n1583\n1597\n1601\n1607\n1609\n1613\n1619\n1621\n1627\n1637\n1657\n1663\n1667\n1669\n1693\n1697\n1699\n1709\n1721\n1723\n1733\n1741\n1747\n1753\n1759\n1777\n1783\n1787\n1789\n1801\n1811\n1823\n1831\n1847\n1861\n1867\n1871\n1873\n1877\n1879\n1889\n1901\n1907\n1913\n1931\n1933\n1949\n1951\n1973\n1979\n1987\n1993\n1997\n1999\n2003\n2011\n2017\n2027\n2029\n2039\n2053\n2063\n2069\n2081\n2083\n2087\n2089\n2099\n2111\n2113\n2129\n2131\n2137\n2141\n2143\n2153\n2161\n2179\n2203\n2207\n2213\n2221\n2237\n2239\n2243\n2251\n2267\n2269\n2273\n2281\n2287\n2293\n2297\n2309\n2311\n2333\n2339\n2341\n2347\n2351\n2357\n2371\n2377\n2381\n2383\n2389\n2393\n2399\n2411\n2417\n2423\n2437\n2441\n2447\n2459\
n2467\n2473\n2477\n2503\n2521\n2531\n2539\n2543\n2549\n2551\n2557\n2579\n2591\n2593\n2609\n2617\n2621\n2633\n2647\n2657\n2659\n2663\n2671\n2677\n2683\n2687\n2689\n2693\n2699\n2707\n2711\n2713\n2719\n2729\n2731\n2741\n2749\n2753\n2767\n2777\n2789\n2791\n2797\n2801\n2803\n2819\n2833\n2837\n2843\n2851\n2857\n2861\n2879\n2887\n2897\n2903\n2909\n2917\n2927\n2939\n2953\n2957\n2963\n2969\n2971\n2999\n3001\n3011\n3019\n3023\n3037\n3041\n3049\n3061\n3067\n3079\n3083\n3089\n3109\n3119\n3121\n3137\n3163\n3167\n3169\n3181\n3187\n3191\n3203\n3209\n3217\n3221\n3229\n3251\n3253\n3257\n3259\n3271\n3299\n3301\n3307\n3313\n3319\n3323\n3329\n3331\n3343\n3347\n3359\n3361\n3371\n3373\n3389\n3391\n3407\n3413\n3433\n3449\n3457\n3461\n3463\n3467\n3469\n3491\n3499\n3511\n3517\n3527\n3529\n3533\n3539\n3541\n3547\n3557\n3559\n3571\n3581\n3583\n3593\n3607\n3613\n3617\n3623\n3631\n3637\n3643\n3659\n3671\n3673\n3677\n3691\n3697\n3701\n3709\n3719\n3727\n3733\n3739\n3761\n3767\n3769\n3779\n3793\n3797\n3803\n3821\n3823\n3833\n3847\n3851\n3853\n3863\n3877\n3881\n3889\n3907\n3911\n3917\n3919\n3923\n3929\n3931\n3943\n3947\n3967\n3989\n4001\n4003\n4007\n4013\n4019\n4021\n4027\n4049\n4051\n4057\n4073\n4079\n4091\n4093\n4099\n4111\n4127\n4129\n4133\n4139\n4153\n4157\n4159\n4177\n4201\n4211\n4217\n4219\n4229\n4231\n4241\n4243\n4253\n4259\n4261\n4271\n4273\n4283\n4289\n4297\n4327\n4337\n4339\n4349\n4357\n4363\n4373\n4391\n4397\n4409\n4421\n4423\n4441\n4447\n4451\n4457\n4463\n4481\n4483\n4493\n4507\n4513\n4517\n4519\n4523\n4547\n4549\n4561\n4567\n4583\n4591\n4597\n4603\n4621\n4637\n4639\n4643\n4649\n4651\n4657\n4663\n4673\n4679\n4691\n4703\n4721\n4723\n4729\n4733\n4751\n4759\n4783\n4787\n4789\n4793\n4799\n4801\n4813\n4817\n4831\n4861\n4871\n4877\n4889\n4903\n4909\n4919\n4931\n4933\n4937\n4943\n4951\n4957\n4967\n4969\n4973\n4987\n4993\n4999\n5003\n5009\n5011\n5021\n5023\n5039\n5051\n5059\n5077\n5081\n5087\n5099\n5101\n5107\n5113\n5119\n5147\n5153\n5167\n5171\n5179\n5189\n5197\n5209\n5227\n5231\n5233\n5237\n5
261\n5273\n5279\n5281\n5297\n5303\n5309\n5323\n5333\n5347\n5351\n5381\n5387\n5393\n5399\n5407\n5413\n5417\n5419\n5431\n5437\n5441\n5443\n5449\n5471\n5477\n5479\n5483\n5501\n5503\n5507\n5519\n5521\n5527\n5531\n5557\n5563\n5569\n5573\n5581\n5591\n5623\n5639\n5641\n5647\n5651\n5653\n5657\n5659\n5669\n5683\n5689\n5693\n5701\n5711\n5717\n5737\n5741\n5743\n5749\n5779\n5783\n5791\n5801\n5807\n5813\n5821\n5827\n5839\n5843\n5849\n5851\n5857\n5861\n5867\n5869\n5879\n5881\n5897\n5903\n5923\n5927",
- "base64Encoded": false,
- "contentType": "text/plain"
- }
- },
- "exitCode": 0
- }
- }
-]
\ No newline at end of file
diff --git a/static/113_prime_numbers.txt b/static/113_prime_numbers.txt
deleted file mode 100644
index 938f366b..00000000
--- a/static/113_prime_numbers.txt
+++ /dev/null
@@ -1,779 +0,0 @@
-2
-3
-5
-7
-11
-13
-17
-19
-23
-29
-31
-37
-41
-43
-47
-53
-59
-61
-67
-71
-73
-79
-83
-89
-97
-101
-103
-107
-109
-113
-127
-131
-137
-139
-149
-151
-157
-163
-167
-173
-179
-181
-191
-193
-197
-199
-211
-223
-227
-229
-233
-239
-241
-251
-257
-263
-269
-271
-277
-281
-283
-293
-307
-311
-313
-317
-331
-337
-347
-349
-353
-359
-367
-373
-379
-383
-389
-397
-401
-409
-419
-421
-431
-433
-439
-443
-449
-457
-461
-463
-467
-479
-487
-491
-499
-503
-509
-521
-523
-541
-547
-557
-563
-569
-571
-577
-587
-593
-599
-601
-607
-613
-617
-619
-631
-641
-643
-647
-653
-659
-661
-673
-677
-683
-691
-701
-709
-719
-727
-733
-739
-743
-751
-757
-761
-769
-773
-787
-797
-809
-811
-821
-823
-827
-829
-839
-853
-857
-859
-863
-877
-881
-883
-887
-907
-911
-919
-929
-937
-941
-947
-953
-967
-971
-977
-983
-991
-997
-1009
-1013
-1019
-1021
-1031
-1033
-1039
-1049
-1051
-1061
-1063
-1069
-1087
-1091
-1093
-1097
-1103
-1109
-1117
-1123
-1129
-1151
-1153
-1163
-1171
-1181
-1187
-1193
-1201
-1213
-1217
-1223
-1229
-1231
-1237
-1249
-1259
-1277
-1279
-1283
-1289
-1291
-1297
-1301
-1303
-1307
-1319
-1321
-1327
-1361
-1367
-1373
-1381
-1399
-1409
-1423
-1427
-1429
-1433
-1439
-1447
-1451
-1453
-1459
-1471
-1481
-1483
-1487
-1489
-1493
-1499
-1511
-1523
-1531
-1543
-1549
-1553
-1559
-1567
-1571
-1579
-1583
-1597
-1601
-1607
-1609
-1613
-1619
-1621
-1627
-1637
-1657
-1663
-1667
-1669
-1693
-1697
-1699
-1709
-1721
-1723
-1733
-1741
-1747
-1753
-1759
-1777
-1783
-1787
-1789
-1801
-1811
-1823
-1831
-1847
-1861
-1867
-1871
-1873
-1877
-1879
-1889
-1901
-1907
-1913
-1931
-1933
-1949
-1951
-1973
-1979
-1987
-1993
-1997
-1999
-2003
-2011
-2017
-2027
-2029
-2039
-2053
-2063
-2069
-2081
-2083
-2087
-2089
-2099
-2111
-2113
-2129
-2131
-2137
-2141
-2143
-2153
-2161
-2179
-2203
-2207
-2213
-2221
-2237
-2239
-2243
-2251
-2267
-2269
-2273
-2281
-2287
-2293
-2297
-2309
-2311
-2333
-2339
-2341
-2347
-2351
-2357
-2371
-2377
-2381
-2383
-2389
-2393
-2399
-2411
-2417
-2423
-2437
-2441
-2447
-2459
-2467
-2473
-2477
-2503
-2521
-2531
-2539
-2543
-2549
-2551
-2557
-2579
-2591
-2593
-2609
-2617
-2621
-2633
-2647
-2657
-2659
-2663
-2671
-2677
-2683
-2687
-2689
-2693
-2699
-2707
-2711
-2713
-2719
-2729
-2731
-2741
-2749
-2753
-2767
-2777
-2789
-2791
-2797
-2801
-2803
-2819
-2833
-2837
-2843
-2851
-2857
-2861
-2879
-2887
-2897
-2903
-2909
-2917
-2927
-2939
-2953
-2957
-2963
-2969
-2971
-2999
-3001
-3011
-3019
-3023
-3037
-3041
-3049
-3061
-3067
-3079
-3083
-3089
-3109
-3119
-3121
-3137
-3163
-3167
-3169
-3181
-3187
-3191
-3203
-3209
-3217
-3221
-3229
-3251
-3253
-3257
-3259
-3271
-3299
-3301
-3307
-3313
-3319
-3323
-3329
-3331
-3343
-3347
-3359
-3361
-3371
-3373
-3389
-3391
-3407
-3413
-3433
-3449
-3457
-3461
-3463
-3467
-3469
-3491
-3499
-3511
-3517
-3527
-3529
-3533
-3539
-3541
-3547
-3557
-3559
-3571
-3581
-3583
-3593
-3607
-3613
-3617
-3623
-3631
-3637
-3643
-3659
-3671
-3673
-3677
-3691
-3697
-3701
-3709
-3719
-3727
-3733
-3739
-3761
-3767
-3769
-3779
-3793
-3797
-3803
-3821
-3823
-3833
-3847
-3851
-3853
-3863
-3877
-3881
-3889
-3907
-3911
-3917
-3919
-3923
-3929
-3931
-3943
-3947
-3967
-3989
-4001
-4003
-4007
-4013
-4019
-4021
-4027
-4049
-4051
-4057
-4073
-4079
-4091
-4093
-4099
-4111
-4127
-4129
-4133
-4139
-4153
-4157
-4159
-4177
-4201
-4211
-4217
-4219
-4229
-4231
-4241
-4243
-4253
-4259
-4261
-4271
-4273
-4283
-4289
-4297
-4327
-4337
-4339
-4349
-4357
-4363
-4373
-4391
-4397
-4409
-4421
-4423
-4441
-4447
-4451
-4457
-4463
-4481
-4483
-4493
-4507
-4513
-4517
-4519
-4523
-4547
-4549
-4561
-4567
-4583
-4591
-4597
-4603
-4621
-4637
-4639
-4643
-4649
-4651
-4657
-4663
-4673
-4679
-4691
-4703
-4721
-4723
-4729
-4733
-4751
-4759
-4783
-4787
-4789
-4793
-4799
-4801
-4813
-4817
-4831
-4861
-4871
-4877
-4889
-4903
-4909
-4919
-4931
-4933
-4937
-4943
-4951
-4957
-4967
-4969
-4973
-4987
-4993
-4999
-5003
-5009
-5011
-5021
-5023
-5039
-5051
-5059
-5077
-5081
-5087
-5099
-5101
-5107
-5113
-5119
-5147
-5153
-5167
-5171
-5179
-5189
-5197
-5209
-5227
-5231
-5233
-5237
-5261
-5273
-5279
-5281
-5297
-5303
-5309
-5323
-5333
-5347
-5351
-5381
-5387
-5393
-5399
-5407
-5413
-5417
-5419
-5431
-5437
-5441
-5443
-5449
-5471
-5477
-5479
-5483
-5501
-5503
-5507
-5519
-5521
-5527
-5531
-5557
-5563
-5569
-5573
-5581
-5591
-5623
-5639
-5641
-5647
-5651
-5653
-5657
-5659
-5669
-5683
-5689
-5693
-5701
-5711
-5717
-5737
-5741
-5743
-5749
-5779
-5783
-5791
-5801
-5807
-5813
-5821
-5827
-5839
-5843
-5849
-5851
-5857
-5861
-5867
-5869
-5879
-5881
-5897
-5903
-5923
-5927
\ No newline at end of file
diff --git a/static/114_generated_code.py b/static/114_generated_code.py
deleted file mode 100644
index 173154fc..00000000
--- a/static/114_generated_code.py
+++ /dev/null
@@ -1,38 +0,0 @@
-inputFiles = [] # DO NOT CHANGE THIS LINE
-
-def is_prime(n):
- if n <= 1:
- return False
- if n <= 3:
- return True
- if n % 2 == 0 or n % 3 == 0:
- return False
- i = 5
- while i * i <= n:
- if n % i == 0 or n % (i + 2) == 0:
- return False
- i += 6
- return True
-
-def generate_primes(limit):
- primes = []
- num = 2
- while len(primes) < limit:
- if is_prime(num):
- primes.append(num)
- num += 1
- return primes
-
-primes = generate_primes(1000)
-primes_content = "\n".join(map(str, primes))
-
-result = {
- "first_1000_primes.txt": {
- "content": primes_content,
- "base64Encoded": False,
- "contentType": "text/plain"
- }
-}
-
-import json
-print(json.dumps(result))
\ No newline at end of file
diff --git a/static/115_execution_history.json b/static/115_execution_history.json
deleted file mode 100644
index 9795dcc0..00000000
--- a/static/115_execution_history.json
+++ /dev/null
@@ -1,19 +0,0 @@
-[
- {
- "attempt": 1,
- "code": "inputFiles = [] # DO NOT CHANGE THIS LINE\n\ndef is_prime(n):\n if n <= 1:\n return False\n if n <= 3:\n return True\n if n % 2 == 0 or n % 3 == 0:\n return False\n i = 5\n while i * i <= n:\n if n % i == 0 or n % (i + 2) == 0:\n return False\n i += 6\n return True\n\ndef generate_primes(limit):\n primes = []\n num = 2\n while len(primes) < limit:\n if is_prime(num):\n primes.append(num)\n num += 1\n return primes\n\nprimes = generate_primes(1000)\nprimes_content = \"\\n\".join(map(str, primes))\n\nresult = {\n \"first_1000_primes.txt\": {\n \"content\": primes_content,\n \"base64Encoded\": False,\n \"contentType\": \"text/plain\"\n }\n}\n\nimport json\nprint(json.dumps(result))",
- "result": {
- "success": true,
- "output": "{\"first_1000_primes.txt\": {\"content\": \"2\\n3\\n5\\n7\\n11\\n13\\n17\\n19\\n23\\n29\\n31\\n37\\n41\\n43\\n47\\n53\\n59\\n61\\n67\\n71\\n73\\n79\\n83\\n89\\n97\\n101\\n103\\n107\\n109\\n113\\n127\\n131\\n137\\n139\\n149\\n151\\n157\\n163\\n167\\n173\\n179\\n181\\n191\\n193\\n197\\n199\\n211\\n223\\n227\\n229\\n233\\n239\\n241\\n251\\n257\\n263\\n269\\n271\\n277\\n281\\n283\\n293\\n307\\n311\\n313\\n317\\n331\\n337\\n347\\n349\\n353\\n359\\n367\\n373\\n379\\n383\\n389\\n397\\n401\\n409\\n419\\n421\\n431\\n433\\n439\\n443\\n449\\n457\\n461\\n463\\n467\\n479\\n487\\n491\\n499\\n503\\n509\\n521\\n523\\n541\\n547\\n557\\n563\\n569\\n571\\n577\\n587\\n593\\n599\\n601\\n607\\n613\\n617\\n619\\n631\\n641\\n643\\n647\\n653\\n659\\n661\\n673\\n677\\n683\\n691\\n701\\n709\\n719\\n727\\n733\\n739\\n743\\n751\\n757\\n761\\n769\\n773\\n787\\n797\\n809\\n811\\n821\\n823\\n827\\n829\\n839\\n853\\n857\\n859\\n863\\n877\\n881\\n883\\n887\\n907\\n911\\n919\\n929\\n937\\n941\\n947\\n953\\n967\\n971\\n977\\n983\\n991\\n997\\n1009\\n1013\\n1019\\n1021\\n1031\\n1033\\n1039\\n1049\\n1051\\n1061\\n1063\\n1069\\n1087\\n1091\\n1093\\n1097\\n1103\\n1109\\n1117\\n1123\\n1129\\n1151\\n1153\\n1163\\n1171\\n1181\\n1187\\n1193\\n1201\\n1213\\n1217\\n1223\\n1229\\n1231\\n1237\\n1249\\n1259\\n1277\\n1279\\n1283\\n1289\\n1291\\n1297\\n1301\\n1303\\n1307\\n1319\\n1321\\n1327\\n1361\\n1367\\n1373\\n1381\\n1399\\n1409\\n1423\\n1427\\n1429\\n1433\\n1439\\n1447\\n1451\\n1453\\n1459\\n1471\\n1481\\n1483\\n1487\\n1489\\n1493\\n1499\\n1511\\n1523\\n1531\\n1543\\n1549\\n1553\\n1559\\n1567\\n1571\\n1579\\n1583\\n1597\\n1601\\n1607\\n1609\\n1613\\n1619\\n1621\\n1627\\n1637\\n1657\\n1663\\n1667\\n1669\\n1693\\n1697\\n1699\\n1709\\n1721\\n1723\\n1733\\n1741\\n1747\\n1753\\n1759\\n1777\\n1783\\n1787\\n1789\\n1801\\n1811\\n1823\\n1831\\n1847\\n1861\\n1867\\n1871\\n1873\\n1877\\n1879\\n1889\\n1901\\n1907\\n1913\\n1931\\n1933\\n1949\\n1951\\n1973\\n1979\\n1987\\n1993\\n1997\\n1999\\n2003\\n2011\\n2017\
\n2027\\n2029\\n2039\\n2053\\n2063\\n2069\\n2081\\n2083\\n2087\\n2089\\n2099\\n2111\\n2113\\n2129\\n2131\\n2137\\n2141\\n2143\\n2153\\n2161\\n2179\\n2203\\n2207\\n2213\\n2221\\n2237\\n2239\\n2243\\n2251\\n2267\\n2269\\n2273\\n2281\\n2287\\n2293\\n2297\\n2309\\n2311\\n2333\\n2339\\n2341\\n2347\\n2351\\n2357\\n2371\\n2377\\n2381\\n2383\\n2389\\n2393\\n2399\\n2411\\n2417\\n2423\\n2437\\n2441\\n2447\\n2459\\n2467\\n2473\\n2477\\n2503\\n2521\\n2531\\n2539\\n2543\\n2549\\n2551\\n2557\\n2579\\n2591\\n2593\\n2609\\n2617\\n2621\\n2633\\n2647\\n2657\\n2659\\n2663\\n2671\\n2677\\n2683\\n2687\\n2689\\n2693\\n2699\\n2707\\n2711\\n2713\\n2719\\n2729\\n2731\\n2741\\n2749\\n2753\\n2767\\n2777\\n2789\\n2791\\n2797\\n2801\\n2803\\n2819\\n2833\\n2837\\n2843\\n2851\\n2857\\n2861\\n2879\\n2887\\n2897\\n2903\\n2909\\n2917\\n2927\\n2939\\n2953\\n2957\\n2963\\n2969\\n2971\\n2999\\n3001\\n3011\\n3019\\n3023\\n3037\\n3041\\n3049\\n3061\\n3067\\n3079\\n3083\\n3089\\n3109\\n3119\\n3121\\n3137\\n3163\\n3167\\n3169\\n3181\\n3187\\n3191\\n3203\\n3209\\n3217\\n3221\\n3229\\n3251\\n3253\\n3257\\n3259\\n3271\\n3299\\n3301\\n3307\\n3313\\n3319\\n3323\\n3329\\n3331\\n3343\\n3347\\n3359\\n3361\\n3371\\n3373\\n3389\\n3391\\n3407\\n3413\\n3433\\n3449\\n3457\\n3461\\n3463\\n3467\\n3469\\n3491\\n3499\\n3511\\n3517\\n3527\\n3529\\n3533\\n3539\\n3541\\n3547\\n3557\\n3559\\n3571\\n3581\\n3583\\n3593\\n3607\\n3613\\n3617\\n3623\\n3631\\n3637\\n3643\\n3659\\n3671\\n3673\\n3677\\n3691\\n3697\\n3701\\n3709\\n3719\\n3727\\n3733\\n3739\\n3761\\n3767\\n3769\\n3779\\n3793\\n3797\\n3803\\n3821\\n3823\\n3833\\n3847\\n3851\\n3853\\n3863\\n3877\\n3881\\n3889\\n3907\\n3911\\n3917\\n3919\\n3923\\n3929\\n3931\\n3943\\n3947\\n3967\\n3989\\n4001\\n4003\\n4007\\n4013\\n4019\\n4021\\n4027\\n4049\\n4051\\n4057\\n4073\\n4079\\n4091\\n4093\\n4099\\n4111\\n4127\\n4129\\n4133\\n4139\\n4153\\n4157\\n4159\\n4177\\n4201\\n4211\\n4217\\n4219\\n4229\\n4231\\n4241\\n4243\\n4253\\n4259\\n4261\\n4271\\n4273\\n4283\\n4289\\n4297\\n4327\\n433
7\\n4339\\n4349\\n4357\\n4363\\n4373\\n4391\\n4397\\n4409\\n4421\\n4423\\n4441\\n4447\\n4451\\n4457\\n4463\\n4481\\n4483\\n4493\\n4507\\n4513\\n4517\\n4519\\n4523\\n4547\\n4549\\n4561\\n4567\\n4583\\n4591\\n4597\\n4603\\n4621\\n4637\\n4639\\n4643\\n4649\\n4651\\n4657\\n4663\\n4673\\n4679\\n4691\\n4703\\n4721\\n4723\\n4729\\n4733\\n4751\\n4759\\n4783\\n4787\\n4789\\n4793\\n4799\\n4801\\n4813\\n4817\\n4831\\n4861\\n4871\\n4877\\n4889\\n4903\\n4909\\n4919\\n4931\\n4933\\n4937\\n4943\\n4951\\n4957\\n4967\\n4969\\n4973\\n4987\\n4993\\n4999\\n5003\\n5009\\n5011\\n5021\\n5023\\n5039\\n5051\\n5059\\n5077\\n5081\\n5087\\n5099\\n5101\\n5107\\n5113\\n5119\\n5147\\n5153\\n5167\\n5171\\n5179\\n5189\\n5197\\n5209\\n5227\\n5231\\n5233\\n5237\\n5261\\n5273\\n5279\\n5281\\n5297\\n5303\\n5309\\n5323\\n5333\\n5347\\n5351\\n5381\\n5387\\n5393\\n5399\\n5407\\n5413\\n5417\\n5419\\n5431\\n5437\\n5441\\n5443\\n5449\\n5471\\n5477\\n5479\\n5483\\n5501\\n5503\\n5507\\n5519\\n5521\\n5527\\n5531\\n5557\\n5563\\n5569\\n5573\\n5581\\n5591\\n5623\\n5639\\n5641\\n5647\\n5651\\n5653\\n5657\\n5659\\n5669\\n5683\\n5689\\n5693\\n5701\\n5711\\n5717\\n5737\\n5741\\n5743\\n5749\\n5779\\n5783\\n5791\\n5801\\n5807\\n5813\\n5821\\n5827\\n5839\\n5843\\n5849\\n5851\\n5857\\n5861\\n5867\\n5869\\n5879\\n5881\\n5897\\n5903\\n5923\\n5927\\n5939\\n5953\\n5981\\n5987\\n6007\\n6011\\n6029\\n6037\\n6043\\n6047\\n6053\\n6067\\n6073\\n6079\\n6089\\n6091\\n6101\\n6113\\n6121\\n6131\\n6133\\n6143\\n6151\\n6163\\n6173\\n6197\\n6199\\n6203\\n6211\\n6217\\n6221\\n6229\\n6247\\n6257\\n6263\\n6269\\n6271\\n6277\\n6287\\n6299\\n6301\\n6311\\n6317\\n6323\\n6329\\n6337\\n6343\\n6353\\n6359\\n6361\\n6367\\n6373\\n6379\\n6389\\n6397\\n6421\\n6427\\n6449\\n6451\\n6469\\n6473\\n6481\\n6491\\n6521\\n6529\\n6547\\n6551\\n6553\\n6563\\n6569\\n6571\\n6577\\n6581\\n6599\\n6607\\n6619\\n6637\\n6653\\n6659\\n6661\\n6673\\n6679\\n6689\\n6691\\n6701\\n6703\\n6709\\n6719\\n6733\\n6737\\n6761\\n6763\\n6779\\n6781\\n6791\\n6793\\n6803\\n6823\\n6
827\\n6829\\n6833\\n6841\\n6857\\n6863\\n6869\\n6871\\n6883\\n6899\\n6907\\n6911\\n6917\\n6947\\n6949\\n6959\\n6961\\n6967\\n6971\\n6977\\n6983\\n6991\\n6997\\n7001\\n7013\\n7019\\n7027\\n7039\\n7043\\n7057\\n7069\\n7079\\n7103\\n7109\\n7121\\n7127\\n7129\\n7151\\n7159\\n7177\\n7187\\n7193\\n7207\\n7211\\n7213\\n7219\\n7229\\n7237\\n7243\\n7247\\n7253\\n7283\\n7297\\n7307\\n7309\\n7321\\n7331\\n7333\\n7349\\n7351\\n7369\\n7393\\n7411\\n7417\\n7433\\n7451\\n7457\\n7459\\n7477\\n7481\\n7487\\n7489\\n7499\\n7507\\n7517\\n7523\\n7529\\n7537\\n7541\\n7547\\n7549\\n7559\\n7561\\n7573\\n7577\\n7583\\n7589\\n7591\\n7603\\n7607\\n7621\\n7639\\n7643\\n7649\\n7669\\n7673\\n7681\\n7687\\n7691\\n7699\\n7703\\n7717\\n7723\\n7727\\n7741\\n7753\\n7757\\n7759\\n7789\\n7793\\n7817\\n7823\\n7829\\n7841\\n7853\\n7867\\n7873\\n7877\\n7879\\n7883\\n7901\\n7907\\n7919\", \"base64Encoded\": false, \"contentType\": \"text/plain\"}}\n",
- "error": "",
- "result": {
- "first_1000_primes.txt": {
- "content": "2\n3\n5\n7\n11\n13\n17\n19\n23\n29\n31\n37\n41\n43\n47\n53\n59\n61\n67\n71\n73\n79\n83\n89\n97\n101\n103\n107\n109\n113\n127\n131\n137\n139\n149\n151\n157\n163\n167\n173\n179\n181\n191\n193\n197\n199\n211\n223\n227\n229\n233\n239\n241\n251\n257\n263\n269\n271\n277\n281\n283\n293\n307\n311\n313\n317\n331\n337\n347\n349\n353\n359\n367\n373\n379\n383\n389\n397\n401\n409\n419\n421\n431\n433\n439\n443\n449\n457\n461\n463\n467\n479\n487\n491\n499\n503\n509\n521\n523\n541\n547\n557\n563\n569\n571\n577\n587\n593\n599\n601\n607\n613\n617\n619\n631\n641\n643\n647\n653\n659\n661\n673\n677\n683\n691\n701\n709\n719\n727\n733\n739\n743\n751\n757\n761\n769\n773\n787\n797\n809\n811\n821\n823\n827\n829\n839\n853\n857\n859\n863\n877\n881\n883\n887\n907\n911\n919\n929\n937\n941\n947\n953\n967\n971\n977\n983\n991\n997\n1009\n1013\n1019\n1021\n1031\n1033\n1039\n1049\n1051\n1061\n1063\n1069\n1087\n1091\n1093\n1097\n1103\n1109\n1117\n1123\n1129\n1151\n1153\n1163\n1171\n1181\n1187\n1193\n1201\n1213\n1217\n1223\n1229\n1231\n1237\n1249\n1259\n1277\n1279\n1283\n1289\n1291\n1297\n1301\n1303\n1307\n1319\n1321\n1327\n1361\n1367\n1373\n1381\n1399\n1409\n1423\n1427\n1429\n1433\n1439\n1447\n1451\n1453\n1459\n1471\n1481\n1483\n1487\n1489\n1493\n1499\n1511\n1523\n1531\n1543\n1549\n1553\n1559\n1567\n1571\n1579\n1583\n1597\n1601\n1607\n1609\n1613\n1619\n1621\n1627\n1637\n1657\n1663\n1667\n1669\n1693\n1697\n1699\n1709\n1721\n1723\n1733\n1741\n1747\n1753\n1759\n1777\n1783\n1787\n1789\n1801\n1811\n1823\n1831\n1847\n1861\n1867\n1871\n1873\n1877\n1879\n1889\n1901\n1907\n1913\n1931\n1933\n1949\n1951\n1973\n1979\n1987\n1993\n1997\n1999\n2003\n2011\n2017\n2027\n2029\n2039\n2053\n2063\n2069\n2081\n2083\n2087\n2089\n2099\n2111\n2113\n2129\n2131\n2137\n2141\n2143\n2153\n2161\n2179\n2203\n2207\n2213\n2221\n2237\n2239\n2243\n2251\n2267\n2269\n2273\n2281\n2287\n2293\n2297\n2309\n2311\n2333\n2339\n2341\n2347\n2351\n2357\n2371\n2377\n2381\n2383\n2389\n2393\n2399\n2411\n2417\n2423\n2437\n2441\n2447\n2459\
n2467\n2473\n2477\n2503\n2521\n2531\n2539\n2543\n2549\n2551\n2557\n2579\n2591\n2593\n2609\n2617\n2621\n2633\n2647\n2657\n2659\n2663\n2671\n2677\n2683\n2687\n2689\n2693\n2699\n2707\n2711\n2713\n2719\n2729\n2731\n2741\n2749\n2753\n2767\n2777\n2789\n2791\n2797\n2801\n2803\n2819\n2833\n2837\n2843\n2851\n2857\n2861\n2879\n2887\n2897\n2903\n2909\n2917\n2927\n2939\n2953\n2957\n2963\n2969\n2971\n2999\n3001\n3011\n3019\n3023\n3037\n3041\n3049\n3061\n3067\n3079\n3083\n3089\n3109\n3119\n3121\n3137\n3163\n3167\n3169\n3181\n3187\n3191\n3203\n3209\n3217\n3221\n3229\n3251\n3253\n3257\n3259\n3271\n3299\n3301\n3307\n3313\n3319\n3323\n3329\n3331\n3343\n3347\n3359\n3361\n3371\n3373\n3389\n3391\n3407\n3413\n3433\n3449\n3457\n3461\n3463\n3467\n3469\n3491\n3499\n3511\n3517\n3527\n3529\n3533\n3539\n3541\n3547\n3557\n3559\n3571\n3581\n3583\n3593\n3607\n3613\n3617\n3623\n3631\n3637\n3643\n3659\n3671\n3673\n3677\n3691\n3697\n3701\n3709\n3719\n3727\n3733\n3739\n3761\n3767\n3769\n3779\n3793\n3797\n3803\n3821\n3823\n3833\n3847\n3851\n3853\n3863\n3877\n3881\n3889\n3907\n3911\n3917\n3919\n3923\n3929\n3931\n3943\n3947\n3967\n3989\n4001\n4003\n4007\n4013\n4019\n4021\n4027\n4049\n4051\n4057\n4073\n4079\n4091\n4093\n4099\n4111\n4127\n4129\n4133\n4139\n4153\n4157\n4159\n4177\n4201\n4211\n4217\n4219\n4229\n4231\n4241\n4243\n4253\n4259\n4261\n4271\n4273\n4283\n4289\n4297\n4327\n4337\n4339\n4349\n4357\n4363\n4373\n4391\n4397\n4409\n4421\n4423\n4441\n4447\n4451\n4457\n4463\n4481\n4483\n4493\n4507\n4513\n4517\n4519\n4523\n4547\n4549\n4561\n4567\n4583\n4591\n4597\n4603\n4621\n4637\n4639\n4643\n4649\n4651\n4657\n4663\n4673\n4679\n4691\n4703\n4721\n4723\n4729\n4733\n4751\n4759\n4783\n4787\n4789\n4793\n4799\n4801\n4813\n4817\n4831\n4861\n4871\n4877\n4889\n4903\n4909\n4919\n4931\n4933\n4937\n4943\n4951\n4957\n4967\n4969\n4973\n4987\n4993\n4999\n5003\n5009\n5011\n5021\n5023\n5039\n5051\n5059\n5077\n5081\n5087\n5099\n5101\n5107\n5113\n5119\n5147\n5153\n5167\n5171\n5179\n5189\n5197\n5209\n5227\n5231\n5233\n5237\n5
261\n5273\n5279\n5281\n5297\n5303\n5309\n5323\n5333\n5347\n5351\n5381\n5387\n5393\n5399\n5407\n5413\n5417\n5419\n5431\n5437\n5441\n5443\n5449\n5471\n5477\n5479\n5483\n5501\n5503\n5507\n5519\n5521\n5527\n5531\n5557\n5563\n5569\n5573\n5581\n5591\n5623\n5639\n5641\n5647\n5651\n5653\n5657\n5659\n5669\n5683\n5689\n5693\n5701\n5711\n5717\n5737\n5741\n5743\n5749\n5779\n5783\n5791\n5801\n5807\n5813\n5821\n5827\n5839\n5843\n5849\n5851\n5857\n5861\n5867\n5869\n5879\n5881\n5897\n5903\n5923\n5927\n5939\n5953\n5981\n5987\n6007\n6011\n6029\n6037\n6043\n6047\n6053\n6067\n6073\n6079\n6089\n6091\n6101\n6113\n6121\n6131\n6133\n6143\n6151\n6163\n6173\n6197\n6199\n6203\n6211\n6217\n6221\n6229\n6247\n6257\n6263\n6269\n6271\n6277\n6287\n6299\n6301\n6311\n6317\n6323\n6329\n6337\n6343\n6353\n6359\n6361\n6367\n6373\n6379\n6389\n6397\n6421\n6427\n6449\n6451\n6469\n6473\n6481\n6491\n6521\n6529\n6547\n6551\n6553\n6563\n6569\n6571\n6577\n6581\n6599\n6607\n6619\n6637\n6653\n6659\n6661\n6673\n6679\n6689\n6691\n6701\n6703\n6709\n6719\n6733\n6737\n6761\n6763\n6779\n6781\n6791\n6793\n6803\n6823\n6827\n6829\n6833\n6841\n6857\n6863\n6869\n6871\n6883\n6899\n6907\n6911\n6917\n6947\n6949\n6959\n6961\n6967\n6971\n6977\n6983\n6991\n6997\n7001\n7013\n7019\n7027\n7039\n7043\n7057\n7069\n7079\n7103\n7109\n7121\n7127\n7129\n7151\n7159\n7177\n7187\n7193\n7207\n7211\n7213\n7219\n7229\n7237\n7243\n7247\n7253\n7283\n7297\n7307\n7309\n7321\n7331\n7333\n7349\n7351\n7369\n7393\n7411\n7417\n7433\n7451\n7457\n7459\n7477\n7481\n7487\n7489\n7499\n7507\n7517\n7523\n7529\n7537\n7541\n7547\n7549\n7559\n7561\n7573\n7577\n7583\n7589\n7591\n7603\n7607\n7621\n7639\n7643\n7649\n7669\n7673\n7681\n7687\n7691\n7699\n7703\n7717\n7723\n7727\n7741\n7753\n7757\n7759\n7789\n7793\n7817\n7823\n7829\n7841\n7853\n7867\n7873\n7877\n7879\n7883\n7901\n7907\n7919",
- "base64Encoded": false,
- "contentType": "text/plain"
- }
- },
- "exitCode": 0
- }
- }
-]
\ No newline at end of file
diff --git a/static/116_workflowManager.py b/static/116_workflowManager.py
deleted file mode 100644
index db4c4f65..00000000
--- a/static/116_workflowManager.py
+++ /dev/null
@@ -1,1306 +0,0 @@
-"""
-Workflow Manager Module for state machine-based backend chat workflow.
-Implements the state machine as defined in the documentation.
-"""
-
-import asyncio
-import os
-import logging
-import json
-import re
-import uuid
-import base64
-from datetime import datetime
-from typing import Dict, Any, List, Optional, Union, Tuple
-
-from modules.mimeUtils import isTextMimeType, determineContentEncoding
-
-# Required imports
-from modules.workflowAgentsRegistry import getAgentRegistry
-from modules.lucydomInterface import getLucydomInterface as domInterface
-from modules.documentProcessor import getDocumentContents
-
-# Configure logger
-logger = logging.getLogger(__name__)
-
-# Global settings for the workflow management
-GLOBAL_WORKFLOW_LABELS = {
- "systemName": "AI Assistant", # Default system name for logs
- "workflowStatusMessages": {
- "init": "Workflow initialized",
- "running": "Running workflow",
- "waiting": "Waiting for input",
- "completed": "Workflow completed successfully",
- "stopped": "Workflow stopped by user",
- "failed": "Error in workflow"
- }
-}
-class WorkflowStoppedException(Exception):
- """Exception raised when a workflow is forcibly stopped with function checkExitCriteria() """
- pass
-
-class WorkflowManager:
- """
- Manages the processing of chat requests, agent execution, and
- the integration of results into the workflow, following a state machine approach.
- """
-
- def __init__(self, mandateId: int, userId: int):
- """
- Initializes the WorkflowManager with mandate and user context.
-
- Args:
- mandateId: ID of the current mandate
- userId: ID of the current user
- """
- self.mandateId = mandateId
- self.userId = userId
- self.mydom = domInterface(mandateId, userId)
- self.agentRegistry = getAgentRegistry()
- self.agentRegistry.setMydom(self.mydom)
-
-
- ### Workflow State Machine Implementation
-
- async def workflowStart(self, userInput: Dict[str, Any], workflowId: Optional[str] = None) -> Dict[str, Any]:
- """
- Main entry point for starting or continuing a workflow (State 1: Workflow Initialization).
- Initializes a new workflow or loads an existing one based on workflowId.
-
- Args:
- userInput: User input with prompt and optional file list
- workflowId: Optional workflow ID to continue an existing workflow
-
- Returns:
- Initialized workflow object with status "running"
- """
- # 1. Initialize workflow or load existing one
- workflow = self.workflowInit(workflowId)
- self.logAdd(workflow, "Starting workflow processing", level="info", progress=0)
-
- # Start asynchronous processing
- asyncio.create_task(self.workflowProcess(userInput, workflow))
-
- return workflow
-
- ### Forces exit
-
- def checkExitCriteria(self, workflow: Dict[str, Any]):
- current_workflow = self.mydom.loadWorkflowState(workflow["id"])
- if current_workflow["status"] in ["stopped", "failed"]:
- self.logAdd(workflow, f"Workflow processing terminated due to status: {current_workflow['status']}", level="info")
- # Raise an exception to stop execution
- raise WorkflowStoppedException(f"Workflow execution stopped due to status: {current_workflow['status']}")
-
- async def workflowStop(self, workflowId: str) -> Dict[str, Any]:
- """
- Stops a running workflow (State 8: Workflow Stopped).
- Sets status to "stopped" and adds a log entry.
-
- Args:
- workflowId: ID of the workflow to stop
-
- Returns:
- Updated workflow with status="stopped"
- """
- workflow = self.mydom.loadWorkflowState(workflowId)
- if not workflow:
- return {"error": "Workflow not found", "status": "failed"}
-
- # Update status to stopped
- workflow["status"] = "stopped"
- workflow["lastActivity"] = datetime.now().isoformat()
-
- # Update in database
- self.mydom.updateWorkflow(workflowId, {
- "status": workflow["status"],
- "lastActivity": workflow["lastActivity"]
- })
-
- self.logAdd(workflow, GLOBAL_WORKFLOW_LABELS["workflowStatusMessages"]["stopped"], level="info", progress=100)
- return workflow
-
- async def workflowProcess(self, userInput: Dict[str, Any], workflow: Dict[str, Any]) -> Dict[str, Any]:
- """
- Main processing function that implements the workflow state machine.
- Handles the complete workflow process from user input to final response.
-
- Args:
- userInput: User input with prompt and optional file list
- workflow: Current workflow object
-
- Returns:
- Updated workflow with processing results
- """
- try:
- # State 3: User Message Processing
- self.checkExitCriteria(workflow)
- messageUser = await self.chatMessageToWorkflow("user", "", userInput, workflow)
- messageUser["status"] = "first" # For first message
-
- # State 4: Project Manager Analysis
- self.checkExitCriteria(workflow)
- self.logAdd(workflow, "Analyzing request and planning work", level="info", progress=10)
- projectManagerResponse = await self.projectManagerAnalysis(messageUser, workflow)
- objFinalDocuments = projectManagerResponse.get("objFinalDocuments", [])
- objWorkplan = projectManagerResponse.get("objWorkplan", [])
- objUserResponse = projectManagerResponse.get("objUserResponse", "")
-
- # Get detected language and set it in the mydom interface
- self.checkExitCriteria(workflow)
- userLanguage = projectManagerResponse.get("userLanguage", "en")
- self.mydom.setUserLanguage(userLanguage)
-
- # Save the response as a message in the workflow and add log entries
- self.checkExitCriteria(workflow)
- responseMessage = {
- "role": "assistant",
- "agentName": "project_manager",
- "content": objUserResponse,
- "status": "step" # As per state machine specification
- }
- self.messageAdd(workflow, responseMessage)
-
- self.logAdd(workflow, f"Planned outputs: {len(objFinalDocuments)} documents", level="info", progress=20)
- self.logAdd(workflow, f"Work plan created with {len(objWorkplan)} steps", level="info", progress=25)
-
- # State 5: Agent Execution
- objResults = []
- if objWorkplan:
- totalTasks = len(objWorkplan)
- for taskIndex, task in enumerate(objWorkplan):
- self.checkExitCriteria(workflow)
-
- agentName = task.get("agent", "unknown")
- progressValue = 30 + int((taskIndex / totalTasks) * 60) # Progress from 30% to 90%
-
- progressMsg = f"Running task {taskIndex+1}/{totalTasks}: {agentName}"
- self.logAdd(workflow, progressMsg, level="info", progress=progressValue)
-
- taskResults = await self.agentProcessing(task, workflow)
- objResults.extend(taskResults)
-
- # Log completion of this task
- self.logAdd(
- workflow,
- f"Completed task {taskIndex+1}/{totalTasks}: {agentName}",
- level="info",
- progress=progressValue + (60/totalTasks)/2
- )
-
- # State 6: Final Response Generation
- self.checkExitCriteria(workflow)
- self.logAdd(workflow, "Creating final response", level="info", progress=90)
- finalMessage = await self.generateFinalMessage(objUserResponse, objFinalDocuments, objResults)
- finalMessage["status"] = "last" # As per state machine specification
- self.messageAdd(workflow, finalMessage)
-
- # State 7: Workflow Completion
- self.checkExitCriteria(workflow)
- self.workflowFinish(workflow)
-
- return workflow
-
- except Exception as e:
- # State 2: Workflow Exception
- logger.error(f"Workflow processing error: {str(e)}", exc_info=True)
- workflow["status"] = "failed"
- workflow["lastActivity"] = datetime.now().isoformat()
-
- # Update in database
- self.mydom.updateWorkflow(workflow["id"], {
- "status": "failed",
- "lastActivity": workflow["lastActivity"]
- })
-
- self.logAdd(workflow, f"Workflow failed: {str(e)}", level="error", progress=100)
- return workflow
-
- def workflowInit(self, workflowId: Optional[str] = None) -> Dict[str, Any]:
- """
- Initializes a workflow or loads an existing one with round counting (State 1: Workflow Initialization).
-
- Args:
- workflowId: Optional - ID of the workflow to load
-
- Returns:
- Initialized workflow object
- """
- currentTime = datetime.now().isoformat()
-
- logger.debug(f"CHECK DATA0 id'{workflowId}'")
- workflowExist=self.mydom.getWorkflow(workflowId)
- if workflowId is None or not workflowExist:
- logger.debug(f"CHECK DATA1 id'{workflowId}'")
- # Create new workflow
- newWorkflowId = str(uuid.uuid4()) if workflowId is None else workflowId
- workflow = {
- "id": newWorkflowId,
- "mandateId": self.mandateId,
- "userId": self.userId,
- "name": f"Workflow {newWorkflowId[:8]}",
- "startedAt": currentTime,
- "messages": [], # Empty list - will be filled with references
- "messageIds": [], # Initialize empty messageIds list
- "logs": [],
- "dataStats": {},
- "currentRound": 1,
- "status": "running",
- "lastActivity": currentTime,
- }
-
- # Save to database - only the workflow metadata
- workflowDb = {
- "id": workflow["id"],
- "mandateId": workflow["mandateId"],
- "userId": workflow["userId"],
- "name": workflow["name"],
- "startedAt": workflow["startedAt"],
- "status": workflow["status"],
- "dataStats": workflow["dataStats"],
- "currentRound": workflow["currentRound"],
- "lastActivity": workflow["lastActivity"],
- "messageIds": workflow["messageIds"] # Include messageIds
- }
- self.mydom.createWorkflow(workflowDb)
-
- self.logAdd(workflow, GLOBAL_WORKFLOW_LABELS["workflowStatusMessages"]["init"], level="info", progress=0)
- logger.debug(f"CHECK DATA {workflow}")
- return workflow
- else:
- # State 10: Workflow Resumption - Load existing workflow
- workflow = self.mydom.loadWorkflowState(workflowId)
-
- # Ensure messageIds exists
- if "messageIds" not in workflow:
- # Initialize from existing messages
- workflow["messageIds"] = [msg["id"] for msg in workflow.get("messages", [])]
-
- # Update in database
- self.mydom.updateWorkflow(workflowId, {"messageIds": workflow["messageIds"]})
-
- # Update status and increment round counter
- workflow["status"] = "running"
- workflow["lastActivity"] = currentTime
-
- # Increment currentRound if it exists, otherwise set it to 1
- if "currentRound" in workflow:
- workflow["currentRound"] += 1
- else:
- workflow["currentRound"] = 1
-
- # Update in database - only the relevant workflow fields
- workflowUpdate = {
- "status": workflow["status"],
- "lastActivity": workflow["lastActivity"],
- "currentRound": workflow["currentRound"]
- }
- self.mydom.updateWorkflow(workflowId, workflowUpdate)
-
- self.logAdd(workflow, GLOBAL_WORKFLOW_LABELS["workflowStatusMessages"]["running"], level="info", progress=0)
- return workflow
-
- def workflowFinish(self, workflow: Dict[str, Any]) -> Dict[str, Any]:
- """
- Finalizes a workflow and sets the status to 'completed' (State 7: Workflow Completion).
-
- Args:
- workflow: Workflow object
-
- Returns:
- Updated workflow object
- """
- # Prepare workflow update data
- workflowUpdate = {
- "status": "completed",
- "lastActivity": datetime.now().isoformat(),
- }
-
- # Update the workflow object in memory
- workflow["status"] = workflowUpdate["status"]
- workflow["lastActivity"] = workflowUpdate["lastActivity"]
-
- # Save workflow state to database - only relevant fields
- self.mydom.updateWorkflow(workflow["id"], workflowUpdate)
-
- self.logAdd(workflow, GLOBAL_WORKFLOW_LABELS["workflowStatusMessages"]["completed"], level="info", progress=100)
- return workflow
-
- async def projectManagerAnalysis(self, messageUser: Dict[str, Any], workflow: Dict[str, Any]) -> Dict[str, Any]:
- """
- Creates the prompt for the project manager and processes the response (State 4: Project Manager Analysis).
-
- Args:
- messageUser: Message object with user request
- workflow: Current workflow object
-
- Returns:
- Project manager's response with objFinalDocuments, objWorkplan and objUserResponse
- """
- # Get available agents with their capabilities
- availableAgents = self.agentProfiles()
-
- # Create a workflow summary
- workflowSummary = await self.workflowSummarize(workflow, messageUser)
-
- # Create a list of currently available documents from user input or previously generated documents
- availableDocuments = self.getAvailableDocuments(workflow, messageUser)
- availableDocsStr = json.dumps(availableDocuments, indent=2)
-
- # Create the prompt for the project manager with language detection requirement
- prompt = f"""
-Based on the user request and the provided documents, please analyze the requirements and create a processing plan.
-Also, identify the language of the user's request and include it in your response.
-
-
-{messageUser.get('content')}
-
-
-# Previous conversation history:
-
-{workflowSummary}
-
-
-# Available documents (currently in workflow):
-
-{availableDocsStr}
-
-
-# Available agents and their capabilities:
-
-{self.parseJson2text(availableAgents)}
-
-
-Please analyze the request and create:
-
-1. A list of required result documents (objFinalDocuments)
-2. A plan for executing agents (objWorkplan)
-3. A clear response to the user explaining what you're doing (objUserResponse)
-4. Identified language of the user's request (userLanguage)
-
-## IMPORTANT RULES FOR THE WORKPLAN:
-1. Each input document must either already exist (provided by the user or previously created by an agent) or be created by an agent before it's used.
-2. If necessary, convert input documents to a suitable format using agents when the type doesn't match.
-3. Do not define document inputs that don't exist or haven't been generated beforehand.
-4. Create a logical sequence - earlier agents can create documents that are later used as inputs.
-5. If the user has provided documents but hasn't clearly stated what they want, try to act according to the context.
-
-Your answer must be strictly in the JSON_OUTPUT format, with no additions before or after the JSON object.
-
-JSON_OUTPUT = {{
- "objFinalDocuments": ["label",...], # document label in the format 'filename.ext'
- "objWorkplan": [
- {{
- "agent": "agent_name", # Name of an available agent
- "prompt": "Specific instructions to the agent, that he knows what to do with which documents and which output to provide."
- "outputDocuments": [
- {{
- "label":"document label in the format 'filename.ext'",
- "prompt":"AI prompt to describe the content of the file"
- }}
- ],
- "inputDocuments": [
- {{
- "label":"document label in the format 'filename.ext'",
- "fileId":id, # if refering to an existing document, provide fileId to select the correct file
- "contentPart":"", # provide empty string, if all document contents to consider, otherwise the contentPart of the document to focus on
- "prompt":"AI prompt to describe what data to extract from the file."
- }}
- ], # If no input documents are needed, include "inputDocuments" as an empty list
- }}
- # Multiple agent tasks can be added here and should build logically on each other
- ],
- "objUserResponse": "Information to the user about how his request will be solved, in the language of the user's request.",
- "userLanguage": "en" # Language code (e.g., en, de, fr, es) based on the user's request
-}}
-
-## RULES for inputDocuments:
-1. The user request refers to documents where "fileSource" in available documents is "user". Those documents are in the focus for input
-2. In case of redundant label in available documents, use document with highest sequenceNr if not specified differently
-
-## STRICT RULES FOR document "label":
-1. Every document label MUST include a proper file extension that matches the content type.
-2. Use standard extensions like:
- - ".txt" for text files
- - ".md" for markdown files
- - ".csv" for comma-separated values
- - ".json" for JSON data
- - ".html" for HTML content
- - ".jpg" or ".png" for images
- - ".docx" for Word documents
- - ".xlsx" for Excel files
- - ".pdf" for PDF documents
-3. Use descriptive filenames that indicate the document's purpose (e.g., "analysis_report.txt" rather than just "report.txt")
-4. If you use label for an existing file
-"""
-
- # Call the AI service through mydom for language support
- logger.debug(f"PROJECT MANAGER Planning prompt: {prompt}")
- projectManagerOutput = await self.mydom.callAi([
- {
- "role": "system",
- "content": "You are an experienced project manager who analyzes user requests and creates work plans. You pay very careful attention to ensure that all document dependencies are correct and that no non-existent documents are defined as inputs. The output follows strictly the specified format."
- },
- {
- "role": "user",
- "content": prompt
- }
- ])
-
- # Parse the JSON response
- logger.debug(f"PROJECT MANAGER Planning answer: {projectManagerOutput}")
- return self.parseJsonResponse(projectManagerOutput)
-
- async def agentProcessing(self, task: Dict[str, Any], workflow: Dict[str, Any]) -> List[Dict[str, Any]]:
- """
- Process a single agent task from the workflow (State 5: Agent Execution).
- Optimized for the task-based approach where all agents implement processTask.
-
- Args:
- task: The task definition containing agent name, prompt, and document specifications
- workflow: The current workflow object
-
- Returns:
- List of document objects created by the agent
- """
- # 1. Extract task information
- agentName = task.get("agent")
- agentPrompt = task.get("prompt", "")
-
- # Log the current step
- outputLabels = []
- for doc in task.get("outputDocuments", []):
- outputLabels.append(doc.get("label", "unknown"))
-
- stepInfo = f"Agent '{agentName}' to create {', '.join(outputLabels)}."
- self.logAdd(workflow, stepInfo, level="info")
-
- # Check if prompt is empty
- if agentPrompt == "":
- logger.warning("Empty prompt, no task to do")
- return []
-
- # Get agent from registry
- agent = self.agentRegistry.getAgent(agentName)
- if not agent:
- logger.error(f"Agent '{agentName}' not found")
- return []
-
- # Prepare output document specifications
- outputSpecs = []
- for doc in task.get("outputDocuments", []):
- outputSpec = {
- "label": doc.get("label"),
- "description": doc.get("prompt", "")
- }
- outputSpecs.append(outputSpec)
-
- # Prepare input documents for the agent
- inputDocuments = await self.prepareAgentInputDocuments(task.get('inputDocuments', []), workflow)
-
- # Create a standardized task object for the agent as per state machine spec
- agentTask = {
- "taskId": str(uuid.uuid4()),
- "workflowId": workflow.get("id"),
- "prompt": agentPrompt,
- "inputDocuments": inputDocuments,
- "outputSpecifications": outputSpecs,
- "context": {
- "workflowRound": workflow.get("currentRound", 1),
- "agentType": agentName,
- "timestamp": datetime.now().isoformat(),
- "language": self.mydom.userLanguage # Pass language to agent
- }
- }
-
- # Execute the agent with the standardized task
- try:
- # Process the task using the agent's standardized interface
- logger.debug("TASK: "+self.parseJson2text(agentTask))
- logger.debug(f"Agent '{agentName}' AI service available: {agent.mydom is not None}")
-
- agentResults = await agent.processTask(agentTask)
-
- logger.debug(f"Agent '{agentName}' completed task. RESULT: {self.parseJson2text(agentResults)}")
-
- # Log the agent response
- self.logAdd(
- workflow,
- f"Agent '{agentName}' completed task. Feedback: {agentResults.get('feedback', 'No feedback provided')}",
- level="info"
- )
-
- # Store produced files and prepare input object for message
- agentInputs = {
- "prompt": agentResults.get("feedback", ""),
- "listFileId": self.saveAgentDocuments(agentResults)
- }
-
- # Create a message in the workflow with the agent's response
- agentMessage = await self.chatMessageToWorkflow("assistant", agentName, agentInputs, workflow)
- agentMessage["status"] = "step" # As per state machine specification
- logger.debug(f"Agent result = {self.parseJson2text(agentMessage)}.")
-
- return agentMessage.get("documents", [])
-
- except Exception as e:
- errorMsg = f"Error executing agent '{agentName}': {str(e)}"
- logger.error(errorMsg, exc_info=True) # Add exc_info=True to get full traceback
- self.logAdd(workflow, errorMsg, level="error")
- return []
-
- async def generateFinalMessage(self, objUserResponse: str, objFinalDocuments: List[str], objResults: List[Dict[str, Any]]) -> Dict[str, Any]:
- """
- Creates the final response message with review of promised and delivered documents (State 6: Final Response Generation).
-
- Args:
- objUserResponse: Initial text response to the user
- objFinalDocuments: List of expected response documents
- objResults: List of generated result documents
-
- Returns:
- Complete message object with content and relevant documents
- """
- # Find documents that match the objFinalDocuments requirements
- matchingDocuments = []
-
- if len(objFinalDocuments) > 0:
- for answerLabel in objFinalDocuments:
- # Find matching document in results
- for doc in objResults:
- docName = self.getFilename(doc)
- # Check if this document matches the answer specification
- if docName == answerLabel:
- contentRef = []
- for c in doc.get("contents", []):
- contentRef.append(c.get("summary", ""))
- docRef = {
- "label": docName,
- "contentSummary": contentRef
- }
- matchingDocuments.append(docRef)
- break
-
- # Use the mydom for language-aware AI calls
- finalPrompt = await self.mydom.callAi([
- {"role": "system", "content": "You are a project manager, who delivers results to a user."},
- {"role": "user", "content": f"""
-Give the final short feedback to the user with reference to the initial statement (objUserResponse). Inform him about the list of filesDelivered. You do not need to send the files, this is handled separately. If in the list of filesDelivered some files_promised would be missing, just give a comment on this, otherwise task is now completed successfully.
-
-Here the data:
-objUserResponse = {self.parseJson2text(objUserResponse)}
-filesPromised = {self.parseJson2text(objFinalDocuments)}
-filesDelivered = {self.parseJson2text(matchingDocuments)}
-"""
- }
- ], produceUserAnswer=True)
-
- # Create basic message structure with proper fields
- logger.debug(f"FINAL PROMPT = {self.parseJson2text(finalPrompt)}.")
- finalMessage = {
- "role": "assistant",
- "agentName": "project_manager",
- "content": finalPrompt,
- "documents": [] # DO NOT include the results documents, already with agents
- }
-
- logger.debug(f"FINAL MESSAGE = {self.parseJson2text(finalMessage)}.")
- return finalMessage
-
- async def workflowSummarize(self, workflow: Dict[str, Any], messageUser: Dict[str, Any]) -> str:
- """
- Creates a summary of the workflow without the current user message.
-
- Args:
- workflow: Workflow object
- messageUser: Current user message
-
- Returns:
- Summary of the workflow
- """
- if not workflow or "messages" not in workflow or not workflow["messages"]:
- return "" # First message
-
- # Go through messages in chronological order
- messages = sorted(workflow["messages"], key=lambda m: m.get("sequenceNo", 0), reverse=False)
-
- summaryParts = []
- for message in messages:
- if message["id"] != messageUser["id"]:
- messageSummary = await self.messageSummarize(message)
- summaryParts.append(messageSummary)
-
- return "\n\n".join(summaryParts)
-
- async def messageSummarize(self, message: Dict[str, Any]) -> str:
- """
- Creates a summary of a message including its documents.
-
- Args:
- message: Message to summarize
-
- Returns:
- Summary of the message
- """
- role = message.get("role", "undefined")
- agentName = message.get("agentName", "")
- content = message.get("content", "")
-
- try:
- # Use the mydom for language-aware AI calls
- contentSummary = await self.mydom.callAi([
- {"role": "system", "content": f"You are a chat message summarizer. Create a very concise summary (2-3 sentences, maximum 300 characters)"},
- {"role": "user", "content": content}
- ])
- except Exception as e:
- logger.error(f"Error creating summary: {str(e)}")
- contentSummary = content[:200] + "..."
-
- # Summarize documents
- docsSummary = ""
- if "documents" in message and message["documents"]:
- docsList = []
- for i, doc in enumerate(message["documents"]):
- docName = self.getFilename(doc)
- docsList.append(docName)
- if docsList:
- docsSummary = "\nDocuments:" + "\n- ".join(docsList)
-
- return f"[{role} {agentName}]: {contentSummary}{docsSummary}"
-
- async def chatMessageToWorkflow(self, role: str, agentName: str, chatMessage: Dict[str, Any], workflow: Dict[str, Any]) -> Dict[str, Any]:
- """
- Integrates user inputs into a Message object including files with complete contents (State 3: User Message Processing).
-
- Args:
- role: Role of the message sender ('user' or 'assistant')
- agentName: Name of the agent, if message is from an agent
- chatMessage: Input data with "prompt"=str, "listFileId"=[]
- workflow: Current workflow object
-
- Returns:
- Message object with content and documents including contents
- """
- logger.info(f"Message from {role} {agentName} sent with {len(chatMessage.get('listFileId', []))} documents")
- logger.debug(f"message = {self.parseJson2text(chatMessage)}.")
-
- # Check message content
- messageContent = chatMessage.get("prompt", "")
- if isinstance(messageContent, dict) and "content" in messageContent:
- messageContent = messageContent["content"]
-
- # If message content is empty, no chat
- if role == "user" and (messageContent is None or messageContent.strip() == ""):
- logger.warning(f"Empty message, no chat")
- messageContent = "(No user input received)"
-
- # Process additional files with complete contents
- additionalFileIds = chatMessage.get("listFileId", [])
- additionalFiles = await self.processFileIds(additionalFileIds)
-
- # Create message object
- messageObject = {
- "role": role,
- "agentName": agentName,
- "content": messageContent,
- "documents": additionalFiles,
- "status": chatMessage.get("status", "")
- }
-
- messageObject = self.messageAdd(workflow, messageObject)
- logger.debug(f"message_user = {self.parseJson2text(messageObject)}.")
- return messageObject
-
- async def processFileIds(self, fileIds: List[int]) -> List[Dict[str, Any]]:
- """
- Processes a list of File-IDs and returns the corresponding file objects as a list of Document objects.
- Loads all contents directly and adds summaries to each content item.
- Now properly handles the base64Encoded flag.
-
- Args:
- fileIds: List of file IDs
-
- Returns:
- List of Document objects with contents, summaries, and base64Encoded flags
- """
- documents = []
- logger.info(f"Processing {len(fileIds)} files")
-
- for fileId in fileIds:
- try:
- # Check if the file exists
- file = self.mydom.getFile(fileId)
- if not file:
- logger.warning(f"File with ID {fileId} not found")
- continue
-
- # Check if file belongs to the current mandate
- if file.get("mandateId") != self.mandateId:
- logger.warning(f"File {fileId} does not belong to mandate {self.mandateId}")
- continue
-
- # Load file content
- fileContent = self.mydom.getFileData(fileId)
- if fileContent is None:
- logger.warning(f"No content found for file with ID {fileId}")
- continue
-
- # Determine if file is text or binary based on MIME type
- mimeType = file.get("mimeType", "application/octet-stream")
- isTextFormat = isTextMimeType(mimeType)
-
- # Get file data from database
- fileDataEntries = self.mydom.db.getRecordset("fileData", recordFilter={"id": fileId})
- base64Encoded = False
-
- if fileDataEntries and "base64Encoded" in fileDataEntries[0]:
- # Use the flag from the database
- base64Encoded = fileDataEntries[0]["base64Encoded"]
- else:
- # Determine based on file type (fallback for older data)
- base64Encoded = not isTextFormat
-
- # Convert to base64 for document storage
- import base64
- encodedData = ""
-
- if base64Encoded:
- # Already base64 encoded in database
- encodedData = base64.b64encode(fileContent).decode('utf-8')
- else:
- # Text file - convert to string if it's bytes
- if isinstance(fileContent, bytes):
- try:
- fileContentStr = fileContent.decode('utf-8')
- encodedData = fileContentStr
- except UnicodeDecodeError:
- # Failed to decode as text, use base64
- encodedData = base64.b64encode(fileContent).decode('utf-8')
- base64Encoded = True
- else:
- # Already a string
- encodedData = fileContent
-
- # Create document
- fileNameExt = file.get("name")
- document = {
- "id": f"doc_{str(uuid.uuid4())}",
- "fileId": fileId,
- "name": os.path.splitext(fileNameExt)[0] if os.path.splitext(fileNameExt)[0] else "noname",
- "ext": os.path.splitext(fileNameExt)[1][1:] if os.path.splitext(fileNameExt)[1] else "bin",
- "data": encodedData,
- "base64Encoded": base64Encoded,
- "contents": []
- }
-
- # Extract contents
- contents = getDocumentContents(file, fileContent)
-
- # Add summaries to each content item
- for content in contents:
- content["summary"] = await self.messageSummarizeContent(content)
-
- # Ensure base64Encoded flag is set
- if "base64Encoded" not in content:
- # Use the flag from metadata if available
- content["base64Encoded"] = content.get("metadata", {}).get("base64Encoded", not content.get("metadata", {}).get("isText", False))
-
- document["contents"] = contents
-
- logger.info(f"File {file.get('name', 'unnamed')} (ID: {fileId}) loaded with {len(contents)} contents and summaries")
- documents.append(document)
-
- except Exception as e:
- logger.error(f"Error processing file {fileId}: {str(e)}")
- # Continue with remaining files instead of failing
- continue
-
- return documents
-
- async def prepareAgentInputDocuments(self, docInputList: List[Dict[str, Any]], workflow: Dict[str, Any]) -> List[Dict[str, Any]]:
- """
- Prepares input documents for an agent, sorted with newest first.
-
- Args:
- docInputList: List of required input documents as specified by the project manager
- workflow: Workflow object
-
- Returns:
- Prepared input documents for the agent, sorted with newest first
- """
- preparedInputs = []
-
- # Sort workflow messages by sequence number (descending)
- sortedMessages = sorted(
- workflow.get("messages", []),
- key=lambda m: m.get("sequenceNo", 0),
- reverse=True
- )
-
- for docSpec in docInputList:
- docFilename = docSpec.get("label", "")
- docFileId = docSpec.get("fileId", "")
-
- foundDoc = None
- # Search for the document in sorted workflow messages (newest first)
- for message in sortedMessages:
- for doc in message.get("documents", []):
- if (docFileId != "" and docFileId == doc.get("fileId")) or (docFilename != "" and self.getFilename(doc) == docFilename):
- foundDoc = doc
- break
- if foundDoc:
- break
- if foundDoc:
- # Process document for agent based on the specification
- processedDoc = await self.processDocumentForAgent(foundDoc, docSpec)
-
- preparedInputs.append(processedDoc)
- else:
- logger.warning(f"Document with label '{docFilename}', fileId '{docFileId}' not found in workflow")
-
- return preparedInputs
-
- async def processDocumentForAgent(self, document: Dict[str, Any], docSpec: Dict[str, Any]) -> Dict[str, Any]:
- """
- Processes a document for an agent based on the document specification.
- Uses AI to extract relevant content from the document based on the specification.
-
- Args:
- document: The document to process
- docSpec: The document specification from the project manager
-
- Returns:
- Processed document with AI-extracted content
- """
- processedDoc = document.copy()
- partSpec = docSpec.get("contentPart", "")
-
- # Process each content item in the document
- if "contents" in processedDoc:
- processedContents = []
-
- for content in processedDoc["contents"]:
-
- # Check if part required
- if partSpec != "" and partSpec != content.get("name"):
- continue
-
- # Get the data from the content
- data = content.get("data", "")
- processedContent = content.copy()
-
- # Check if content data is base64 encoded
- isBase64 = content.get("metadata", {}).get("base64Encoded", False)
-
- try:
- # Use the AI service to process the document content according to the prompt from the project manager for the document specification
- summary = docSpec.get("prompt", "Extract the relevant information from this document")
- aiPrompt = f"""
-# Please process the following document content according to this instruction:
-
-{summary}
-
-
-# Document content:
-
-{data}
-
-
-# Extract and provide only the relevant information as requested.
-"""
-
- # Call the AI service through mydom for language support
- processedData = await self.mydom.callAi([
- {"role": "system", "content": "You are a document processing assistant. Extract only the relevant information as requested."},
- {"role": "user", "content": aiPrompt}
- ])
-
- # DO NOT change the original data field
- # processedContent["data"] unchanged
- processedContent["dataExtracted"] = processedData
- processedContent["metadata"]["aiProcessed"] = True
-
- except Exception as e:
- logger.error(f"Error processing document content with AI: {str(e)}")
- # Fall back to original content if AI processing fails
- processedContent["dataExtracted"] = "(no information)"
-
- processedContents.append(processedContent)
-
- processedDoc["contents"] = processedContents
-
- return processedDoc
-
- async def messageSummarizeContent(self, content: Dict[str, Any]) -> str:
- """
- Generates a summary for a content item using AI.
-
- Args:
- content: Content item to summarize (already processed by getDocumentContents)
-
- Returns:
- Brief summary of the content
- """
- # Extract relevant information
- data = content.get("data", "")
- contentType = content.get("contentType", "text/plain")
- isText = content.get("metadata", {}).get("isText", False)
-
- try:
- # Use the mydom for language-aware AI calls
- summary = await self.mydom.callAi([
- {"role": "system", "content": "You are a content summarizer. Create very concise summary (1-2 sentences, maximum 200 characters) about this file."},
- {"role": "user", "content": f"Summarize this {contentType} content briefly:\n\n{data}"}
- ])
- return summary
-
- except Exception as e:
- logger.error(f"Error generating content summary: {str(e)}")
- return f"Text content ({contentType})"
-
- def messageAdd(self, workflow: Dict[str, Any], message: Dict[str, Any]) -> Dict[str, Any]:
- """
- Adds a message to the workflow and updates lastActivity.
- Saves the message in the database and updates the workflow with references.
-
- Args:
- workflow: Workflow object
- message: Message to be saved
-
- Returns:
- Added message
- """
- currentTime = datetime.now().isoformat()
-
- # Ensure messages list exists
- if "messages" not in workflow:
- workflow["messages"] = []
-
- # Generate new message ID if not present
- if "id" not in message:
- message["id"] = f"msg_{str(uuid.uuid4())}"
-
- # Add workflow ID and timestamps
- message["workflowId"] = workflow["id"]
- message["startedAt"] = currentTime
- message["finishedAt"] = currentTime
-
- # Set sequence number
- message["sequenceNo"] = len(workflow["messages"]) + 1
-
- # Ensure required fields are present
- if "role" not in message:
- # Set a default role based on agentName
- message["role"] = "assistant" if message.get("agentName") else "user"
-
- if "agentName" not in message:
- message["agentName"] = ""
-
- # Set status if not present
- if "status" not in message:
- message["status"] = "step"
-
- # Add message to workflow
- workflow["messages"].append(message)
-
- # Ensure messageIds list exists
- if "messageIds" not in workflow:
- workflow["messageIds"] = []
-
- # Add message ID to the messageIds list
- workflow["messageIds"].append(message["id"])
-
- # Update workflow status
- workflow["lastActivity"] = currentTime
-
- # Save to database - first the message itself
- self.mydom.createWorkflowMessage(message)
-
- # Then save the workflow with updated references
- workflowUpdate = {
- "lastActivity": currentTime,
- "messageIds": workflow["messageIds"] # Update the messageIds field
- }
- self.mydom.updateWorkflow(workflow["id"], workflowUpdate)
-
- return message
-
- def logAdd(self, workflow: Dict[str, Any], message: str, level: str = "info",
- progress: Optional[int] = None) -> str:
- """
- Adds a log entry to the workflow and also logs it in the logger.
- Enhanced with standardized formatting and workflow status tracking.
-
- Args:
- workflow: Workflow object
- message: Log message
- level: Log level (info, warning, error)
- progress: Optional - Progress value (0-100)
-
- Returns:
- ID of the created log entry
- """
- # Ensure logs list exists
- if "logs" not in workflow:
- workflow["logs"] = []
-
- # Generate log ID
- logId = f"log_{str(uuid.uuid4())}"
-
- # Get workflow status
- workflowStatus = workflow.get("status", "running")
-
- # Set agentName from global settings
- agentName = GLOBAL_WORKFLOW_LABELS.get("systemName", "unknown")
-
- # Create log entry
- logEntry = {
- "id": logId,
- "workflowId": workflow["id"],
- "message": message,
- "type": level,
- "timestamp": datetime.now().isoformat(),
- "agentName": agentName,
- "status": workflowStatus
- }
-
- # Add progress if provided
- if progress is not None:
- logEntry["progress"] = progress
-
- # Add log to workflow
- workflow["logs"].append(logEntry)
-
- # Save in database
- self.mydom.createWorkflowLog(logEntry)
-
- # Also log in logger
- if level == "info":
- logger.info(f"Workflow {workflow['id']}: {message}")
- elif level == "warning":
- logger.warning(f"Workflow {workflow['id']}: {message}")
- elif level == "error":
- logger.error(f"Workflow {workflow['id']}: {message}")
-
- return logId
-
- def saveAgentDocuments(self, agentResults: Dict[str, Any]) -> List[int]:
- """
- Saves all documents from agent results as files and returns a list of file IDs.
- Enhanced to handle the standardized document format from agents with base64Encoded flag.
-
- Args:
- agentResults: Dictionary containing agent feedback and documents
-
- Returns:
- List of file IDs for the saved documents
- """
- fileIds = []
-
- # Extract documents from agent results
- documents = agentResults.get("documents", [])
-
- for doc in documents:
- try:
- # Extract label (filename) and content
- label = doc.get("label", "unnamed_file.txt")
- content = doc.get("content", "")
- base64Encoded = doc.get("base64Encoded", False)
-
- # Split label into name and extension
- name, ext = os.path.splitext(label)
- if ext.startswith('.'):
- ext = ext[1:] # Remove leading dot
- elif not ext:
- # If no extension is provided, default to .txt for text content
- ext = "txt"
- label = f"{label}.{ext}"
-
- # Convert content to bytes based on base64Encoded flag
- if isinstance(content, str):
- if base64Encoded:
- # Decode base64 to bytes
- try:
- import base64
- fileContent = base64.b64decode(content)
- except Exception as e:
- logger.warning(f"Failed to decode base64 content: {str(e)}")
- fileContent = content.encode('utf-8')
- base64Encoded = False
- else:
- # Convert text to bytes
- fileContent = content.encode('utf-8')
- else:
- # Already bytes
- fileContent = content
-
- # Determine MIME type based on extension
- mimeType = self.mydom.getMimeType(label)
-
- # Save file to database
- fileMeta = self.mydom.saveUploadedFile(fileContent, label)
-
- if fileMeta and "id" in fileMeta:
- fileId = fileMeta["id"]
- fileIds.append(fileId)
- logger.info(f"Saved document '{label}' with file ID: {fileId} (base64Encoded: {base64Encoded})")
- else:
- logger.warning(f"Failed to save document '{label}'")
-
- except Exception as e:
- logger.error(f"Error saving document from agent results: {str(e)}")
- # Continue with other documents instead of failing
- continue
-
- return fileIds
-
- def getAvailableDocuments(self, workflow: Dict[str, Any], messageUser: Dict[str, Any]) -> List[Dict[str, Any]]:
- """
- Determines all currently available documents from user input and already generated documents.
-
- Args:
- messageUser: Current message from the user
- workflow: Current workflow object
-
- Returns:
- List with information about all available documents, sorted by message sequenceNr in descending order
- """
- availableDocs = []
-
- if "messages" in workflow and workflow["messages"]:
- for message in workflow["messages"]:
- messageId = message.get("id", "unknown")
- sequenceNr = message.get("sequenceNo", 0)
-
- # Determine source
- source = "user" if messageId == messageUser.get("id") else "workflow"
-
- # Process documents in this message
- if "documents" in message and message["documents"]:
- for doc in message["documents"]:
- # Get filename using our helper method
- filename = self.getFilename(doc)
- fileId = doc.get("fileId")
-
- # Extract summaries from all contents
- contentSummaries = []
- for content in doc.get("contents", []):
- contentSummaries.append({
- "contentPart": content.get("name", "noname"),
- "metadata": content.get("metadata", ""),
- "summary": content.get("summary", "No summary"),
- })
-
- # Create document info
- docInfo = {
- "sequenceNr": sequenceNr,
- "fileSource": source,
- "fileId": fileId,
- "messageId": messageId,
- "label": filename,
- "contentSummaryList": contentSummaries,
- }
- availableDocs.append(docInfo)
-
- # Sort by message sequenceNr in descending order (newest first)
- availableDocs.sort(key=lambda x: x["sequenceNr"], reverse=True)
-
- logger.info(f"Available documents: {len(availableDocs)}")
- return availableDocs
-
- def agentProfiles(self) -> List[Dict[str, Any]]:
- """
- Gets information about all available agents.
-
- Returns:
- List with information about all available agents
- """
- return self.agentRegistry.getAgentInfos()
-
- def getFilename(self, document: Dict[str, Any]) -> str:
- """
- Gets the filename from a document by combining name and extension.
-
- Args:
- document: Document object
-
- Returns:
- Filename with extension
- """
- name = document.get("name", "unnamed")
- ext = document.get("ext", "")
- if ext:
- return f"{name}.{ext}"
- return name
-
- def parseJson2text(self, jsonObj: Any) -> str:
- """
- Converts a JSON object to a readable text representation.
-
- Args:
- jsonObj: JSON object to convert
-
- Returns:
- Formatted text representation
- """
- if not jsonObj:
- return "No data available"
-
- try:
- # Format with indentation for better readability
- return json.dumps(jsonObj, indent=2, ensure_ascii=False)
- except Exception as e:
- logger.error(f"Error in JSON conversion: {str(e)}")
- return str(jsonObj)
-
- def parseJsonResponse(self, responseText: str) -> Dict[str, Any]:
- """
- Parses the JSON response from a text.
-
- Args:
- responseText: Text with JSON content
-
- Returns:
- Parsed JSON data
- """
- try:
- # Extract JSON from the text (if mixed with other content)
- jsonStart = responseText.find('{')
- jsonEnd = responseText.rfind('}') + 1
-
- if jsonStart >= 0 and jsonEnd > jsonStart:
- jsonStr = responseText[jsonStart:jsonEnd]
- return json.loads(jsonStr)
- else:
- # Try to parse the entire text
- return json.loads(responseText)
- except json.JSONDecodeError as e:
- logger.error(f"JSON parsing error: {str(e)}")
- # Fallback: Return empty structure
- return {
- "objFinalDocuments": [],
- "objWorkplan": [],
- "objUserResponse": "Sorry, I could not parse your data.",
- "userLanguage": "en"
- }
-
-
-# Singleton factory for the WorkflowManager
-_workflowManagers = {}
-
-def getWorkflowManager(mandateId: int = 0, userId: int = 0) -> WorkflowManager:
- """
- Returns a WorkflowManager for the specified context.
- Reuses existing instances.
-
- Args:
- mandateId: ID of the mandate
- userId: ID of the user
-
- Returns:
- WorkflowManager instance
- """
- contextKey = f"{mandateId}_{userId}"
- if contextKey not in _workflowManagers:
- _workflowManagers[contextKey] = WorkflowManager(mandateId, userId)
- return _workflowManagers[contextKey]
\ No newline at end of file
diff --git a/static/117_workflowAgentsRegistry.py b/static/117_workflowAgentsRegistry.py
deleted file mode 100644
index 8cd7d63d..00000000
--- a/static/117_workflowAgentsRegistry.py
+++ /dev/null
@@ -1,270 +0,0 @@
-"""
-Agent Registry Module.
-Provides a central registry system for all available agents.
-Optimized for the standardized task processing pattern.
-"""
-
-import os
-import logging
-import importlib
-import uuid
-from datetime import datetime
-from typing import Dict, Any, List, Optional
-from modules.mimeUtils import isTextMimeType, determineContentEncoding
-
-
-logger = logging.getLogger(__name__)
-
-"""
-Updates to the AgentBase class in workflowAgentsRegistry.py to include base64Encoded flag handling.
-"""
-
-class AgentBase:
- """
- Base class for all chat agents.
- Defines the standardized interface for task processing.
- """
-
- def __init__(self):
- """Initialize the base agent."""
- self.name = "base-agent"
- self.description = "Basic agent functionality"
- self.capabilities = []
- self.mydom = None
-
- def setDependencies(self, mydom=None):
- """Set external dependencies for the agent."""
- self.mydom = mydom
-
- def getAgentInfo(self) -> Dict[str, Any]:
- """
- Return standardized information about the agent's capabilities.
-
- Returns:
- Dictionary with name, description, and capabilities
- """
- return {
- "name": self.name,
- "description": self.description,
- "capabilities": self.capabilities
- }
-
- async def processTask(self, task: Dict[str, Any]) -> Dict[str, Any]:
- """
- Process a standardized task structure and return results.
- This method must be implemented by all concrete agent classes.
-
- Args:
- task: A dictionary containing:
- - taskId: Unique ID for this task
- - workflowId: ID of the parent workflow (optional)
- - prompt: The main instruction for the agent
- - inputDocuments: List of document objects to process
- - outputSpecifications: List of required output documents
- - context: Additional contextual information
-
- Returns:
- A dictionary containing:
- - feedback: Text response explaining what the agent did
- - documents: List of document objects created by the agent,
- each containing a "base64Encoded" flag in addition to "label" and "content"
- """
- # Base implementation - should be overridden by specialized agents
- logger.warning(f"Agent {self.name} is using the default implementation of processTask")
- return {
- "feedback": f"The processTask method was not implemented by agent '{self.name}'.",
- "documents": []
- }
-
- def determineBase64EncodingFlag(self, filename: str, content: Any, mimeType: str = None) -> bool:
- """Wrapper for the utility function"""
- return determineContentEncoding(filename, content, mimeType)
-
- def isTextMimeType(self, mimeType: str) -> bool:
- """Wrapper for the utility function"""
- return isTextMimeType(mimeType)
-
- def formatAgentDocumentOutput(self, label: str, content: Any, contentType: str = None) -> Dict[str, Any]:
- """
- Helper method to properly format a document output with base64Encoded flag and metadata.
-
- Args:
- label: Name of the document
- content: Content of the document
- contentType: Optional content type for the document
-
- Returns:
- Properly formatted document dictionary
- """
- import base64
-
- # Determine if content should be base64 encoded
- should_base64_encode = self.determineBase64EncodingFlag(label, content)
-
- # Process content based on type and encoding flag
- formatted_content = content
-
- if should_base64_encode:
- if isinstance(content, bytes):
- # Convert binary to base64
- formatted_content = base64.b64encode(content).decode('utf-8')
- elif isinstance(content, str):
- try:
- # Check if it's already base64 encoded
- base64.b64decode(content)
- # If we get here, it appears to be valid base64
- formatted_content = content
- except:
- # Not valid base64, so encode it
- formatted_content = base64.b64encode(content.encode('utf-8')).decode('utf-8')
-
- # Create document with metadata
- doc = {
- "label": label,
- "content": formatted_content,
- "base64Encoded": should_base64_encode,
- "metadata": {}
- }
-
- # Add content type if provided
- if contentType:
- doc["metadata"]["contentType"] = contentType
-
- return doc
-
-class AgentRegistry:
- """Central registry for all available agents in the system."""
-
- _instance = None
-
- @classmethod
- def getInstance(cls):
- """Return a singleton instance of the agent registry."""
- if cls._instance is None:
- cls._instance = cls()
- return cls._instance
-
- def __init__(self):
- """Initialize the agent registry."""
- if AgentRegistry._instance is not None:
- raise RuntimeError("Singleton instance already exists - use getInstance()")
-
- self.agents = {}
- self.mydom = None
- self._loadAgents()
-
- def _loadAgents(self):
- """Load all available agents from modules."""
- logger.info("Loading agent modules...")
-
- # List of agent modules to load
- agentModules = []
- agentDir = os.path.dirname(__file__)
-
- # Search the directory for agent modules
- for filename in os.listdir(agentDir):
- if filename.startswith("agent") and filename.endswith(".py"):
- agentModules.append(filename[0:-3]) # Remove .py extension
-
- if not agentModules:
- logger.warning("No agent modules found")
- return
-
- logger.info(f"{len(agentModules)} agent modules found")
-
- # Load each agent module
- for moduleName in agentModules:
- try:
- # Import the module
- module = importlib.import_module(f"modules.{moduleName}")
-
- # Look for agent class or get_*_agent function
- agentName = moduleName.split("agent")[-1]
- className = f"Agent{agentName}"
- getterName = f"getAgent{agentName}"
-
- agent = None
-
- # Try to get the agent via the get*Agent function
- if hasattr(module, getterName):
- getterFunc = getattr(module, getterName)
- agent = getterFunc()
- logger.info(f"Agent '{agent.name}' loaded via {getterName}()")
-
- # Alternatively, try to instantiate the agent directly
- elif hasattr(module, className):
- agentClass = getattr(module, className)
- agent = agentClass()
- logger.info(f"Agent '{agent.name}' directly instantiated")
-
- if agent:
- # Register the agent
- self.registerAgent(agent)
- else:
- logger.warning(f"No agent class or getter function found in module {moduleName}")
-
- except ImportError as e:
- logger.error(f"Module {moduleName} could not be imported: {e}")
- except Exception as e:
- logger.error(f"Error loading agent from module {moduleName}: {e}")
-
- def setMydom(self, mydom):
- """Set the AI service for all agents."""
- self.mydom = mydom
- self.updateAgentDependencies()
-
- def updateAgentDependencies(self):
- """Update dependencies for all registered agents."""
- for agentId, agent in self.agents.items():
- if hasattr(agent, 'setDependencies'):
- agent.setDependencies(mydom=self.mydom)
-
- def registerAgent(self, agent):
- """
- Register an agent in the registry.
-
- Args:
- agent: The agent to register
- """
- agentId = getattr(agent, 'name', "unknown_agent")
- # Initialize agent with dependencies
- if hasattr(agent, 'setDependencies'):
- agent.setDependencies(mydom=self.mydom)
- self.agents[agentId] = agent
- logger.debug(f"Agent '{agent.name}' registered")
-
- def getAgent(self, agentIdentifier: str):
- """
- Return an agent instance
- Args:
- agentIdentifier: ID or type of the desired agent
- Returns:
- Agent instance or None if not found
- """
- if agentIdentifier in self.agents:
- agent = self.agents[agentIdentifier]
- # Ensure the agent has the AI service
- if hasattr(agent, 'setDependencies') and self.mydom:
- agent.setDependencies(mydom=self.mydom)
- return agent
- logger.error(f"Agent with identifier '{agentIdentifier}' not found")
- return None
-
- def getAllAgents(self) -> Dict[str, Any]:
- """Return all registered agents."""
- return self.agents
-
- def getAgentInfos(self) -> List[Dict[str, Any]]:
- """Return information about all registered agents."""
- agentInfos = []
- seenAgents = set()
- for agent in self.agents.values():
- if agent not in seenAgents:
- agentInfos.append(agent.getAgentInfo())
- seenAgents.add(agent)
- return agentInfos
-
-
-# Singleton factory for the agent registry
-def getAgentRegistry():
- return AgentRegistry.getInstance()
\ No newline at end of file
diff --git a/static/118_agentAnalyst.py b/static/118_agentAnalyst.py
deleted file mode 100644
index 853b3955..00000000
--- a/static/118_agentAnalyst.py
+++ /dev/null
@@ -1,670 +0,0 @@
-"""
-Data analyst agent for analysis and interpretation of data.
-Focuses on output-first design with AI-powered analysis.
-"""
-
-import logging
-import json
-import io
-import base64
-from typing import Dict, Any, List
-import pandas as pd
-import matplotlib.pyplot as plt
-import seaborn as sns
-
-from modules.workflowAgentsRegistry import AgentBase
-
-logger = logging.getLogger(__name__)
-
-class AgentAnalyst(AgentBase):
- """AI-driven agent for data analysis and visualization"""
-
- def __init__(self):
- """Initialize the data analysis agent"""
- super().__init__()
- self.name = "analyst"
- self.description = "Analyzes data using AI-powered insights and visualizations, produce diagrams and visualizations"
- self.capabilities = [
- "dataAnalysis",
- "statistics",
- "visualization",
- "dataInterpretation",
- "reportGeneration"
- ]
-
- # Set default visualization settings
- plt.style.use('seaborn-v0_8-whitegrid')
-
- def setDependencies(self, mydom=None):
- """Set external dependencies for the agent."""
- self.mydom = mydom
-
- async def processTask(self, task: Dict[str, Any]) -> Dict[str, Any]:
- """
- Process a task by focusing on required outputs and using AI to generate them.
-
- Args:
- task: Task dictionary with prompt, inputDocuments, outputSpecifications
-
- Returns:
- Dictionary with feedback and documents
- """
- try:
- # Extract task information
- prompt = task.get("prompt", "")
- inputDocuments = task.get("inputDocuments", [])
- outputSpecs = task.get("outputSpecifications", [])
-
- # Check AI service
- if not self.mydom:
- return {
- "feedback": "The Analyst agent requires an AI service to function.",
- "documents": []
- }
-
- # Extract data from documents - focusing only on dataExtracted
- datasets, documentContext = self._extractData(inputDocuments)
-
- # Generate task analysis to understand what's needed
- analysisPlan = await self._analyzeTask(prompt, documentContext, datasets, outputSpecs)
-
- # Generate all required output documents
- documents = []
-
- # If no output specs provided, create default analysis outputs
- if not outputSpecs:
- outputSpecs = []
-
- # Process each output specification
- for spec in outputSpecs:
- outputLabel = spec.get("label", "")
- outputDescription = spec.get("description", "")
-
- # Determine type based on file extension
- outputType = outputLabel.split('.')[-1].lower() if '.' in outputLabel else "txt"
-
- # Generate appropriate content based on output type
- if outputType in ['png', 'jpg', 'jpeg', 'svg']:
- # Create visualization
- document = await self._createVisualization(
- datasets, prompt, outputLabel, analysisPlan, outputDescription
- )
- documents.append(document)
- elif outputType in ['csv', 'json', 'xlsx']:
- # Create data document
- document = await self._createDataDocument(
- datasets, prompt, outputLabel, analysisPlan, outputDescription
- )
- documents.append(document)
- else:
- # Create text document (report, analysis, etc.)
- document = await self._createTextDocument(
- datasets, documentContext, prompt, outputLabel,
- outputType, analysisPlan, outputDescription
- )
- documents.append(document)
-
- # Generate feedback
- feedback = f"{analysisPlan.get('analysisApproach')}"
- if analysisPlan.get("keyInsights"):
- feedback += f"\n\n{analysisPlan.get('keyInsights')}"
-
- return {
- "feedback": feedback,
- "documents": documents
- }
-
- except Exception as e:
- logger.error(f"Error in analysis: {str(e)}", exc_info=True)
- return {
- "feedback": f"Error during analysis: {str(e)}",
- "documents": []
- }
-
- def _extractData(self, documents: List[Dict[str, Any]]) -> tuple:
- """
- Extract data from documents, focusing on dataExtracted fields.
-
- Args:
- documents: List of input documents
-
- Returns:
- Tuple of (datasets dictionary, document context text)
- """
- datasets = {}
- documentContext = ""
-
- # Process each document
- for doc in documents:
- docName = doc.get("name", "unnamed")
- if doc.get("ext"):
- docName = f"{docName}.{doc.get('ext')}"
-
- documentContext += f"\n\n--- {docName} ---\n"
-
- # Process contents
- for content in doc.get("contents", []):
- # Focus only on dataExtracted
- if content.get("dataExtracted"):
- extractedText = content.get("dataExtracted", "")
- documentContext += extractedText
-
- # Try to parse as structured data if appropriate
- if docName.lower().endswith(('.csv', '.tsv')):
- try:
- df = pd.read_csv(io.StringIO(extractedText))
- datasets[docName] = df
- except:
- pass
- elif docName.lower().endswith('.json'):
- try:
- jsonData = json.loads(extractedText)
- if isinstance(jsonData, list):
- df = pd.DataFrame(jsonData)
- datasets[docName] = df
- elif isinstance(jsonData, dict):
- # Handle nested JSON structures
- if any(isinstance(v, list) for v in jsonData.values()):
- for key, value in jsonData.items():
- if isinstance(value, list) and len(value) > 0:
- df = pd.DataFrame(value)
- datasets[f"{docName}:{key}"] = df
- else:
- df = pd.DataFrame([jsonData])
- datasets[docName] = df
- except:
- pass
-
- # Try to detect tabular data in text content
- if docName not in datasets and len(extractedText.splitlines()) > 2:
- lines = extractedText.splitlines()
- if any(',' in line for line in lines[:5]):
- try:
- df = pd.read_csv(io.StringIO(extractedText))
- if len(df.columns) > 1:
- datasets[docName] = df
- except:
- pass
- elif any('\t' in line for line in lines[:5]):
- try:
- df = pd.read_csv(io.StringIO(extractedText), sep='\t')
- if len(df.columns) > 1:
- datasets[docName] = df
- except:
- pass
-
- return datasets, documentContext
-
- async def _analyzeTask(self, prompt: str, context: str, datasets: Dict, outputSpecs: List) -> Dict:
- """
- Use AI to analyze the task and create a plan for analysis.
-
- Args:
- prompt: The task prompt
- context: Document context text
- datasets: Dictionary of extracted datasets
- outputSpecs: Output specifications
-
- Returns:
- Analysis plan dictionary
- """
- # Prepare dataset information
- datasetInfo = {}
- for name, df in datasets.items():
- try:
- datasetInfo[name] = {
- "shape": df.shape,
- "columns": df.columns.tolist(),
- "dtypes": {col: str(df[col].dtype) for col in df.columns},
- "sample": df.head(3).to_dict(orient='records')
- }
- except:
- datasetInfo[name] = {"error": "Could not process dataset"}
-
- analysisPrompt = f"""
- Analyze this data analysis task and create a plan.
-
- TASK: {prompt}
-
- AVAILABLE DATA:
- {json.dumps(datasetInfo, indent=2)}
-
- DOCUMENT CONTEXT:
- {context[:1000]}... (truncated)
-
- OUTPUT REQUIREMENTS:
- {json.dumps(outputSpecs, indent=2)}
-
- Create a detailed analysis plan in JSON format with the following structure:
- {{
- "analysisType": "statistical|trend|comparative|predictive|cluster|general",
- "keyQuestions": ["question1", "question2"],
- "recommendedVisualizations": [{{
- "type": "chart_type",
- "dataSource": "dataset_name",
- "variables": ["col1", "col2"],
- "purpose": "explanation"
- }}],
- "keyInsights": "brief summary of initial insights",
- "analysisApproach": "brief description of recommended approach"
- }}
-
- Only return valid JSON. No preamble or explanations.
- """
- try:
- response = await self.mydom.callAi([
- {"role": "system", "content": "You are a data analysis expert. Respond with valid JSON only."},
- {"role": "user", "content": analysisPrompt}
- ], produceUserAnswer = True)
-
- # Extract JSON from response
- jsonStart = response.find('{')
- jsonEnd = response.rfind('}') + 1
-
- if jsonStart >= 0 and jsonEnd > jsonStart:
- plan = json.loads(response[jsonStart:jsonEnd])
- return plan
- else:
- # Fallback if JSON not found
- return {
- "analysisType": "general",
- "keyQuestions": ["What insights can be extracted from this data?"],
- "recommendedVisualizations": [],
- "keyInsights": "Analysis plan could not be created",
- "analysisApproach": "General exploratory analysis"
- }
-
- except Exception as e:
- logger.warning(f"Error creating analysis plan: {str(e)}")
- return {
- "analysisType": "general",
- "keyQuestions": ["What insights can be extracted from this data?"],
- "recommendedVisualizations": [],
- "keyInsights": "Analysis plan could not be created",
- "analysisApproach": "General exploratory analysis"
- }
-
- async def _createVisualization(self, datasets: Dict, prompt: str, outputLabel: str,
- analysisPlan: Dict, description: str) -> Dict:
- """
- Create visualization document using AI guidance.
-
- Args:
- datasets: Dictionary of datasets
- prompt: Original task prompt
- outputLabel: Output filename
- analysisPlan: Analysis plan from AI
- description: Output description
-
- Returns:
- Visualization document
- """
- # Determine format from filename
- formatType = outputLabel.split('.')[-1].lower()
- if formatType not in ['png', 'jpg', 'jpeg', 'svg']:
- formatType = 'png'
-
- # If no datasets available, create error message image
- if not datasets:
- plt.figure(figsize=(10, 6))
- plt.text(0.5, 0.5, "No data available for visualization",
- ha='center', va='center', fontsize=14)
- plt.tight_layout()
- imgData = self._getImageBase64(formatType)
- plt.close()
-
- return {
- "label": outputLabel,
- "content": imgData,
- "metadata": {
- "contentType": f"image/{formatType}"
- }
- }
-
- # Get recommended visualization from plan
- recommendedViz = analysisPlan.get("recommendedVisualizations", [])
-
- # Prepare dataset info for the first dataset if none specified
- if not recommendedViz and datasets:
- name, df = next(iter(datasets.items()))
- recommendedViz = [{
- "type": "auto",
- "dataSource": name,
- "variables": df.columns.tolist()[:5],
- "purpose": "general analysis"
- }]
-
- # Create visualization code prompt
- vizPrompt = f"""
- Generate Python matplotlib/seaborn code to create a visualization for:
-
- TASK: {prompt}
-
- VISUALIZATION REQUIREMENTS:
- - Output format: {formatType}
- - Filename: {outputLabel}
- - Description: {description}
-
- RECOMMENDED VISUALIZATION:
- {json.dumps(recommendedViz, indent=2)}
-
- AVAILABLE DATASETS:
- """
-
- # Add dataset info for recommended sources
- for viz in recommendedViz:
- dataSource = viz.get("dataSource")
- if dataSource in datasets:
- df = datasets[dataSource]
- vizPrompt += f"\nDataset '{dataSource}':\n"
- vizPrompt += f"- Shape: {df.shape}\n"
- vizPrompt += f"- Columns: {df.columns.tolist()}\n"
- vizPrompt += f"- Sample data: {df.head(3).to_dict(orient='records')}\n"
-
- vizPrompt += """
- Generate ONLY Python code that:
- 1. Uses matplotlib and/or seaborn to create a clear visualization
- 2. Sets figure size to (10, 6)
- 3. Includes appropriate titles, labels, and legend
- 4. Uses professional color schemes
- 5. Handles any missing data gracefully
-
- Return ONLY executable Python code, no explanations or markdown.
- """
-
- try:
- # Get visualization code from AI
- vizCode = await self.mydom.callAi([
- {"role": "system", "content": "You are a data visualization expert. Provide only executable Python code."},
- {"role": "user", "content": vizPrompt}
- ], produceUserAnswer = True)
-
- # Clean code
- vizCode = vizCode.replace("```python", "").replace("```", "").strip()
-
- # Execute visualization code
- plt.figure(figsize=(10, 6))
-
- # Make local variables available to the code
- localVars = {
- "plt": plt,
- "sns": sns,
- "pd": pd,
- "np": __import__('numpy')
- }
-
- # Add datasets to local variables
- for name, df in datasets.items():
- # Create a sanitized variable name
- varName = ''.join(c if c.isalnum() else '_' for c in name)
- localVars[varName] = df
-
- # Also add with standard names for simpler code
- if "df" not in localVars:
- localVars["df"] = df
- elif "df2" not in localVars:
- localVars["df2"] = df
-
- # Execute the visualization code
- exec(vizCode, globals(), localVars)
-
- # Capture the image
- imgData = self._getImageBase64(formatType)
- plt.close()
-
- return self.formatAgentDocumentOutput(outputLabel, imgData, f"image/{formatType}")
-
- except Exception as e:
- logger.error(f"Error creating visualization: {str(e)}", exc_info=True)
-
- # Create error message image
- plt.figure(figsize=(10, 6))
- plt.text(0.5, 0.5, f"Visualization error: {str(e)}",
- ha='center', va='center', fontsize=12)
- plt.tight_layout()
- imgData = self._getImageBase64(formatType)
- plt.close()
-
- return self.formatAgentDocumentOutput(outputLabel, imgData, f"image/{formatType}")
-
- async def _createDataDocument(self, datasets: Dict, prompt: str, outputLabel: str,
- analysisPlan: Dict, description: str) -> Dict:
- """
- Create a data document (e.g., CSV, JSON) based on analysis.
-
- Args:
- datasets: Dictionary of datasets
- prompt: Original task prompt
- outputLabel: Output filename
- analysisPlan: Analysis plan from AI
- description: Output description
-
- Returns:
- Data document
- """
- # Determine format from filename
- formatType = outputLabel.split('.')[-1].lower()
-
- # If no datasets available, return error message
- if not datasets:
- return {
- "label": outputLabel,
- "content": f"No data available for processing into {formatType} format.",
- "metadata": {
- "contentType": "text/plain"
- }
- }
-
- # Generate data processing instructions
- dataPrompt = f"""
- Create Python code to process datasets and generate a {formatType} file for:
-
- TASK: {prompt}
-
- OUTPUT REQUIREMENTS:
- - Format: {formatType}
- - Filename: {outputLabel}
- - Description: {description}
-
- ANALYSIS CONTEXT:
- {json.dumps(analysisPlan, indent=2)}
-
- AVAILABLE DATASETS:
- """
-
- # Add dataset info
- for name, df in datasets.items():
- dataPrompt += f"\nDataset '{name}':\n"
- dataPrompt += f"- Shape: {df.shape}\n"
- dataPrompt += f"- Columns: {df.columns.tolist()}\n"
- dataPrompt += f"- Sample data: {df.head(3).to_dict(orient='records')}\n"
-
- dataPrompt += """
- Generate Python code that:
- 1. Processes the available dataset(s)
- 2. Performs necessary transformations, aggregations, or calculations
- 3. Outputs the result in the requested format
- 4. Returns the content as a string variable named 'result'
-
- Return ONLY executable Python code, no explanations or markdown.
- """
-
- try:
- # Get data processing code from AI
- dataCode = await self.mydom.callAi([
- {"role": "system", "content": "You are a data processing expert. Provide only executable Python code."},
- {"role": "user", "content": dataPrompt}
- ], produceUserAnswer = True)
-
- # Clean code
- dataCode = dataCode.replace("```python", "").replace("```", "").strip()
-
- # Setup execution environment
- localVars = {"pd": pd, "np": __import__('numpy'), "io": io}
-
- # Add datasets to local variables
- for name, df in datasets.items():
- # Create a sanitized variable name
- varName = ''.join(c if c.isalnum() else '_' for c in name)
- localVars[varName] = df
-
- # Also add with standard names for simpler code
- if "df" not in localVars:
- localVars["df"] = df
- elif "df2" not in localVars:
- localVars["df2"] = df
-
- # Execute the code
- exec(dataCode, globals(), localVars)
-
- # Get the result
- result = localVars.get("result", "No output was generated.")
-
- # Determine content type
- contentType = "text/csv" if formatType == "csv" else \
- "application/json" if formatType == "json" else \
- "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet" if formatType == "xlsx" else \
- "text/plain"
-
- return self.formatAgentDocumentOutput(outputLabel, result, contentType)
-
-
- except Exception as e:
- logger.error(f"Error creating data document: {str(e)}", exc_info=True)
-
- return {
- "label": outputLabel,
- "content": f"Error generating {formatType} document: {str(e)}",
- "metadata": {
- "contentType": "text/plain"
- }
- }
-
- async def _createTextDocument(self, datasets: Dict, context: str, prompt: str,
- outputLabel: str, formatType: str,
- analysisPlan: Dict, description: str) -> Dict:
- """
- Create a text document (report, analysis, etc.) based on analysis.
-
- Args:
- datasets: Dictionary of datasets
- context: Document context text
- prompt: Original task prompt
- outputLabel: Output filename
- formatType: Output format type
- analysisPlan: Analysis plan from AI
- description: Output description
-
- Returns:
- Text document
- """
- # Create dataset summaries
- datasetSummaries = []
- for name, df in datasets.items():
- summary = f"Dataset: {name}\n"
- summary += f"- Shape: {df.shape[0]} rows, {df.shape[1]} columns\n"
- summary += f"- Columns: {', '.join(df.columns.tolist())}\n"
-
- # Basic statistics for numeric columns
- numericCols = df.select_dtypes(include=['number']).columns
- if len(numericCols) > 0:
- summary += "- Numeric Columns Stats:\n"
- for col in numericCols[:3]: # Limit to first 3
- stats = df[col].describe()
- summary += f" - {col}: min={stats['min']:.2f}, max={stats['max']:.2f}, mean={stats['mean']:.2f}\n"
-
- datasetSummaries.append(summary)
-
- # Determine content type based on format
- contentType = "text/markdown" if formatType in ["md", "markdown"] else \
- "text/html" if formatType == "html" else \
- "text/plain"
-
- # Generate analysis prompt
- analysisPrompt = f"""
- Create a detailed {formatType} document for:
-
- TASK: {prompt}
-
- OUTPUT REQUIREMENTS:
- - Format: {formatType}
- - Filename: {outputLabel}
- - Description: {description}
-
- ANALYSIS CONTEXT:
- {json.dumps(analysisPlan, indent=2)}
-
- DATASET SUMMARIES:
- {"".join(datasetSummaries)}
-
- DOCUMENT CONTEXT:
- {context[:2000]}... (truncated)
-
- Create a comprehensive, professional analysis document that addresses the task requirements.
- The document should:
- 1. Have a clear structure with headings and sections
- 2. Include relevant data findings and insights
- 3. Provide appropriate interpretations and recommendations
- 4. Format the content according to the required output format
-
- Your response should be the complete document content in the specified format.
- """
-
- try:
- # Get document content from AI
- documentContent = await self.mydom.callAi([
- {"role": "system", "content": f"You are a data analysis expert creating a {formatType} document."},
- {"role": "user", "content": analysisPrompt}
- ], produceUserAnswer = True)
-
- # Clean HTML or Markdown if needed
- if formatType in ["md", "markdown"] and not documentContent.strip().startswith("#"):
- documentContent = f"# Analysis Report\n\n{documentContent}"
- elif formatType == "html" and not "
{documentContent}"
-
- return self.formatAgentDocumentOutput(outputLabel, documentContent, contentType)
-
- except Exception as e:
- logger.error(f"Error creating text document: {str(e)}", exc_info=True)
-
- # Create a simple error document
- if formatType in ["md", "markdown"]:
- content = f"# Error in Analysis\n\nThere was an error generating the analysis: {str(e)}"
- elif formatType == "html":
- content = f"
Error in Analysis
There was an error generating the analysis: {str(e)}
"
- else:
- content = f"Error in Analysis\n\nThere was an error generating the analysis: {str(e)}"
-
- return {
- "label": outputLabel,
- "content": content,
- "metadata": {
- "contentType": contentType
- }
- }
-
- def _getImageBase64(self, formatType: str = 'png') -> str:
- """
- Convert current matplotlib figure to base64 string.
-
- Args:
- formatType: Image format
-
- Returns:
- Base64 encoded string of the image
- """
- buffer = io.BytesIO()
- plt.savefig(buffer, format=formatType, dpi=100)
- buffer.seek(0)
- imageData = buffer.getvalue()
- buffer.close()
-
- # Convert to base64
- return base64.b64encode(imageData).decode('utf-8')
-
-
-# Factory function for the Analyst agent
-def getAgentAnalyst():
- """Returns an instance of the Analyst agent."""
- return AgentAnalyst()
\ No newline at end of file
diff --git a/static/119_agentCoder.py b/static/119_agentCoder.py
deleted file mode 100644
index a263c68e..00000000
--- a/static/119_agentCoder.py
+++ /dev/null
@@ -1,764 +0,0 @@
-"""
-Simple Coder Agent for execution of Python code.
-Modified to pass expected output document names to the generated code.
-"""
-
-import logging
-import json
-import os
-import subprocess
-import tempfile
-import shutil
-import sys
-from typing import Dict, Any, List, Tuple
-
-from modules.workflowAgentsRegistry import AgentBase
-from modules.configuration import APP_CONFIG
-
-logger = logging.getLogger(__name__)
-
-class AgentCoder(AgentBase):
- """Simplified Agent for developing and executing Python code with integrated executor"""
-
- def __init__(self):
- """Initialize the coder agent"""
- super().__init__()
- self.name = "coder"
- self.description = "Develops and executes Python code for data processing and automation"
- self.capabilities = [
- "code_development",
- "data_processing",
- "file_processing",
- "automation",
- "code_execution"
- ]
-
- # Executor settings
- self.executorTimeout = int(APP_CONFIG.get("Agent_Coder_EXECUTION_TIMEOUT")) # seconds
- self.executionRetryLimit = int(APP_CONFIG.get("Agent_Coder_EXECUTION_RETRY")) # max retries
- self.tempDir = None
-
- def setDependencies(self, mydom=None):
- """Set external dependencies for the agent."""
- self.mydom = mydom
-
- async def processTask(self, task: Dict[str, Any]) -> Dict[str, Any]:
- """
- Process a task and perform code development/execution.
- First checks if the task can be completed without code execution,
- then falls back to code generation if needed.
- Enhanced to ensure all generated documents are included in output.
-
- Args:
- task: Task dictionary with prompt, inputDocuments, outputSpecifications
-
- Returns:
- Dictionary with feedback and documents
- """
- # 1. Extract task information
- prompt = task.get("prompt", "")
- inputDocuments = task.get("inputDocuments", [])
- outputSpecs = task.get("outputSpecifications", [])
-
- # Check if AI service is available
- if not self.mydom:
- logger.error("No AI service configured for the Coder agent")
- return {
- "feedback": "The Coder agent is not properly configured.",
- "documents": []
- }
-
- # 2. Extract data from documents in separate categories
- documentData = [] # For raw file data (for code execution)
- contentData = [] # For content data (later use)
- contentExtraction = [] # For AI-extracted data (for quick completion)
-
- for doc in inputDocuments:
- # Create proper filename from name and ext
- filename = f"{doc.get('name')}.{doc.get('ext')}" if doc.get('ext') else doc.get('name')
-
- # Add main document data to documentData if it exists
- docData = doc.get('data', '')
- if docData:
- isBase64 = True # Assume base64 encoded for document data
- documentData.append([filename, docData, isBase64])
-
- # Process contents for different uses
- if doc.get('contents'):
- for content in doc.get('contents', []):
- contentName = content.get('name', 'unnamed')
-
- # For AI-extracted data (quick completion)
- if content.get('dataExtracted'):
- contentExtraction.append({
- "filename": filename,
- "contentName": contentName,
- "contentData": content.get('dataExtracted', ''),
- "contentType": content.get('contentType', ''),
- "summary": content.get('summary', '')
- })
-
- # For raw content data
- if content.get('data'):
- rawData = content.get('data', '')
- isBase64 = content.get('metadata', {}).get('base64Encoded', False)
- contentData.append({
- "filename": filename,
- "contentName": contentName,
- "data": rawData,
- "isBase64": isBase64,
- "contentType": content.get('contentType', '')
- })
-
- # Also add to documentData for code execution if not already added
- if not docData or docData != rawData:
- documentData.append([filename, rawData, isBase64])
-
- # 3. Check if task can be completed without code execution
- quickCompletion = await self._checkQuickCompletion(prompt, contentExtraction, outputSpecs)
-
- if quickCompletion and quickCompletion.get("complete") == 1:
- logger.info("Task completed without code execution")
- return {
- "feedback": quickCompletion.get("prompt", "Task completed successfully."),
- "documents": quickCompletion.get("documents", [])
- }
- else:
- logger.debug(f"Code to generate, no quick check")
-
- # If quick completion not possible, continue with code generation and execution
- logger.info("Generating code to solve the task")
-
- # 4. Generate code using AI
- code, requirements = await self._generateCode(prompt, outputSpecs)
-
- if not code:
- return {
- "feedback": "Failed to generate code for the task.",
- "documents": []
- }
-
- # 5. Replace the placeholder with actual inputFiles data
- documentDataJson = repr(documentData)
- codeWithData = code.replace("inputFiles = \"=== JSONLOAD ===\"", f"inputFiles = {documentDataJson}")
-
- # 6. Execute code with retry logic
- retryCount = 0
- maxRetries = self.executionRetryLimit
- executionHistory = []
-
- while retryCount <= maxRetries:
- executionResult = self._executeCode(codeWithData, requirements)
- executionHistory.append({
- "attempt": retryCount + 1,
- "code": codeWithData,
- "result": executionResult
- })
-
- # Check if execution was successful
- if executionResult.get("success", False):
- logger.info(f"Code execution succeeded on attempt {retryCount + 1}")
- break
-
- # If we've reached max retries, exit the loop
- if retryCount >= maxRetries:
- logger.info(f"Reached maximum retry limit ({maxRetries}). Giving up.")
- break
-
- # Log the error and attempt to improve the code
- error = executionResult.get("error", "Unknown error")
- logger.info(f"Execution attempt {retryCount + 1} failed: {error}. Attempting to improve code.")
-
- # Generate improved code based on error
- improvedCode, improvedRequirements = await self._improveCode(
- originalCode=codeWithData,
- error=error,
- executionResult=executionResult,
- attempt=retryCount + 1,
- outputSpecs=outputSpecs
- )
-
- if improvedCode:
- codeWithData = improvedCode
- requirements = improvedRequirements
- logger.info(f"Code improved for retry {retryCount + 2}")
- else:
- logger.warning("Failed to improve code, using original code for retry")
-
- retryCount += 1
-
- # 7. Process results and create output documents
- documents = []
-
- # Always add the final code document
- documents.append(self.formatAgentDocumentOutput("generated_code.py", codeWithData, "text/plain"))
-
- # Add execution history document
- executionHistoryStr = json.dumps(executionHistory, indent=2)
- documents.append(self.formatAgentDocumentOutput("execution_history.json", executionHistoryStr, "application/json"))
-
- # Enhanced result handling: Create documents based on execution results - fixed for proper content extraction
- if executionResult.get("success", False):
- resultData = executionResult.get("result")
-
- # Process results from the result dictionary if available
- if isinstance(resultData, dict):
- # First, create a mapping of expected output labels to their specs
- expectedOutputs = {spec.get("label"): spec for spec in outputSpecs}
- createdOutputs = set()
-
- for label, result_item in resultData.items():
- # Check if result follows the expected structure with nested content
- if isinstance(result_item, dict) and "content" in result_item:
- # Extract values from the properly structured result
- content = result_item.get("content", "") # Extract the inner content
- base64Encoded = result_item.get("base64Encoded", False)
- contentType = result_item.get("contentType", "text/plain")
-
- # Check if this label matches one of our expected output documents
- # If not, but we haven't created all expected outputs yet, try to map it
- finalLabel = label
- if label not in expectedOutputs and len(expectedOutputs) > 0:
- # Find an unused expected output label
- for expectedLabel in expectedOutputs:
- if expectedLabel not in createdOutputs:
- logger.warning(f"Remapping output '{label}' to expected '{expectedLabel}'")
- finalLabel = expectedLabel
- break
-
- # Create document by passing only the content to formatAgentDocumentOutput
- doc = self.formatAgentDocumentOutput(finalLabel, content, contentType)
-
- # Override the base64Encoded flag with the value from the result
- # This is needed since formatAgentDocumentOutput might determine a different value
- if isinstance(base64Encoded, bool):
- doc["base64Encoded"] = base64Encoded
-
- documents.append(doc)
- createdOutputs.add(finalLabel)
- logger.info(f"Created document from result: {finalLabel} ({contentType}, base64={base64Encoded})")
- else:
- # Not properly structured - log warning
- logger.warning(f"Skipping improperly formatted result for '{label}'. Results must include 'content' field.")
- else:
- # No result dictionary found
- logger.warning("No valid result dictionary found or it's not properly formatted")
-
- # If no valid documents were created from the result dictionary but we have output specifications
- if len(documents) <= 2 and outputSpecs: # Only code.py and history.json exist
- logger.warning("No valid documents created from result dictionary, using execution output for specifications")
- # Default to execution output
- output = executionResult.get("output", "")
- for spec in outputSpecs:
- label = spec.get("label", "output.txt")
- # Create basic document from output
- doc = self.formatAgentDocumentOutput(label, output, "text/plain")
- documents.append(doc)
- logger.info(f"Created document from output specification: {label}")
-
- if retryCount > 0:
- feedback = f"Code executed successfully after {retryCount + 1} attempts. Generated {len(documents) - 2} output files."
- else:
- feedback = f"Code executed successfully. Generated {len(documents) - 2} output files."
- else:
- # Execution failed
- error = executionResult.get("error", "Unknown error")
- documents.append(self.formatAgentDocumentOutput("execution_error.txt", f"Error executing code:\n\n{error}", "text/plain"))
- if retryCount > 0:
- feedback = f"Error during code execution after {retryCount + 1} attempts: {error}"
- else:
- feedback = f"Error during code execution: {error}"
-
- return {
- "feedback": feedback,
- "documents": documents
- }
-
- async def _improveCode(self, originalCode: str, error: str, executionResult: Dict[str, Any], attempt: int, outputSpecs: List[Dict[str, Any]] = None) -> Tuple[str, List[str]]:
- """
- Improve code based on execution error.
- Enhanced to maintain proper output handling with correct document structure.
-
- Args:
- originalCode: The code that failed to execute
- error: The error message
- executionResult: Complete execution result dictionary
- attempt: Current attempt number
- outputSpecs: List of expected output specifications
-
- Returns:
- Tuple of (improvedCode, requirements)
- """
- # Create a string with output specifications to be included in the prompt
- outputSpecsStr = ""
- if outputSpecs:
- outputSpecsStr = "\nEXPECTED OUTPUT DOCUMENTS:\n"
- for i, spec in enumerate(outputSpecs, 1):
- label = spec.get("label", f"output{i}.txt")
- description = spec.get("description", "")
- outputSpecsStr += f"{i}. {label} - {description}\n"
-
- # Create prompt for code improvement
- improvementPrompt = f"""
-Fix the following Python code that failed during execution. This is attempt {attempt} to fix the code.
-
-ORIGINAL CODE:
-{originalCode}
-
-ERROR MESSAGE:
-{error}
-
-STDOUT:
-{executionResult.get('output', '')}
-{outputSpecsStr}
-INSTRUCTIONS:
-1. Fix all errors identified in the error message
-2. Diagnose and fix any logical issues
-3. Pay special attention to:
-- Type conversions and data handling
-- Error handling and edge cases
-- Resource management (file handles, etc.)
-- Syntax errors and typos
-4. Keep the inputFiles handling logic intact
-5. Maintain the same overall structure and purpose
-
-OUTPUT REQUIREMENTS (VERY IMPORTANT):
-- Your code MUST define a 'result' variable as a dictionary to store ALL outputs
-- The key for each entry MUST be the full filename with extension (e.g., "output.txt")
-- The value for each entry MUST be a dictionary with the following structure:
-{{
- "content": string, # The actual content (text or base64-encoded string)
- "base64Encoded": boolean, # Set to true for binary data, false for text data
- "contentType": string # MIME type of the content (e.g., "text/plain", "application/json")
-}}
-- Example result dictionary:
-result = {{
- "output.txt": {{
- "content": "This is text content",
- "base64Encoded": False,
- "contentType": "text/plain"
- }},
- "chart.png": {{
- "content": "base64encodedstring...",
- "base64Encoded": True,
- "contentType": "image/png"
- }}
-}}
-- NEVER write files to disk using open() or similar methods - use the result dictionary instead
-
-JSON OUTPUT (CRITICAL):
-- After creating the result dictionary, you MUST print it as JSON to stdout
-- Make sure your code includes: print(json.dumps(result)) as the final line
-- This printed JSON is how the system captures your result
-
-REQUIREMENTS:
-Required packages should be specified as:
-# REQUIREMENTS: library==version,library2>=version
-- You may add/remove requirements as needed to fix the code
-
-Return ONLY Python code without explanations or markdown.
-"""
-
- # Call AI service
- messages = [
- {"role": "system", "content": "You are an expert Python code debugger. Provide only fixed Python code without explanations or formatting. Ensure all generated files are included in the 'result' dictionary and that result is printed as JSON with print(json.dumps(result))."},
- {"role": "user", "content": improvementPrompt}
- ]
-
- try:
- improvedContent = await self.mydom.callAi(messages, temperature=0.2)
-
- # Extract code and requirements
- improvedCode = self._cleanCode(improvedContent)
-
- # Extract requirements
- requirements = []
- for line in improvedCode.split('\n'):
- if line.strip().startswith("# REQUIREMENTS:"):
- reqStr = line.replace("# REQUIREMENTS:", "").strip()
- requirements = [r.strip() for r in reqStr.split(',') if r.strip()]
- break
-
- return improvedCode, requirements
- except Exception as e:
- logger.error(f"Error improving code: {str(e)}")
- return None, []
-
-
- async def _checkQuickCompletion(self, prompt: str, contentExtraction: List[Dict], outputSpecs: List[Dict]) -> Dict:
- """
- Check if the task can be completed without writing and executing code.
-
- Args:
- prompt: The task prompt
- contentExtraction: List of extracted content data with contentName and dataExtracted
- outputSpecs: List of output specifications
-
- Returns:
- Dictionary with completion status and results, or None if no quick completion
- """
- # If no data or no output specs, can't do a quick completion
- if not contentExtraction or not outputSpecs:
- return None
-
- # Create a prompt for the AI to check if this can be completed directly
- specsJson = json.dumps(outputSpecs)
- dataJson = json.dumps(contentExtraction)
-
- checkPrompt = f"""
-Analyze this task and determine if it can be completed directly without writing code.
-
-TASK:
-{prompt}
-
-EXTRACTED DATA AVAILABLE:
-{dataJson}
-
-Each entry in the extracted data contains:
-- filename: The source file name
-- contentName: The specific content section name
-- contentData: The AI-extracted text from the content
-- contentType: The type of content (text, csv, etc.)
-- summary: A brief summary of the content
-
-REQUIRED OUTPUT:
-{specsJson}
-
-If the task can be completed directly with the available extracted data, respond with:
-{{"complete": 1, "prompt": "Brief explanation of the solution", "documents": [
- {{"label": "filename.ext", "content": "content here"}}
-]}}
-
-If code would be needed to properly complete this task, respond with:
-{{"complete": 0, "prompt": "Explanation why code is needed"}}
-
-Only return valid JSON. Your entire response must be parseable as JSON.
-"""
-
- # Call AI service
- logger.debug(f"Checking if task can be completed without code execution: {checkPrompt}")
- messages = [
- {"role": "system", "content": "You are an AI assistant that determines if tasks require code execution. Reply with JSON only."},
- {"role": "user", "content": checkPrompt}
- ]
-
- try:
- # Use a lower temperature for more deterministic response
- response = await self.mydom.callAi(messages, produceUserAnswer = True, temperature=0.1)
-
- # Parse response as JSON
- if response:
- try:
- # Find JSON in response if there's any text around it
- jsonStart = response.find('{')
- jsonEnd = response.rfind('}') + 1
-
- if jsonStart >= 0 and jsonEnd > jsonStart:
- jsonStr = response[jsonStart:jsonEnd]
- result = json.loads(jsonStr)
-
- # Check if this is a proper response
- if "complete" in result:
- return result
-
- except json.JSONDecodeError:
- logger.debug("Failed to parse quick completion response as JSON")
- pass
- except Exception as e:
- logger.debug(f"Error during quick completion check: {str(e)}")
-
- # Default to requiring code execution
- return None
-
- async def _generateCode(self, prompt: str, outputSpecs: List[Dict[str, Any]] = None) -> Tuple[str, List[str]]:
- """
- Generate Python code from a prompt with the inputFiles placeholder.
- Enhanced to emphasize proper result output handling with correct document structure.
-
- Args:
- prompt: The task prompt
- outputSpecs: List of expected output specifications
-
- Returns:
- Tuple of (code, requirements)
- """
- # Create a string with output specifications to be included in the prompt
- outputSpecsStr = ""
- if outputSpecs:
- outputSpecsStr = "\nEXPECTED OUTPUT DOCUMENTS:\n"
- for i, spec in enumerate(outputSpecs, 1):
- label = spec.get("label", f"output{i}.txt")
- description = spec.get("description", "")
- outputSpecsStr += f"{i}. {label} - {description}\n"
-
- # Create improved prompt for code generation
- aiPrompt = f"""
-Generate Python code to solve the following task:
-
-TASK:
-{prompt}
-{outputSpecsStr}
-INPUT FILES:
-- 'inputFiles' variable is provided as [[filename, data, isBase64], ...]
-- For text files (isBase64=False): use data directly as string
-- For binary files (isBase64=True): use base64.b64decode(data)
-
-OUTPUT REQUIREMENTS (VERY IMPORTANT):
-- Your code MUST define a 'result' variable as a dictionary to store ALL outputs
-- The key for each entry MUST be the full filename with extension (e.g., "output.txt")
-- The value for each entry MUST be a dictionary with the following structure:
-{{
- "content": string, # The actual content (text or base64-encoded string)
- "base64Encoded": boolean, # Set to true for binary data, false for text data
- "contentType": string # MIME type of the content (e.g., "text/plain", "application/json")
-}}
-- Example result dictionary:
-result = {{
- "output.txt": {{
- "content": "This is text content",
- "base64Encoded": False,
- "contentType": "text/plain"
- }},
- "chart.png": {{
- "content": "base64encodedstring...",
- "base64Encoded": True,
- "contentType": "image/png"
- }}
-}}
-- NEVER write files to disk using open() or similar methods - use the result dictionary instead
-- If you generate any charts, reports, or visualizations, ensure they are properly encoded and included
-
-IMPORTANT - USE EXACT OUTPUT FILENAMES:
-- You MUST use the EXACT filenames specified in EXPECTED OUTPUT DOCUMENTS section
-- The key in the result dictionary must match these filenames precisely
-- If no output documents are specified, use appropriate descriptive filenames
-
-JSON OUTPUT (CRITICAL):
-- After creating the result dictionary, you MUST print it as JSON to stdout using json.dumps()
-- Add these lines at the end of your code:
- import json # if not already imported
- print(json.dumps(result))
-- This printed JSON is how the system captures your result
-- Make sure this is the last thing your code prints
-
-BINARY DATA HANDLING:
-- For binary content (images, PDFs, etc.), convert to base64 string and set base64Encoded=True
-- For text content (text, JSON, HTML, etc.), use plain string and set base64Encoded=False
-- Use appropriate MIME types for different content types
-
-CODE QUALITY:
-- Use explicit type conversions where needed (int/float/str)
-- Implement feature detection, not version checks
-- Handle errors gracefully with appropriate fallbacks
-- Follow latest API conventions for libraries
-- Validate inputs before processing
-
-Your code must start with:
-inputFiles = "=== JSONLOAD ===" # DO NOT CHANGE THIS LINE
-
-REQUIREMENTS:
-Required packages should be specified as:
-# REQUIREMENTS: library==version,library2>=version
-- Specify exact versions for critical libraries
-- Use constraint operators (==,>=,<=) as needed
-
-Return ONLY Python code without explanations or markdown.
-"""
-
- # Call AI service
- messages = [
- {"role": "system", "content": "You are a Python code generator. Provide only valid Python code without explanations or formatting. Always output the result dictionary as JSON using print(json.dumps(result)) at the end of your code."},
- {"role": "user", "content": aiPrompt}
- ]
-
- generatedContent = await self.mydom.callAi(messages, temperature=0.1)
-
- # Extract code and requirements
- code = self._cleanCode(generatedContent)
-
- # Extract requirements
- requirements = []
- for line in code.split('\n'):
- if line.strip().startswith("# REQUIREMENTS:"):
- reqStr = line.replace("# REQUIREMENTS:", "").strip()
- requirements = [r.strip() for r in reqStr.split(',') if r.strip()]
- break
-
- return code, requirements
-
- def _executeCode(self, code: str, requirements: List[str] = None) -> Dict[str, Any]:
- """
- Execute Python code in a virtual environment.
- Integrated executor functionality with enhanced result extraction.
-
- Args:
- code: Python code to execute
- requirements: List of required packages
-
- Returns:
- Execution result dictionary
- """
- try:
- # 1. Create temp directory and virtual environment
- self.tempDir = tempfile.mkdtemp(prefix="code_exec_")
- venvPath = os.path.join(self.tempDir, "venv")
-
- # Create venv
- logger.debug(f"Creating virtual environment at {venvPath}")
- subprocess.run([sys.executable, "-m", "venv", venvPath],
- check=True, capture_output=True)
-
- # Get Python executable path
- pythonExe = os.path.join(venvPath, "Scripts", "python.exe") if os.name == 'nt' else os.path.join(venvPath, "bin", "python")
-
- # 2. Install requirements if provided
- if requirements:
- logger.info(f"Installing requirements: {requirements}")
-
- # Create requirements.txt
- reqFile = os.path.join(self.tempDir, "requirements.txt")
- with open(reqFile, "w") as f:
- f.write("\n".join(requirements))
-
- x="\n".join(requirements)
- logger.info(f"Requirements file: {x}.")
-
- # Install requirements
- try:
- pipResult = subprocess.run(
- [pythonExe, "-m", "pip", "install", "-r", reqFile],
- capture_output=True,
- text=True,
- timeout=int(APP_CONFIG.get("Agent_Coder_INSTALL_TIMEOUT"))
- )
- if pipResult.returncode != 0:
- logger.debug(f"Error installing requirements: {pipResult.stderr}")
- else:
- logger.debug(f"Requirements installed successfully")
- # Log installed packages if in debug mode
- if logger.isEnabledFor(logging.DEBUG):
- pipList = subprocess.run(
- [pythonExe, "-m", "pip", "list"],
- capture_output=True,
- text=True
- )
- logger.debug(f"Installed packages:\n{pipList.stdout}")
-
- except Exception as e:
- logger.debug(f"Exception during requirements installation: {str(e)}")
-
- # 3. Write code to file
- codeFile = os.path.join(self.tempDir, "code.py")
- with open(codeFile, "w", encoding="utf-8") as f:
- f.write(code)
-
- # 4. Execute code
- logger.debug(f"Executing code with timeout of {self.executorTimeout} seconds. Code: {code}")
- process = subprocess.run(
- [pythonExe, codeFile],
- timeout=self.executorTimeout,
- capture_output=True,
- text=True
- )
-
- # 5. Process results
- stdout = process.stdout
- stderr = process.stderr
-
- # Try to extract result from stdout
- resultData = None
- if process.returncode == 0:
- try:
- # Find the last line that might be JSON
- jsonLines = []
- for line in stdout.strip().split('\n'):
- line = line.strip()
- if line and line[0] in '{[' and line[-1] in '}]':
- try:
- parsed = json.loads(line)
- jsonLines.append((line, parsed))
- except json.JSONDecodeError:
- continue
-
- # Use the last valid JSON that appears to be a dictionary
- if jsonLines:
- for line, parsed in reversed(jsonLines):
- if isinstance(parsed, dict):
- resultData = parsed
- logger.debug(f"Extracted result data from stdout: {type(resultData)}")
- break
- except Exception as e:
- logger.debug(f"Error extracting result from stdout: {str(e)}")
-
- # Enhanced logging of what was found
- if resultData:
- logger.info(f"Found result dictionary with {len(resultData)} entries: {list(resultData.keys())}")
- else:
- logger.warning("No result dictionary found in output")
-
- # Create result dictionary
- return {
- "success": process.returncode == 0,
- "output": stdout,
- "error": stderr if process.returncode != 0 else "",
- "result": resultData,
- "exitCode": process.returncode
- }
-
- except subprocess.TimeoutExpired:
- logger.error(f"Execution timed out after {self.executorTimeout} seconds")
- return {
- "success": False,
- "output": "",
- "error": f"Execution timed out after {self.executorTimeout} seconds",
- "result": None,
- "exitCode": -1
- }
- except Exception as e:
- logger.error(f"Execution error: {str(e)}")
- return {
- "success": False,
- "output": "",
- "error": f"Execution error: {str(e)}",
- "result": None,
- "exitCode": -1
- }
- finally:
- # Clean up resources
- self._cleanupExecution()
-
- def _cleanupExecution(self):
- """Clean up temporary resources from code execution."""
- if self.tempDir and os.path.exists(self.tempDir):
- try:
- logger.debug(f"Cleaning up temporary directory: {self.tempDir}")
- shutil.rmtree(self.tempDir)
- self.tempDir = None
- except Exception as e:
- logger.warning(f"Error cleaning up temp directory: {str(e)}")
-
- def _cleanCode(self, code: str) -> str:
- """Remove any markdown formatting or explanations."""
- # Remove code block markers
- code = code.replace("```python", "").replace("```", "")
-
- # Remove explanations before or after code
- lines = code.strip().split('\n')
- startIndex = 0
- endIndex = len(lines)
-
- # Find start of actual code
- for i, line in enumerate(lines):
- if line.strip().startswith("inputFiles =") or line.strip().startswith("# REQUIREMENTS:"):
- startIndex = i
- break
-
- # Clean code
- cleanedCode = '\n'.join(lines[startIndex:endIndex])
- return cleanedCode.strip()
-
-
-# Factory function for the Coder agent
-def getAgentCoder():
- """Returns an instance of the Coder agent."""
- return AgentCoder()
\ No newline at end of file
diff --git a/static/120_agentDocumentation.py b/static/120_agentDocumentation.py
deleted file mode 100644
index daae8a97..00000000
--- a/static/120_agentDocumentation.py
+++ /dev/null
@@ -1,559 +0,0 @@
-"""
-Documentation agent for creating documentation, reports, and structured content.
-Reimagined with an output-first, AI-driven approach with multi-step document generation.
-"""
-
-import logging
-import json
-from typing import Dict, Any, List
-
-from modules.workflowAgentsRegistry import AgentBase
-
-logger = logging.getLogger(__name__)
-
-class AgentDocumentation(AgentBase):
- """AI-driven agent for creating documentation and structured content using multi-step generation"""
-
- def __init__(self):
- """Initialize the documentation agent"""
- super().__init__()
- self.name = "documentation"
- self.description = "Creates structured documentation, reports, and content using AI with multi-step generation"
- self.capabilities = [
- "report_generation",
- "documentation",
- "content_structuring",
- "technical_writing",
- "knowledge_organization"
- ]
-
- def setDependencies(self, mydom=None):
- """Set external dependencies for the agent."""
- self.mydom = mydom
-
- async def processTask(self, task: Dict[str, Any]) -> Dict[str, Any]:
- """
- Process a task by focusing on required outputs and using AI to generate them.
-
- Args:
- task: Task dictionary with prompt, inputDocuments, outputSpecifications
-
- Returns:
- Dictionary with feedback and documents
- """
- try:
- # Extract task information
- prompt = task.get("prompt", "")
- inputDocuments = task.get("inputDocuments", [])
- outputSpecs = task.get("outputSpecifications", [])
-
- # Check AI service
- if not self.mydom:
- return {
- "feedback": "The Documentation agent requires an AI service to function.",
- "documents": []
- }
-
- # Extract context from input documents - focusing only on dataExtracted
- documentContext = self._extractDocumentContext(inputDocuments)
-
- # Create task analysis to understand the requirements
- documentationPlan = await self._analyzeTask(prompt, documentContext, outputSpecs)
-
- # Generate all required output documents
- documents = []
-
- # If no output specs provided, create default document
- if not outputSpecs:
- defaultFormat = documentationPlan.get("recommendedFormat", "markdown")
- defaultTitle = documentationPlan.get("title", "Documentation")
- safeTitle = self._sanitizeFilename(defaultTitle)
-
- outputSpecs = [
- {"label": f"{safeTitle}.{defaultFormat}", "description": "Comprehensive documentation"}
- ]
-
- # Process each output specification
- for spec in outputSpecs:
- outputLabel = spec.get("label", "")
- outputDescription = spec.get("description", "")
-
- # Generate the document using multi-step approach
- document = await self._createDocumentMultiStep(
- prompt,
- documentContext,
- outputLabel,
- outputDescription,
- documentationPlan
- )
-
- documents.append(document)
-
- # Generate feedback
- feedback = documentationPlan.get("feedback", f"Created {len(documents)} documents based on your requirements.")
-
- return {
- "feedback": feedback,
- "documents": documents
- }
-
- except Exception as e:
- logger.error(f"Error in documentation generation: {str(e)}", exc_info=True)
- return {
- "feedback": f"Error during documentation generation: {str(e)}",
- "documents": []
- }
-
- def _extractDocumentContext(self, documents: List[Dict[str, Any]]) -> str:
- """
- Extract context from input documents, focusing on dataExtracted.
-
- Args:
- documents: List of document objects
-
- Returns:
- Extracted context as text
- """
- contextParts = []
-
- for doc in documents:
- docName = doc.get("name", "unnamed")
- if doc.get("ext"):
- docName = f"{docName}.{doc.get('ext')}"
-
- contextParts.append(f"\n\n--- {docName} ---\n")
-
- # Process contents for dataExtracted
- for content in doc.get("contents", []):
- if content.get("dataExtracted"):
- contextParts.append(content.get("dataExtracted", ""))
-
- return "\n".join(contextParts)
-
- def _sanitizeFilename(self, filename: str) -> str:
- """
- Sanitize a filename by removing invalid characters.
-
- Args:
- filename: Filename to sanitize
-
- Returns:
- Sanitized filename
- """
- # Replace invalid characters with underscores
- invalidChars = r'<>:"/\|?*'
- for char in invalidChars:
- filename = filename.replace(char, '_')
-
- # Trim filename if too long
- if len(filename) > 100:
- filename = filename[:97] + "..."
-
- return filename
-
- async def _analyzeTask(self, prompt: str, context: str, outputSpecs: List) -> Dict:
- """
- Use AI to analyze the task and create a documentation plan.
-
- Args:
- prompt: The task prompt
- context: Document context
- outputSpecs: Output specifications
-
- Returns:
- Documentation plan dictionary
- """
- analysisPrompt = f"""
- Analyze this documentation task and create a detailed plan.
-
- TASK: {prompt}
-
- DOCUMENT CONTEXT SAMPLE:
- {context[:1000]}... (truncated)
-
- OUTPUT REQUIREMENTS:
- {json.dumps(outputSpecs, indent=2)}
-
- Create a detailed documentation plan in JSON format with the following structure:
- {{
- "title": "Document Title",
- "documentType": "report|manual|guide|whitepaper|etc",
- "audience": "technical|general|executive|etc",
- "detailedStructure": [
- {{
- "title": "Chapter/Section Title",
- "keyPoints": ["point1", "point2", ...],
- "subsections": ["subsection1", "subsection2", ...],
- "importance": "high|medium|low",
- "estimatedLength": "short|medium|long"
- }},
- ... more sections ...
- ],
- "keyTopics": ["topic1", "topic2", ...],
- "tone": "formal|conversational|instructional|etc",
- "recommendedFormat": "markdown|html|text|etc",
- "formattingRequirements": ["requirement1", "requirement2", ...],
- "executiveSummary": "Brief description of what the document will cover",
- "feedback": "Brief message explaining the documentation approach"
- }}
-
- Only return valid JSON. No preamble or explanations.
- """
-
- try:
- response = await self.mydom.callAi([
- {"role": "system", "content": "You are a documentation expert. Respond with valid JSON only."},
- {"role": "user", "content": analysisPrompt}
- ])
-
- # Extract JSON from response
- jsonStart = response.find('{')
- jsonEnd = response.rfind('}') + 1
-
- if jsonStart >= 0 and jsonEnd > jsonStart:
- plan = json.loads(response[jsonStart:jsonEnd])
- return plan
- else:
- # Fallback if JSON not found
- return {
- "title": "Documentation",
- "documentType": "report",
- "audience": "general",
- "detailedStructure": [
- {
- "title": "Introduction",
- "keyPoints": ["Purpose", "Scope"],
- "subsections": [],
- "importance": "high",
- "estimatedLength": "short"
- },
- {
- "title": "Main Content",
- "keyPoints": ["Core Information"],
- "subsections": ["Key Findings", "Analysis"],
- "importance": "high",
- "estimatedLength": "long"
- },
- {
- "title": "Conclusion",
- "keyPoints": ["Summary", "Next Steps"],
- "subsections": [],
- "importance": "medium",
- "estimatedLength": "short"
- }
- ],
- "keyTopics": ["General Information"],
- "tone": "formal",
- "recommendedFormat": "markdown",
- "formattingRequirements": ["Clear headings", "Professional formatting"],
- "executiveSummary": "A comprehensive documentation covering the requested topics.",
- "feedback": "Created documentation based on your requirements."
- }
-
- except Exception as e:
- logger.warning(f"Error creating documentation plan: {str(e)}")
- return {
- "title": "Documentation",
- "documentType": "report",
- "audience": "general",
- "detailedStructure": [
- {
- "title": "Introduction",
- "keyPoints": ["Purpose", "Scope"],
- "subsections": [],
- "importance": "high",
- "estimatedLength": "short"
- },
- {
- "title": "Main Content",
- "keyPoints": ["Core Information"],
- "subsections": ["Key Findings", "Analysis"],
- "importance": "high",
- "estimatedLength": "long"
- },
- {
- "title": "Conclusion",
- "keyPoints": ["Summary", "Next Steps"],
- "subsections": [],
- "importance": "medium",
- "estimatedLength": "short"
- }
- ],
- "keyTopics": ["General Information"],
- "tone": "formal",
- "recommendedFormat": "markdown",
- "formattingRequirements": ["Clear headings", "Professional formatting"],
- "executiveSummary": "A comprehensive documentation covering the requested topics.",
- "feedback": "Created documentation based on your requirements."
- }
-
- async def _createDocumentMultiStep(self, prompt: str, context: str, outputLabel: str,
- outputDescription: str, documentationPlan: Dict) -> Dict:
- """
- Create a document using a multi-step approach with separate AI calls for each section.
-
- Args:
- prompt: Original task prompt
- context: Document context
- outputLabel: Output filename
- outputDescription: Description of desired output
- documentationPlan: Documentation plan from AI
-
- Returns:
- Document object
- """
- # Determine format from filename
- formatType = outputLabel.split('.')[-1].lower() if '.' in outputLabel else "md"
-
- # Map format to contentType
- contentTypeMap = {
- "md": "text/markdown",
- "markdown": "text/markdown",
- "html": "text/html",
- "txt": "text/plain",
- "text": "text/plain",
- "json": "application/json",
- "csv": "text/csv"
- }
-
- contentType = contentTypeMap.get(formatType, "text/plain")
-
- # Get document information
- title = documentationPlan.get("title", "Documentation")
- documentType = documentationPlan.get("documentType", "document")
- audience = documentationPlan.get("audience", "general")
- tone = documentationPlan.get("tone", "formal")
- keyTopics = documentationPlan.get("keyTopics", [])
- formattingRequirements = documentationPlan.get("formattingRequirements", [])
-
- # Get the detailed structure
- detailedStructure = documentationPlan.get("detailedStructure", [])
- if not detailedStructure:
- # Fallback structure if none provided
- detailedStructure = [
- {
- "title": "Introduction",
- "keyPoints": ["Purpose", "Scope"],
- "importance": "high"
- },
- {
- "title": "Main Content",
- "keyPoints": ["Core Information"],
- "importance": "high"
- },
- {
- "title": "Conclusion",
- "keyPoints": ["Summary", "Next Steps"],
- "importance": "medium"
- }
- ]
-
- try:
- # Step 1: Generate document introduction
- introPrompt = f"""
- Create the introduction for a {documentType} titled "{title}".
-
- DOCUMENT OVERVIEW:
- - Type: {documentType}
- - Audience: {audience}
- - Tone: {tone}
- - Key Topics: {', '.join(keyTopics)}
- - Format: {formatType}
-
- TASK CONTEXT: {prompt}
-
- This introduction should:
- 1. Clearly state the purpose and scope of the document
- 2. Provide context and background information
- 3. Outline what the reader will find in the document
- 4. Set the appropriate tone for the {audience} audience
-
- The introduction should be professional and engaging, formatted according to {formatType} standards.
- """
-
- introduction = await self.mydom.callAi([
- {"role": "system", "content": f"You are a documentation expert creating an introduction in {formatType} format."},
- {"role": "user", "content": introPrompt}
- ], produceUserAnswer = True)
-
- # Step 2: Generate executive summary (if applicable)
- if documentType in ["report", "whitepaper", "case study"]:
- summaryPrompt = f"""
- Create an executive summary for a {documentType} titled "{title}".
-
- DOCUMENT OVERVIEW:
- - Type: {documentType}
- - Audience: {audience}
- - Key Topics: {', '.join(keyTopics)}
-
- TASK CONTEXT: {prompt}
-
- This executive summary should:
- 1. Provide a concise overview of the entire document
- 2. Highlight key findings, recommendations, or conclusions
- 3. Be suitable for executives or busy readers who may only read this section
- 4. Be professionally formatted according to {formatType} standards
-
- Keep the summary focused and impactful, approximately 200-300 words.
- """
-
- executiveSummary = await self.mydom.callAi([
- {"role": "system", "content": f"You are a documentation expert creating an executive summary in {formatType} format."},
- {"role": "user", "content": summaryPrompt}
- ], produceUserAnswer = True)
- else:
- executiveSummary = ""
-
- # Step 3: Generate each section
- sections = []
-
- for section in detailedStructure:
- sectionTitle = section.get("title", "Section")
- keyPoints = section.get("keyPoints", [])
- subsections = section.get("subsections", [])
- importance = section.get("importance", "medium")
-
- # Adjust depth based on importance
- detailLevel = "high" if importance == "high" else "medium"
-
- sectionPrompt = f"""
- Create the "{sectionTitle}" section for a {documentType} titled "{title}".
-
- SECTION DETAILS:
- - Title: {sectionTitle}
- - Key Points to Cover: {', '.join(keyPoints)}
- - Subsections: {', '.join(subsections)}
- - Detail Level: {detailLevel}
-
- DOCUMENT CONTEXT:
- - Type: {documentType}
- - Audience: {audience}
- - Tone: {tone}
- - Format: {formatType}
-
- TASK CONTEXT: {prompt}
-
- AVAILABLE INFORMATION:
- {context[:500]}... (truncated)
-
- This section should:
- 1. Be comprehensive and well-structured
- 2. Cover all the key points listed
- 3. Include the specified subsections with appropriate headings
- 4. Maintain a {tone} tone suitable for the {audience} audience
- 5. Be properly formatted according to {formatType} standards
- 6. Include specific examples, data, or evidence where appropriate
-
- Be thorough in your coverage of this section, providing substantive content.
- """
-
- sectionContent = await self.mydom.callAi([
- {"role": "system", "content": f"You are a documentation expert creating detailed content for the {sectionTitle} section."},
- {"role": "user", "content": sectionPrompt}
- ], produceUserAnswer = True)
-
- sections.append(sectionContent)
-
- # Step 4: Generate conclusion
- conclusionPrompt = f"""
- Create the conclusion for a {documentType} titled "{title}".
-
- DOCUMENT OVERVIEW:
- - Type: {documentType}
- - Audience: {audience}
- - Key Topics: {', '.join(keyTopics)}
-
- TASK CONTEXT: {prompt}
-
- This conclusion should:
- 1. Summarize the key points covered in the document
- 2. Provide closure to the topics discussed
- 3. Include any relevant recommendations or next steps
- 4. Leave the reader with a clear understanding of the document's significance
-
- The conclusion should be professional and impactful, formatted according to {formatType} standards.
- """
-
- conclusion = await self.mydom.callAi([
- {"role": "system", "content": f"You are a documentation expert creating a conclusion in {formatType} format."},
- {"role": "user", "content": conclusionPrompt}
- ], produceUserAnswer = True)
-
- # Step 5: Assemble the complete document
- if formatType in ["md", "markdown"]:
- # Markdown format
- documentContent = f"# {title}\n\n"
-
- if executiveSummary:
- documentContent += f"## Executive Summary\n\n{executiveSummary}\n\n"
-
- documentContent += f"{introduction}\n\n"
-
- for i, sectionContent in enumerate(sections):
- # Ensure section starts with heading if not already
- sectionTitle = detailedStructure[i].get("title", f"Section {i+1}")
- if not sectionContent.strip().startswith("#"):
- documentContent += f"## {sectionTitle}\n\n"
- documentContent += f"{sectionContent}\n\n"
-
- documentContent += f"## Conclusion\n\n{conclusion}\n"
-
- elif formatType == "html":
- # HTML format
- documentContent = f"\n\n{title}\n\n\n"
- documentContent += f"
{title}
\n\n"
-
- if executiveSummary:
- documentContent += f"
Executive Summary
\n
{executiveSummary}
\n\n"
-
- documentContent += f"
{introduction}
\n\n"
-
- for i, sectionContent in enumerate(sections):
- sectionTitle = detailedStructure[i].get("title", f"Section {i+1}")
- documentContent += f"
{sectionTitle}
\n
{sectionContent}
\n\n"
-
- documentContent += f"
Conclusion
\n
{conclusion}
\n"
- documentContent += "\n"
-
- else:
- # Plain text format
- documentContent = f"{title}\n{'=' * len(title)}\n\n"
-
- if executiveSummary:
- documentContent += f"EXECUTIVE SUMMARY\n{'-' * 17}\n\n{executiveSummary}\n\n"
-
- documentContent += f"{introduction}\n\n"
-
- for i, sectionContent in enumerate(sections):
- sectionTitle = detailedStructure[i].get("title", f"Section {i+1}")
- documentContent += f"{sectionTitle}\n{'-' * len(sectionTitle)}\n\n{sectionContent}\n\n"
-
- documentContent += f"CONCLUSION\n{'-' * 10}\n\n{conclusion}\n"
-
- # Create document object
- return self.formatAgentDocumentOutput(outputLabel, documentContent, contentType)
-
- except Exception as e:
- logger.error(f"Error creating document: {str(e)}", exc_info=True)
-
- # Create a simple error document
- if formatType in ["md", "markdown"]:
- content = f"# Error in Documentation\n\nThere was an error generating the documentation: {str(e)}"
- elif formatType == "html":
- content = f"
Error in Documentation
There was an error generating the documentation: {str(e)}
"
- else:
- content = f"Error in Documentation\n\nThere was an error generating the documentation: {str(e)}"
-
- return {
- "label": outputLabel,
- "content": content,
- "metadata": {
- "contentType": contentType
- }
- }
-
-
-# Factory function for the Documentation agent
-def getAgentDocumentation():
- """Returns an instance of the Documentation agent."""
- return AgentDocumentation()
\ No newline at end of file
diff --git a/static/121_auth.py b/static/121_auth.py
deleted file mode 100644
index 6fdf7e2f..00000000
--- a/static/121_auth.py
+++ /dev/null
@@ -1,158 +0,0 @@
-"""
-Authentication module for backend API.
-Handles JWT-based authentication, token generation, and user context.
-"""
-
-from datetime import datetime, timedelta, timezone
-from typing import Optional, Dict, Any, Tuple
-from fastapi import Depends, HTTPException, status
-from fastapi.security import OAuth2PasswordBearer
-from jose import JWTError, jwt
-import logging
-
-from modules.gatewayInterface import getGatewayInterface
-from modules.configuration import APP_CONFIG
-
-# Get Config Data
-SECRET_KEY = APP_CONFIG.get("APP_JWT_SECRET_SECRET")
-ALGORITHM = APP_CONFIG.get("Auth_ALGORITHM")
-ACCESS_TOKEN_EXPIRE_MINUTES = int(APP_CONFIG.get("APP_TOKEN_EXPIRY"))
-
-# OAuth2 Setup
-oauth2Scheme = OAuth2PasswordBearer(tokenUrl="token")
-
-# Logger
-logger = logging.getLogger(__name__)
-
-def createAccessToken(data: dict, expiresDelta: Optional[timedelta] = None) -> str:
- """
- Creates a JWT Access Token.
-
- Args:
- data: Data to encode (usually user ID or username)
- expiresDelta: Validity duration of the token (optional)
-
- Returns:
- JWT Token as string
- """
- toEncode = data.copy()
-
- if expiresDelta:
- expire = datetime.now(timezone.utc) + expiresDelta
- else:
- expire = datetime.now(timezone.utc) + timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES)
-
- toEncode.update({"exp": expire})
- encodedJwt = jwt.encode(toEncode, SECRET_KEY, algorithm=ALGORITHM)
-
- return encodedJwt
-
-async def getCurrentUser(token: str = Depends(oauth2Scheme)) -> Dict[str, Any]:
- """
- Extracts and validates the current user from the JWT token.
-
- Args:
- token: JWT Token from the Authorization header
-
- Returns:
- User data
-
- Raises:
- HTTPException: For invalid token or user
- """
- credentialsException = HTTPException(
- status_code=status.HTTP_401_UNAUTHORIZED,
- detail="Invalid authentication credentials",
- headers={"WWW-Authenticate": "Bearer"},
- )
-
- try:
- # Decode token
- payload = jwt.decode(token, SECRET_KEY, algorithms=[ALGORITHM])
-
- # Extract username from token
- username: str = payload.get("sub")
- if username is None:
- raise credentialsException
-
- # Extract mandate ID from token (if present)
- mandateId: int = payload.get("mandateId", 1) # Default: Root mandate
-
- except JWTError:
- logger.warning("Invalid JWT Token")
- raise credentialsException
-
- # Initialize Gateway Interface without context
- gateway = getGatewayInterface()
-
- # Retrieve user from database
- user = gateway.getUserByUsername(username)
-
- if user is None:
- logger.warning(f"User {username} not found")
- raise credentialsException
-
- if user.get("disabled", False):
- logger.warning(f"User {username} is disabled")
- raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="User is disabled")
-
- return user
-
-async def getCurrentActiveUser(currentUser: Dict[str, Any] = Depends(getCurrentUser)) -> Dict[str, Any]:
- """
- Ensures that the user is active.
-
- Args:
- currentUser: Current user data
-
- Returns:
- User data
-
- Raises:
- HTTPException: If the user is disabled
- """
- if currentUser.get("disabled", False):
- raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="User is disabled")
-
- return currentUser
-
-async def getUserContext(currentUser: Dict[str, Any]) -> Tuple[int, int]:
- """
- Extracts the mandate ID and user ID from the current user.
- Enhanced with better logging.
-
- Args:
- currentUser: The current user
-
- Returns:
- Tuple of (mandateId, userId)
- """
- # Default values
- defaultMandateId = 0
- defaultUserId = 0
-
- # Extract mandateId
- mandateId = currentUser.get("mandateId", None)
- if mandateId is None:
- logger.warning(f"No mandateId found in currentUser, using default: {defaultMandateId}")
- mandateId = defaultMandateId
- else:
- try:
- mandateId = int(mandateId)
- except (ValueError, TypeError):
- logger.error(f"Invalid mandateId value: {mandateId}, using default: {defaultMandateId}")
- mandateId = defaultMandateId
-
- # Extract userId
- userId = currentUser.get("id", None)
- if userId is None:
- logger.warning(f"No userId found in currentUser, using default: {defaultUserId}")
- userId = defaultUserId
- else:
- try:
- userId = int(userId)
- except (ValueError, TypeError):
- logger.error(f"Invalid userId value: {userId}, using default: {defaultUserId}")
- userId = defaultUserId
-
- return mandateId, userId
\ No newline at end of file
diff --git a/static/122_configuration.py b/static/122_configuration.py
deleted file mode 100644
index f7e9b81f..00000000
--- a/static/122_configuration.py
+++ /dev/null
@@ -1,183 +0,0 @@
-"""
-Utility module for configuration management.
-
-This module provides a global APP_CONFIG object for accessing configuration from both
-config.ini files and environment variables stored in .env files, using a flat structure.
-"""
-
-import os
-import logging
-from typing import Any, Dict, Optional
-from pathlib import Path
-
-# Set up logging
-logger = logging.getLogger(__name__)
-
-class Configuration:
- """
- Configuration class with attribute-style access to flattened configuration.
- """
- def __init__(self):
- """Initialize the configuration object"""
- self._data = {}
- self._configFilePath = None
- self._envFilePath = None
- self._configMtime = 0
- self._envMtime = 0
- self.refresh()
-
- def refresh(self):
- """Reload configuration from files"""
- self._loadConfig()
- self._loadEnv()
- logger.info("Configuration refreshed")
-
- def _loadConfig(self):
- """Load configuration from config.ini file in flattened format"""
- # Find config.ini file (look in current directory and parent directory)
- configPath = Path('config.ini')
- if not configPath.exists():
- # Try in parent directory
- configPath = Path('../config.ini')
- if not configPath.exists():
- logger.warning(f"Configuration file not found at {configPath.absolute()}")
- return
-
- self._configFilePath = configPath
- currentMtime = os.path.getmtime(configPath)
-
- # Skip if file hasn't changed
- if currentMtime <= self._configMtime:
- return
-
- self._configMtime = currentMtime
-
- try:
- with open(configPath, 'r') as f:
- for line in f:
- line = line.strip()
- # Skip empty lines and comments
- if not line or line.startswith('#'):
- continue
-
- # Parse key-value pairs
- if '=' in line:
- key, value = line.split('=', 1)
- key = key.strip()
- value = value.strip()
-
- # Add directly to data dictionary
- self._data[key] = value
-
-
- except Exception as e:
- logger.error(f"Error loading configuration: {e}")
-
- def _loadEnv(self):
- """Load environment variables from .env file"""
- # Find .env file (look in current directory and parent directory)
- envPath = Path('.env')
- if not envPath.exists():
- # Try in parent directory
- envPath = Path('../.env')
- if not envPath.exists():
- logger.warning(f"Environment file not found at {envPath.absolute()}")
- return
-
- self._envFilePath = envPath
- currentMtime = os.path.getmtime(envPath)
-
- # Skip if file hasn't changed
- if currentMtime <= self._envMtime:
- return
-
- self._envMtime = currentMtime
-
- try:
- with open(envPath, 'r') as f:
- for line in f:
- line = line.strip()
- # Skip empty lines and comments
- if not line or line.startswith('#'):
- continue
-
- # Parse key-value pairs
- if '=' in line:
- key, value = line.split('=', 1)
- key = key.strip()
- value = value.strip()
-
- # Add directly to data dictionary
- self._data[key] = value
-
- logger.info(f"Loaded environment variables from {envPath.absolute()}")
-
- # Also load system environment variables (don't override existing)
- for key, value in os.environ.items():
- if key not in self._data:
- self._data[key] = value
-
- except Exception as e:
- logger.error(f"Error loading environment variables: {e}")
-
- def checkForUpdates(self):
- """Check if configuration files have changed and reload if necessary"""
- if self._configFilePath and os.path.exists(self._configFilePath):
- currentMtime = os.path.getmtime(self._configFilePath)
- if currentMtime > self._configMtime:
- logger.info("Config file has changed, reloading...")
- self._loadConfig()
-
- if self._envFilePath and os.path.exists(self._envFilePath):
- currentMtime = os.path.getmtime(self._envFilePath)
- if currentMtime > self._envMtime:
- logger.info("Environment file has changed, reloading...")
- self._loadEnv()
-
- def get(self, key: str, default: Any = None) -> Any:
- """Get configuration value with optional default"""
- self.checkForUpdates() # Check for file changes
-
- if key in self._data:
- value = self._data[key]
- # Handle secrets (keys ending with _SECRET)
- if key.endswith("_SECRET"):
- return handleSecret(value)
- return value
- return default
-
- def __getattr__(self, name: str) -> Any:
- """Enable attribute-style access to configuration"""
- self.checkForUpdates() # Check for file changes
-
- value = self.get(name)
- if value is None:
- raise AttributeError(f"Configuration key '{name}' not found")
- return value
-
- def __dir__(self) -> list:
- """Support auto-completion of attributes"""
- self.checkForUpdates() # Check for file changes
- return list(self._data.keys()) + super().__dir__()
-
- def set(self, key: str, value: Any) -> None:
- """Set a configuration value (for testing/overrides)"""
- self._data[key] = value
-
-def handleSecret(value: str) -> str:
- """
- Handle secret values. Currently just returns the plain text value,
- but can be enhanced to provide actual decryption in the future.
-
- Args:
- value: The secret value to handle
-
- Returns:
- str: Processed secret value
- """
- # For now, just return the value as-is
- # In the future, this could be enhanced to decrypt values
- return value
-
-# Create the global APP_CONFIG instance
-APP_CONFIG = Configuration()
\ No newline at end of file
diff --git a/static/123_agentWebcrawler.py b/static/123_agentWebcrawler.py
deleted file mode 100644
index 5cce8176..00000000
--- a/static/123_agentWebcrawler.py
+++ /dev/null
@@ -1,796 +0,0 @@
-"""
-Webcrawler agent for research and retrieval of information from the web.
-Reimagined with an output-first, AI-driven approach.
-"""
-
-import logging
-import json
-import re
-import time
-from typing import Dict, Any, List
-from urllib.parse import quote_plus, unquote
-
-from bs4 import BeautifulSoup
-import requests
-import markdown
-
-from modules.workflowAgentsRegistry import AgentBase
-from modules.configuration import APP_CONFIG
-
-logger = logging.getLogger(__name__)
-
-class AgentWebcrawler(AgentBase):
- """AI-driven agent for web research and information retrieval"""
-
- def __init__(self):
- """Initialize the webcrawler agent"""
- super().__init__()
- self.name = "webcrawler"
- self.description = "Conducts web research and collects information from online sources"
- self.capabilities = [
- "webSearch",
- "informationRetrieval",
- "dataCollection",
- "searchResultsAnalysis",
- "webpageContentExtraction"
- ]
-
- # Web crawling configuration
- self.maxUrl = int(APP_CONFIG.get("Agent_Webcrawler_MAX_URLS", "5"))
- self.maxSearchTerms = int(APP_CONFIG.get("Agent_Webcrawler_MAX_SEARCH_KEYWORDS", "3"))
- self.maxResults = int(APP_CONFIG.get("Agent_Webcrawler_MAX_SEARCH_RESULTS", "5"))
- self.timeout = int(APP_CONFIG.get("Agent_Webcrawler_TIMEOUT", "30"))
- self.searchEngine = APP_CONFIG.get("Agent_Webcrawler_SEARCH_ENGINE", "https://html.duckduckgo.com/html/?q=")
- self.userAgent = APP_CONFIG.get("Agent_Webcrawler_USER_AGENT", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36")
-
- def setDependencies(self, mydom=None):
- """Set external dependencies for the agent."""
- self.mydom = mydom
-
- async def processTask(self, task: Dict[str, Any]) -> Dict[str, Any]:
- """
- Process a task by focusing on required outputs and using AI to guide the research process.
-
- Args:
- task: Task dictionary with prompt, inputDocuments, outputSpecifications
-
- Returns:
- Dictionary with feedback and documents
- """
- try:
- # Extract task information
- prompt = task.get("prompt", "")
- outputSpecs = task.get("outputSpecifications", [])
-
- # Check AI service
- if not self.mydom:
- return {
- "feedback": "The Webcrawler agent requires an AI service to function effectively.",
- "documents": []
- }
-
- # Create research plan
- researchPlan = await self._createResearchPlan(prompt)
-
- # Check if this is truly a web research task
- if not researchPlan.get("requiresWebResearch", True):
- return {
- "feedback": "This task doesn't appear to require web research. Please try a different agent.",
- "documents": []
- }
-
- # Gather raw material through web research
- rawResults = await self._gatherResearchMaterial(researchPlan)
-
- # Format results into requested output documents
- documents = await self._createOutputDocuments(
- prompt,
- rawResults,
- outputSpecs,
- researchPlan
- )
-
- # Generate feedback
- feedback = researchPlan.get("feedback", f"I conducted web research on '{prompt[:50]}...' and gathered information from {len(rawResults)} relevant sources.")
-
- return {
- "feedback": feedback,
- "documents": documents
- }
-
- except Exception as e:
- logger.error(f"Error during web research: {str(e)}", exc_info=True)
- return {
- "feedback": f"Error during web research: {str(e)}",
- "documents": []
- }
-
- async def _createResearchPlan(self, prompt: str) -> Dict[str, Any]:
- """
- Use AI to create a detailed research plan.
-
- Args:
- prompt: The research query
-
- Returns:
- Research plan dictionary
- """
- researchPrompt = f"""
- Create a detailed web research plan for this task: "{prompt}"
-
- Analyze the request carefully and create a structured plan in JSON format with the following elements:
- {{
- "requiresWebResearch": true/false, # Whether this genuinely requires web research
- "researchQuestions": ["question1", "question2", ...], # 2-4 specific questions to answer
- "searchTerms": ["term1", "term2", ...], # Up to {self.maxSearchTerms} effective search terms
- "directUrls": ["url1", "url2", ...], # Any URLs directly mentioned in the request (up to {self.maxUrl})
- "expectedSources": ["type1", "type2", ...], # Types of sources that would be most valuable
- "contentFocus": "what specific content to extract or focus on",
- "feedback": "explanation of how the research will be conducted"
- }}
-
- Respond with ONLY the JSON object, no additional text or explanations.
- """
-
- try:
- # Get research plan from AI
- response = await self.mydom.callAi([
- {"role": "system", "content": "You are a web research planning expert. Create precise research plans in JSON format only."},
- {"role": "user", "content": researchPrompt}
- ])
-
- # Extract JSON
- jsonStart = response.find('{')
- jsonEnd = response.rfind('}') + 1
-
- if jsonStart >= 0 and jsonEnd > jsonStart:
- plan = json.loads(response[jsonStart:jsonEnd])
-
- # Ensure we have the expected fields with defaults if missing
- if "searchTerms" not in plan:
- plan["searchTerms"] = [prompt]
- if "directUrls" not in plan:
- plan["directUrls"] = []
- if "researchQuestions" not in plan:
- plan["researchQuestions"] = ["What information can be found about this topic?"]
-
- return plan
- else:
- # Fallback plan
- logger.warning(f"Not able creating research plan, generating fallback plan")
- return {
- "requiresWebResearch": True,
- "researchQuestions": ["What information can be found about this topic?"],
- "searchTerms": [prompt],
- "directUrls": [],
- "expectedSources": ["Web pages", "Articles"],
- "contentFocus": "Relevant information about the topic",
- "feedback": f"I'll conduct web research on '{prompt}' and gather relevant information."
- }
-
- except Exception as e:
- logger.warning(f"Error creating research plan: {str(e)}")
- # Simple fallback plan
- return {
- "requiresWebResearch": True,
- "researchQuestions": ["What information can be found about this topic?"],
- "searchTerms": [prompt],
- "directUrls": [],
- "expectedSources": ["Web pages", "Articles"],
- "contentFocus": "Relevant information about the topic",
- "feedback": f"I'll conduct web research on '{prompt}' and gather relevant information."
- }
-
- async def _gatherResearchMaterial(self, researchPlan: Dict[str, Any]) -> List[Dict[str, Any]]:
- """
- Gather research material based on the research plan.
-
- Args:
- researchPlan: Research plan dictionary
-
- Returns:
- List of research results
- """
- allResults = []
-
- # Process direct URLs
- directUrls = researchPlan.get("directUrls", [])[:self.maxUrl]
- for url in directUrls:
- logger.info(f"Processing direct URL: {url}")
- try:
- # Fetch and extract content
- soup = self._readUrl(url)
-
- if soup:
- # Extract title and content
- title = self._extractTitle(soup, url)
- content = self._extractMainContent(soup)
-
- # Add to results
- allResults.append({
- "title": title,
- "url": url,
- "sourceType": "directUrl",
- "content": content,
- "summary": "" # Will be filled later
- })
- except Exception as e:
- logger.warning(f"Error processing URL {url}: {str(e)}")
-
- # Process search terms
- searchTerms = researchPlan.get("searchTerms", [])[:self.maxSearchTerms]
- for term in searchTerms:
- logger.info(f"Searching for: {term}")
- try:
- # Perform search
- searchResults = self._searchWeb(term)
-
- # Process each search result
- for result in searchResults:
- # Check if URL is already in results
- if not any(r["url"] == result["url"] for r in allResults):
- allResults.append({
- "title": result["title"],
- "url": result["url"],
- "sourceType": "searchResult",
- "content": result["data"],
- "snippet": result["snippet"],
- "summary": "" # Will be filled later
- })
-
- # Stop if we've reached the maximum results
- if len(allResults) >= self.maxResults:
- break
- except Exception as e:
- logger.warning(f"Error searching for {term}: {str(e)}")
-
- # Stop if we've reached the maximum results
- if len(allResults) >= self.maxResults:
- break
-
- # Create summaries in parallel for all results
- allResults = await self._summarizeAllResults(allResults, researchPlan)
-
- return allResults
-
- async def _summarizeAllResults(self, results: List[Dict[str, Any]], researchPlan: Dict[str, Any]) -> List[Dict[str, Any]]:
- """
- Create summaries for all research results.
-
- Args:
- results: List of research results
- researchPlan: Research plan with questions and focus
-
- Returns:
- Results with added summaries
- """
- for i, result in enumerate(results):
- logger.info(f"Summarizing result {i+1}/{len(results)}: {result['title'][:30]}...")
-
- try:
- # Limit content length to avoid token issues
- content = self._limitText(result.get("content", ""), maxChars=8000)
- researchQuestions = researchPlan.get("researchQuestions", ["What relevant information does this page contain?"])
- contentFocus = researchPlan.get("contentFocus", "Relevant information")
-
- # Create summary using AI
- summaryPrompt = f"""
- Summarize this web page content based on these research questions:
- {', '.join(researchQuestions)}
-
- Focus on: {contentFocus}
-
- Web page: {result['url']}
- Title: {result['title']}
-
- Content:
- {content}
-
- Create a concise summary that:
- 1. Directly answers the research questions if possible
- 2. Extracts the most relevant information from the page
- 3. Includes specific facts, figures, or quotes if available
- 4. Is around 2000 characters long
-
- Only include information actually found in the content. No fabrications or assumptions.
- """
-
- if self.mydom:
- summary = await self.mydom.callAi([
- {"role": "system", "content": "You summarize web content accurately and concisely, focusing only on what is actually in the content."},
- {"role": "user", "content": summaryPrompt}
- ])
-
- # Store the summary
- result["summary"] = summary
- else:
- # Fallback if no AI service
- logger.warning(f"Not able to summarize result, using fallback plan.")
- result["summary"] = f"Content from {result['url']} ({len(content)} characters)"
-
- except Exception as e:
- logger.warning(f"Error summarizing result {i+1}: {str(e)}")
- result["summary"] = f"Error creating summary: {str(e)}"
-
- return results
-
- async def _createOutputDocuments(self, prompt: str, results: List[Dict[str, Any]],
- outputSpecs: List[Dict[str, Any]], researchPlan: Dict[str, Any]) -> List[Dict[str, Any]]:
- """
- Create output documents based on research results and specifications.
-
- Args:
- prompt: Original research prompt
- results: List of research results
- outputSpecs: Output specifications
- researchPlan: Research plan
-
- Returns:
- List of output documents
- """
- # If no output specs provided, create default output
- if not outputSpecs:
- outputSpecs = [{
- "label": "webResearchResults.md",
- "description": "Comprehensive web research results"
- }]
-
- # Generate documents
- documents = []
-
- # Process each output specification
- for spec in outputSpecs:
- outputLabel = spec.get("label", "")
- outputDescription = spec.get("description", "")
-
- # Determine format based on file extension
- formatType = self._determineFormatType(outputLabel)
-
- # Create appropriate document based on format
- if formatType == "json":
- # JSON output - structured data
- document = await self._createJsonDocument(prompt, results, researchPlan, outputLabel)
- elif formatType == "csv":
- # CSV output - tabular data
- document = await self._createCsvDocument(results, outputLabel)
- else:
- # Text-based output (markdown, html, text) - narrative report
- document = await self._createNarrativeDocument(
- prompt, results, researchPlan, formatType, outputLabel, outputDescription
- )
-
- documents.append(document)
-
- return documents
-
- async def _createNarrativeDocument(self, prompt: str, results: List[Dict[str, Any]],
- researchPlan: Dict[str, Any], formatType: str,
- outputLabel: str, outputDescription: str) -> Dict[str, Any]:
- """
- Create a narrative document (markdown, html, text) from research results.
-
- Args:
- prompt: Original research prompt
- results: Research results
- researchPlan: Research plan
- formatType: Output format (markdown, html, text)
- outputLabel: Output filename
- outputDescription: Output description
-
- Returns:
- Document object
- """
- # Create content based on format
- if formatType == "markdown":
- contentType = "text/markdown"
- templateFormat = "markdown"
- elif formatType == "html":
- contentType = "text/html"
- templateFormat = "html"
- else:
- contentType = "text/plain"
- templateFormat = "text"
-
- # Prepare research context
- researchQuestions = researchPlan.get("researchQuestions", [])
- searchTerms = researchPlan.get("searchTerms", [])
-
- # Create document structure based on results
- sourcesSummary = []
- for result in results:
- sourcesSummary.append({
- "title": result.get("title", "Untitled"),
- "url": result.get("url", ""),
- "summary": result.get("summary", ""),
- "snippet": result.get("snippet", "")
- })
-
- # Truncate content for prompt
- sourcesJson = json.dumps(sourcesSummary, indent=2)
- if len(sourcesJson) > 10000:
- # Logic to truncate each summary while preserving structure
- for i in range(len(sourcesSummary)):
- if len(sourcesJson) <= 10000:
- break
- # Gradually truncate summaries
- sourcesSummary[i]["summary"] = sourcesSummary[i]["summary"][:500] + "..."
- sourcesJson = json.dumps(sourcesSummary, indent=2)
-
- # Create report prompt
- reportPrompt = f"""
- Create a comprehensive {formatType} research report based on the following web research:
-
- TASK: {prompt}
-
- RESEARCH QUESTIONS:
- {', '.join(researchQuestions)}
-
- SEARCH TERMS USED:
- {', '.join(searchTerms)}
-
- SOURCES AND FINDINGS:
- {sourcesJson}
-
- REPORT DETAILS:
- - Format: {templateFormat}
- - Filename: {outputLabel}
- - Description: {outputDescription}
-
- Create a well-structured report that:
- 1. Includes an executive summary of key findings
- 2. Addresses each research question directly
- 3. Integrates information from all relevant sources
- 4. Cites sources appropriately for each piece of information
- 5. Provides a comprehensive synthesis of the research
- 6. Is formatted professionally and appropriately for {templateFormat}
-
- The report should be scholarly, accurate, and focused on the original research task.
- """
-
- try:
- # Generate report with AI
- reportContent = await self.mydom.callAi([
- {"role": "system", "content": f"You create professional research reports in {templateFormat} format."},
- {"role": "user", "content": reportPrompt}
- ])
-
- # Convert to HTML if needed
- if formatType == "html" and not reportContent.lower().startswith("Web Research Results{reportContent}"
-
- return self.formatAgentDocumentOutput(outputLabel, reportContent, contentType)
-
- except Exception as e:
- logger.error(f"Error creating narrative document: {str(e)}")
- # Create error document
- if formatType == "markdown":
- content = f"# Web Research Error\n\nAn error occurred: {str(e)}"
- elif formatType == "html":
- content = f"
Web Research Error
An error occurred: {str(e)}
"
- else:
- content = f"WEB RESEARCH ERROR\n\nAn error occurred: {str(e)}"
-
- return self.formatAgentDocumentOutput(outputLabel, content, contentType)
-
- async def _createJsonDocument(self, prompt: str, results: List[Dict[str, Any]],
- researchPlan: Dict[str, Any], outputLabel: str) -> Dict[str, Any]:
- """
- Create a JSON document from research results.
-
- Args:
- prompt: Original research prompt
- results: Research results
- researchPlan: Research plan
- outputLabel: Output filename
-
- Returns:
- Document object
- """
- try:
- # Create structured data
- sourcesData = []
- for result in results:
- sourcesData.append({
- "title": result.get("title", "Untitled"),
- "url": result.get("url", ""),
- "summary": result.get("summary", ""),
- "snippet": result.get("snippet", ""),
- "sourceType": result.get("sourceType", "")
- })
-
- # Create metadata
- metadata = {
- "query": prompt,
- "timestamp": time.strftime("%Y-%m-%d %H:%M:%S"),
- "researchQuestions": researchPlan.get("researchQuestions", []),
- "searchTerms": researchPlan.get("searchTerms", [])
- }
-
- # Compile complete report object
- jsonContent = {
- "metadata": metadata,
- "summary": researchPlan.get("feedback", "Web research results"),
- "sources": sourcesData
- }
-
- # Convert to JSON string
- content = json.dumps(jsonContent, indent=2)
-
- return self.formatAgentDocumentOutput(outputLabel, content, "application/json")
-
- except Exception as e:
- logger.error(f"Error creating JSON document: {str(e)}")
- return self.formatAgentDocumentOutput(outputLabel, json.dumps({"error": str(e)}), "application/json")
-
- async def _createCsvDocument(self, results: List[Dict[str, Any]], outputLabel: str) -> Dict[str, Any]:
- """
- Create a CSV document from research results.
-
- Args:
- results: Research results
- outputLabel: Output filename
-
- Returns:
- Document object
- """
- try:
- # Create CSV header
- csvLines = ["Title,URL,Source Type,Snippet"]
-
- # Add results
- for result in results:
- # Escape CSV fields
- title = result.get("title", "").replace('"', '""')
- url = result.get("url", "").replace('"', '""')
- sourceType = result.get("sourceType", "").replace('"', '""')
- snippet = result.get("snippet", "").replace('"', '""')
-
- csvLines.append(f'"{title}","{url}","{sourceType}","{snippet}"')
-
- # Combine into CSV content
- content = "\n".join(csvLines)
-
- return self.formatAgentDocumentOutput(outputLabel, content, "text/csv")
-
- except Exception as e:
- logger.error(f"Error creating CSV document: {str(e)}")
- return self.formatAgentDocumentOutput(outputLabel, "Error,Error\nFailed to create CSV,{0}".format(str(e)), "text/csv")
-
- def _determineFormatType(self, outputLabel: str) -> str:
- """
- Determine the format type based on the filename.
-
- Args:
- outputLabel: Output filename
-
- Returns:
- Format type (markdown, html, text, json, csv)
- """
- outputLabelLower = outputLabel.lower()
-
- if outputLabelLower.endswith(".md"):
- return "markdown"
- elif outputLabelLower.endswith(".html"):
- return "html"
- elif outputLabelLower.endswith(".txt"):
- return "text"
- elif outputLabelLower.endswith(".json"):
- return "json"
- elif outputLabelLower.endswith(".csv"):
- return "csv"
- else:
- # Default to markdown
- return "markdown"
-
- def _searchWeb(self, query: str) -> List[Dict[str, str]]:
- """
- Conduct a web search and return the results.
-
- Args:
- query: The search query
-
- Returns:
- List of search results
- """
- formattedQuery = quote_plus(query)
- url = f"{self.searchEngine}{formattedQuery}"
-
- searchResultsSoup = self._readUrl(url)
- if not searchResultsSoup or not searchResultsSoup.select('.result'):
- logger.warning(f"No search results found for: {query}")
- return []
-
- # Extract search results
- results = []
-
- # Find all result containers
- resultElements = searchResultsSoup.select('.result')
-
- for result in resultElements:
- # Extract title
- titleElement = result.select_one('.result__a')
- title = titleElement.text.strip() if titleElement else 'No title'
-
- # Extract URL (DuckDuckGo uses redirects)
- urlElement = titleElement.get('href') if titleElement else ''
- extractedUrl = 'No URL'
-
- if urlElement:
- # Extract actual URL from DuckDuckGo's redirect
- if urlElement.startswith('/d.js?q='):
- start = urlElement.find('?q=') + 3
- end = urlElement.find('&', start) if '&' in urlElement[start:] else None
- extractedUrl = unquote(urlElement[start:end])
-
- # Ensure URL has correct protocol prefix
- if not extractedUrl.startswith(('http://', 'https://')):
- if not extractedUrl.startswith('//'):
- extractedUrl = 'https://' + extractedUrl
- else:
- extractedUrl = 'https:' + extractedUrl
- else:
- extractedUrl = urlElement
-
- # Extract snippet directly from search results page
- snippetElement = result.select_one('.result__snippet')
- snippet = snippetElement.text.strip() if snippetElement else 'No description'
-
- # Get actual page content
- try:
- targetPageSoup = self._readUrl(extractedUrl)
- content = self._extractMainContent(targetPageSoup)
- except Exception as e:
- logger.warning(f"Error extracting content from {extractedUrl}: {str(e)}")
- content = f"Error extracting content: {str(e)}"
-
- results.append({
- 'title': title,
- 'url': extractedUrl,
- 'snippet': snippet,
- 'data': content
- })
-
- # Limit number of results
- if len(results) >= self.maxResults:
- break
-
- return results
-
- def _readUrl(self, url: str) -> BeautifulSoup:
- """
- Read a URL and return a BeautifulSoup parser for the content.
-
- Args:
- url: The URL to read
-
- Returns:
- BeautifulSoup object with the content or None on errors
- """
- if not url or not url.startswith(('http://', 'https://')):
- return None
-
- headers = {
- 'User-Agent': self.userAgent,
- 'Accept': 'text/html,application/xhtml+xml,application/xml',
- 'Accept-Language': 'en-US,en;q=0.9',
- }
-
- try:
- # Initial request
- response = requests.get(url, headers=headers, timeout=self.timeout)
-
- # Handling for status 202
- if response.status_code == 202:
- # Retry with backoff
- backoffTimes = [0.5, 1.0, 2.0, 5.0]
-
- for waitTime in backoffTimes:
- time.sleep(waitTime)
- response = requests.get(url, headers=headers, timeout=self.timeout)
-
- if response.status_code != 202:
- break
-
- # Raise for error status codes
- response.raise_for_status()
-
- # Parse HTML
- return BeautifulSoup(response.text, 'html.parser')
-
- except Exception as e:
- logger.error(f"Error reading URL {url}: {str(e)}")
- return None
-
- def _extractTitle(self, soup: BeautifulSoup, url: str) -> str:
- """
- Extract the title from a webpage.
-
- Args:
- soup: BeautifulSoup object of the webpage
- url: URL of the webpage
-
- Returns:
- Extracted title
- """
- if not soup:
- return f"Error with {url}"
-
- # Extract title from title tag
- titleTag = soup.find('title')
- title = titleTag.text.strip() if titleTag else "No title"
-
- # Alternative: Also look for h1 tags if title tag is missing
- if title == "No title":
- h1Tag = soup.find('h1')
- if h1Tag:
- title = h1Tag.text.strip()
-
- return title
-
- def _extractMainContent(self, soup: BeautifulSoup, maxChars: int = 10000) -> str:
- """
- Extract the main content from an HTML page.
-
- Args:
- soup: BeautifulSoup object of the webpage
- maxChars: Maximum number of characters
-
- Returns:
- Extracted main content as a string
- """
- if not soup:
- return ""
-
- # Try to find main content elements in priority order
- mainContent = None
- for selector in ['main', 'article', '#content', '.content', '#main', '.main']:
- content = soup.select_one(selector)
- if content:
- mainContent = content
- break
-
- # If no main content found, use the body
- if not mainContent:
- mainContent = soup.find('body') or soup
-
- # Remove script, style, nav, footer elements that don't contribute to main content
- for element in mainContent.select('script, style, nav, footer, header, aside, .sidebar, #sidebar, .comments, #comments, .advertisement, .ads, iframe'):
- element.extract()
-
- # Extract text content
- textContent = mainContent.get_text(separator=' ', strip=True)
-
- # Limit to maxChars
- return textContent[:maxChars]
-
- def _limitText(self, text: str, maxChars: int = 10000) -> str:
- """
- Limit text to a maximum number of characters.
-
- Args:
- text: Input text
- maxChars: Maximum number of characters
-
- Returns:
- Limited text
- """
- if not text:
- return ""
-
- # If text is already under the limit, return unchanged
- if len(text) <= maxChars:
- return text
-
- # Otherwise limit text to maxChars
- return text[:maxChars] + "... [Content truncated due to length]"
-
-
-# Factory function for the Webcrawler agent
-def getAgentWebcrawler():
- """Returns an instance of the Webcrawler agent."""
- return AgentWebcrawler()
\ No newline at end of file
diff --git a/static/124_defAttributes.py b/static/124_defAttributes.py
deleted file mode 100644
index 731ecfd9..00000000
--- a/static/124_defAttributes.py
+++ /dev/null
@@ -1,123 +0,0 @@
-from pydantic import BaseModel, Field
-from typing import List, Dict, Any, Optional
-
-# Define the model for attribute definitions
-class AttributeDefinition(BaseModel):
- name: str
- label: str
- type: str
- required: bool = False
- placeholder: Optional[str] = None
- defaultValue: Optional[Any] = None
- options: Optional[List[Dict[str, Any]]] = None
- editable: bool = True
- visible: bool = True
- order: int = 0
- validation: Optional[Dict[str, Any]] = None
- helpText: Optional[str] = None
-
-# Helper classes for type mapping
-typeMappings = {
- "int": "number",
- "str": "string",
- "float": "number",
- "bool": "boolean",
- "List[int]": "array",
- "List[str]": "array",
- "Dict[str, Any]": "object",
- "Optional[str]": "string",
- "Optional[int]": "number",
- "Optional[Dict[str, Any]]": "object"
-}
-
-# Special field types based on naming conventions
-specialFieldTypes = {
- "content": "textarea",
- "description": "textarea",
- "instructions": "textarea",
- "password": "password",
- "email": "email",
- "workspaceId": "select",
- "agentId": "select",
- "type": "select"
-}
-
-# Function to convert a Pydantic model into attribute definitions
-def getModelAttributes(modelClass, userLanguage="de"):
- """
- Converts a Pydantic model into a list of AttributeDefinition objects
- """
- attributes = []
-
- # Go through all fields in the model
- for i, (fieldName, field) in enumerate(modelClass.__fields__.items()):
- # Skip internal fields
- if fieldName.startswith('_') or fieldName in ["label", "fieldLabels"]:
- continue
-
- # Determine the field type
- fieldType = typeMappings.get(str(field.type_), "string")
-
- # Check for special field types
- if fieldName in specialFieldTypes:
- fieldType = specialFieldTypes[fieldName]
-
- # Get the label (if available)
- fieldLabel = fieldName.replace('_', ' ').capitalize()
- if hasattr(modelClass, 'fieldLabels') and fieldName in modelClass.fieldLabels:
- labelObj = modelClass.fieldLabels[fieldName]
- fieldLabel = labelObj.getLabel(userLanguage)
-
- # Determine default values and required status
- required = field.required
- defaultValue = field.default if not field.required else None
-
- # Check for validation rules
- validation = None
- if field.validators:
- validation = {"hasValidators": True}
-
- # Placeholder text
- placeholder = f"Please enter {fieldLabel}"
-
- # Special options for Select fields
- options = None
- if fieldType == "select":
- if fieldName == "type" and modelClass.__name__ == "Agent":
- options = [
- {"value": "Analysis", "label": "Analysis"},
- {"value": "Transformation", "label": "Transformation"},
- {"value": "Generation", "label": "Generation"},
- {"value": "Classification", "label": "Classification"},
- {"value": "Custom", "label": "Custom"}
- ]
-
- # Extract description from Field object
- description = None
- # Try to get description from various possible sources
- if hasattr(field, 'field_info') and hasattr(field.field_info, 'description'):
- description = field.field_info.description
- elif hasattr(field, 'description'):
- description = field.description
- elif hasattr(field, 'schema') and hasattr(field.schema, 'description'):
- description = field.schema.description
-
- # Create attribute definition
- attrDef = AttributeDefinition(
- name=fieldName,
- label=fieldLabel,
- type=fieldType,
- required=required,
- placeholder=placeholder,
- defaultValue=defaultValue,
- options=options,
- editable=fieldName not in ["id", "mandateId", "userId", "createdAt", "uploadDate"],
- visible=fieldName not in ["hashedPassword", "mandateId", "userId"],
- order=i,
- validation=validation,
- helpText=description or "" # Set empty string as default value if no description found
- )
-
- attributes.append(attrDef)
-
- return attributes
\ No newline at end of file
diff --git a/static/125_gatewayInterface.py b/static/125_gatewayInterface.py
deleted file mode 100644
index 3e1120c7..00000000
--- a/static/125_gatewayInterface.py
+++ /dev/null
@@ -1,471 +0,0 @@
-"""
-Interface to the Gateway system.
-Manages users and mandates for authentication.
-"""
-
-import os
-import logging
-from typing import Dict, Any, List, Optional, Union
-import importlib
-from passlib.context import CryptContext
-
-from connectors.connectorDbJson import DatabaseConnector
-from modules.configuration import APP_CONFIG
-
-logger = logging.getLogger(__name__)
-
-# Password-Hashing
-pwdContext = CryptContext(schemes=["argon2"], deprecated="auto")
-
-
-class GatewayInterface:
- """
- Interface to the Gateway system.
- Manages users and mandates.
- """
-
- def __init__(self, mandateId: int = None, userId: int = None):
- """
- Initializes the Gateway Interface with optional mandate and user context.
-
- Args:
- mandateId: ID of the current mandate (optional)
- userId: ID of the current user (optional)
- """
- # Context can be empty during initialization
- self.mandateId = mandateId
- self.userId = userId
-
- # Import data model module
- try:
- self.modelModule = importlib.import_module("modules.gatewayModel")
- logger.info("gatewayModel successfully imported")
- except ImportError as e:
- logger.error(f"Error importing gatewayModel: {e}")
- raise
-
- # Initialize database
- self._initializeDatabase()
-
- def _initializeDatabase(self):
- """
- Initializes the database with minimal objects
- """
-
- self.db = DatabaseConnector(
- dbHost=APP_CONFIG.get("DB_SYSTEM_HOST"),
- dbDatabase=APP_CONFIG.get("DB_SYSTEM_DATABASE"),
- dbUser=APP_CONFIG.get("DB_SYSTEM_USER"),
- dbPassword=APP_CONFIG.get("DB_SYSTEM_PASSWORD_SECRET"),
- mandateId=self.mandateId if self.mandateId else 0,
- userId=self.userId if self.userId else 0
- )
-
- # Create Root mandate if needed
- existingMandateId = self.getInitialId("mandates")
- mandates = self.db.getRecordset("mandates")
- if existingMandateId is None or not mandates:
- logger.info("Creating Root mandate")
- rootMandate = {
- "name": "Root",
- "language": "de"
- }
- createdMandate = self.db.recordCreate("mandates", rootMandate)
- logger.info(f"Root mandate created with ID {createdMandate['id']}")
-
- # Update mandate context
- self.mandateId = createdMandate['id']
- self.userId = createdMandate['userId']
-
- # Recreate connector with correct context
- self.db = DatabaseConnector(
- dbHost=APP_CONFIG.get("DB_SYSTEM_HOST"),
- dbDatabase=APP_CONFIG.get("DB_SYSTEM_DATABASE"),
- dbUser=APP_CONFIG.get("DB_SYSTEM_USER"),
- dbPassword=APP_CONFIG.get("DB_SYSTEM_PASSWORD_SECRET"),
- mandateId=self.mandateId,
- userId=self.userId
- )
-
- # Create Admin user if needed
- existingUserId = self.getInitialId("users")
- users = self.db.getRecordset("users")
- if existingUserId is None or not users:
- logger.info("Creating Admin user")
- adminUser = {
- "mandateId": self.mandateId,
- "username": "admin",
- "email": "admin@example.com",
- "fullName": "Administrator",
- "disabled": False,
- "language": "de",
- "privilege": "sysadmin", # SysAdmin privilege
- "hashedPassword": self._getPasswordHash("admin") # Use a secure password in production!
- }
- createdUser = self.db.recordCreate("users", adminUser)
- logger.info(f"Admin user created with ID {createdUser['id']}")
-
- # Update user context
- self.userId = createdUser['id']
-
- # Recreate connector with correct context
- self.db = DatabaseConnector(
- dbHost=APP_CONFIG.get("DB_SYSTEM_HOST"),
- dbDatabase=APP_CONFIG.get("DB_SYSTEM_DATABASE"),
- dbUser=APP_CONFIG.get("DB_SYSTEM_USER"),
- dbPassword=APP_CONFIG.get("DB_SYSTEM_PASSWORD_SECRET"),
- mandateId=self.mandateId,
- userId=self.userId
- )
-
- def getInitialId(self, table: str) -> Optional[int]:
- """Returns the initial ID for a table"""
- return self.db.getInitialId(table)
-
- def _getPasswordHash(self, password: str) -> str:
- """Creates a hash for a password"""
- return pwdContext.hash(password)
-
- def _verifyPassword(self, plainPassword: str, hashedPassword: str) -> bool:
- """Checks if the password matches the hash"""
- return pwdContext.verify(plainPassword, hashedPassword)
-
- def _getCurrentTimestamp(self) -> str:
- """Returns the current timestamp in ISO format"""
- from datetime import datetime
- return datetime.now().isoformat()
-
- # Mandate methods
-
- def getAllMandates(self) -> List[Dict[str, Any]]:
- """Returns all mandates"""
- return self.db.getRecordset("mandates")
-
- def getMandate(self, mandateId: int) -> Optional[Dict[str, Any]]:
- """Returns a mandate by its ID"""
- mandates = self.db.getRecordset("mandates", recordFilter={"id": mandateId})
- if mandates:
- return mandates[0]
- return None
-
- def createMandate(self, name: str, language: str = "de") -> Dict[str, Any]:
- """Creates a new mandate"""
- mandateData = {
- "name": name,
- "language": language
- }
-
- return self.db.recordCreate("mandates", mandateData)
-
- def updateMandate(self, mandateId: int, mandateData: Dict[str, Any]) -> Dict[str, Any]:
- """
- Updates an existing mandate
-
- Args:
- mandateId: The ID of the mandate to update
- mandateData: The mandate data to update
-
- Returns:
- Dict[str, Any]: The updated mandate data
-
- Raises:
- ValueError: If the mandate is not found
- """
- # Check if the mandate exists
- mandate = self.getMandate(mandateId)
- if not mandate:
- raise ValueError(f"Mandate with ID {mandateId} not found")
-
- # Update the mandate
- updatedMandate = self.db.recordModify("mandates", mandateId, mandateData)
-
- return updatedMandate
-
- def deleteMandate(self, mandateId: int) -> bool:
- """
- Deletes a mandate and all associated users and data
-
- Args:
- mandateId: The ID of the mandate to delete
-
- Returns:
- bool: True if the mandate was successfully deleted, otherwise False
- """
- # Check if the mandate exists
- mandate = self.getMandate(mandateId)
- if not mandate:
- return False
-
- # Check if it's the initial mandate
- initialMandateId = self.getInitialId("mandates")
- if initialMandateId is not None and mandateId == initialMandateId:
- logger.warning(f"Attempt to delete the Root mandate was prevented")
- return False
-
- # Find all users of the mandate
- users = self.getUsersByMandate(mandateId)
-
- # Delete all users of the mandate and their associated data
- for user in users:
- self.deleteUser(user["id"])
-
- # Delete the mandate
- success = self.db.recordDelete("mandates", mandateId)
-
- if success:
- logger.info(f"Mandate with ID {mandateId} was successfully deleted")
- else:
- logger.error(f"Error deleting mandate with ID {mandateId}")
-
- return success
-
- # User methods
-
- def getAllUsers(self) -> List[Dict[str, Any]]:
- """Returns all users"""
- users = self.db.getRecordset("users")
- # Remove password hashes from the response
- for user in users:
- if "hashedPassword" in user:
- del user["hashedPassword"]
- return users
-
- def getUsersByMandate(self, mandateId: int) -> List[Dict[str, Any]]:
- """
- Returns all users of a specific mandate
-
- Args:
- mandateId: The ID of the mandate
-
- Returns:
- List[Dict[str, Any]]: List of users in the mandate
- """
- users = self.db.getRecordset("users", recordFilter={"mandateId": mandateId})
- # Remove password hashes from the response
- for user in users:
- if "hashedPassword" in user:
- del user["hashedPassword"]
- return users
-
- def getUserByUsername(self, username: str) -> Optional[Dict[str, Any]]:
- """Returns a user by username"""
- users = self.db.getRecordset("users")
- for user in users:
- if user.get("username") == username:
- return user
- return None
-
- def getUser(self, userId: int) -> Optional[Dict[str, Any]]:
- """Returns a user by ID"""
- users = self.db.getRecordset("users", recordFilter={"id": userId})
- if users:
- user = users[0]
- # Remove password hash from the API response
- if "hashedPassword" in user:
- userCopy = user.copy()
- del userCopy["hashedPassword"]
- return userCopy
- return user
- return None
-
- def createUser(self, username: str, password: str, email: str = None,
- fullName: str = None, language: str = "de", mandateId: int = None,
- disabled: bool = False, privilege: str = "user") -> Dict[str, Any]:
- """
- Creates a new user
-
- Args:
- username: The username
- password: The password
- email: The email address (optional)
- fullName: The full name (optional)
- language: The preferred language (default: "de")
- mandateId: The ID of the mandate (optional)
- disabled: Whether the user is disabled (default: False)
- privilege: The privilege level (default: "user")
-
- Returns:
- Dict[str, Any]: The created user data
-
- Raises:
- ValueError: If the username already exists
- """
- # Check if the username already exists
- existingUser = self.getUserByUsername(username)
- if existingUser:
- raise ValueError(f"User '{username}' already exists")
-
- # Use the provided mandateId or the current context
- userMandateId = mandateId if mandateId is not None else self.mandateId
-
- userData = {
- "mandateId": userMandateId,
- "username": username,
- "email": email,
- "fullName": fullName,
- "disabled": disabled,
- "language": language,
- "privilege": privilege,
- "hashedPassword": self._getPasswordHash(password)
- }
-
- createdUser = self.db.recordCreate("users", userData)
-
- # Remove password hash from the response
- if "hashedPassword" in createdUser:
- del createdUser["hashedPassword"]
-
- return createdUser
-
- def authenticateUser(self, username: str, password: str) -> Optional[Dict[str, Any]]:
- """
- Authenticates a user by username and password
-
- Args:
- username: The username
- password: The password
-
- Returns:
- Optional[Dict[str, Any]]: The user data or None if authentication fails
- """
- user = self.getUserByUsername(username)
-
- if not user:
- return None
-
- if not self._verifyPassword(password, user.get("hashedPassword", "")):
- return None
-
- # Check if the user is disabled
- if user.get("disabled", False):
- return None
-
- # Create a copy without password hash
- authenticatedUser = {**user}
- if "hashedPassword" in authenticatedUser:
- del authenticatedUser["hashedPassword"]
-
- return authenticatedUser
-
- def updateUser(self, userId: int, userData: Dict[str, Any]) -> Dict[str, Any]:
- """
- Updates a user
-
- Args:
- userId: The ID of the user to update
- userData: The user data to update
-
- Returns:
- Dict[str, Any]: The updated user data
-
- Raises:
- ValueError: If the user is not found
- """
- # Get the current user with password hash (directly from DB)
- users = self.db.getRecordset("users", recordFilter={"id": userId})
- if not users:
- raise ValueError(f"User with ID {userId} not found")
-
- user = users[0]
-
- # If the password is being changed, hash it
- if "password" in userData:
- userData["hashedPassword"] = self._getPasswordHash(userData["password"])
- del userData["password"]
-
- # Update the user
- updatedUser = self.db.recordModify("users", userId, userData)
-
- # Remove password hash from the response
- if "hashedPassword" in updatedUser:
- del updatedUser["hashedPassword"]
-
- return updatedUser
-
- def disableUser(self, userId: int) -> Dict[str, Any]:
- """Disables a user"""
- return self.updateUser(userId, {"disabled": True})
-
- def enableUser(self, userId: int) -> Dict[str, Any]:
- """Enables a user"""
- return self.updateUser(userId, {"disabled": False})
-
- def _deleteUserReferencedData(self, userId: int) -> None:
- """
- Deletes all data associated with a user
-
- Args:
- userId: The ID of the user
- """
- # Here all tables are searched and all entries referencing this user are deleted
-
- # Delete user attributes
- try:
- attributes = self.db.getRecordset("attributes", recordFilter={"userId": userId})
- for attribute in attributes:
- self.db.recordDelete("attributes", attribute["id"])
- except Exception as e:
- logger.error(f"Error deleting attributes for user {userId}: {e}")
-
- # Other tables that might reference the user
- # (Depending on the application's database structure)
-
- logger.info(f"All referenced data for user {userId} has been deleted")
-
- def deleteUser(self, userId: int) -> bool:
- """
- Deletes a user and all associated data
-
- Args:
- userId: The ID of the user to delete
-
- Returns:
- bool: True if the user was successfully deleted, otherwise False
- """
- # Check if the user exists
- users = self.db.getRecordset("users", recordFilter={"id": userId})
- if not users:
- return False
-
- # Check if it's the initial user
- initialUserId = self.getInitialId("users")
- if initialUserId is not None and userId == initialUserId:
- logger.warning("Attempt to delete the Root Admin was prevented")
- return False
-
- # Delete all data associated with the user
- self._deleteUserReferencedData(userId)
-
- # Delete the user
- success = self.db.recordDelete("users", userId)
-
- if success:
- logger.info(f"User with ID {userId} was successfully deleted")
- else:
- logger.error(f"Error deleting user with ID {userId}")
-
- return success
-
-
-# Singleton factory for GatewayInterface instances per context
-_gatewayInterfaces = {}
-
-def getGatewayInterface(mandateId: int = None, userId: int = None) -> GatewayInterface:
- """
- Returns a GatewayInterface instance for the specified context.
- Reuses existing instances.
-
- Args:
- mandateId: ID of the mandate
- userId: ID of the user
-
- Returns:
- GatewayInterface instance
- """
- contextKey = f"{mandateId}_{userId}"
- if contextKey not in _gatewayInterfaces:
- _gatewayInterfaces[contextKey] = GatewayInterface(mandateId, userId)
- return _gatewayInterfaces[contextKey]
-
-# Initialize the interface
-getGatewayInterface()
\ No newline at end of file
diff --git a/static/126_gatewayModel.py b/static/126_gatewayModel.py
deleted file mode 100644
index 83d759c7..00000000
--- a/static/126_gatewayModel.py
+++ /dev/null
@@ -1,103 +0,0 @@
-"""
-Data models for the gateway system.
-"""
-from pydantic import BaseModel, Field
-from typing import List, Dict, Any, Optional
-from datetime import datetime
-
-
-class Label(BaseModel):
- """Label for an attribute or a class with support for multiple languages"""
- default: str
- translations: Dict[str, str] = {}
-
- def getLabel(self, language: str = None):
- """Returns the label in the specified language, or the default value if not available"""
- if language and language in self.translations:
- return self.translations[language]
- return self.default
-
-
-class Mandate(BaseModel):
- """Data model for a mandate"""
- id: int = Field(description="Unique ID of the mandate")
- name: str = Field(description="Name of the mandate")
- language: str = Field(description="Default language of the mandate")
-
- label: Label = Field(
- default=Label(default="Mandate", translations={"en": "Mandate", "fr": "Mandat"}),
- description="Label for the class"
- )
-
- # Labels for attributes
- fieldLabels: Dict[str, Label] = {
- "id": Label(default="ID", translations={}),
- "name": Label(default="Name of the mandate", translations={"en": "Mandate name", "fr": "Nom du mandat"}),
- "language": Label(default="Language", translations={"en": "Language", "fr": "Langue"})
- }
-
-class User(BaseModel):
- """Data model for a user"""
- id: int = Field(description="Unique ID of the user")
- mandateId: int = Field(description="ID of the associated mandate")
- username: str = Field(description="Username for login")
- email: Optional[str] = Field(None, description="Email address of the user")
- fullName: Optional[str] = Field(None, description="Full name of the user")
- language: str = Field(description="Preferred language of the user")
- disabled: Optional[bool] = Field(False, description="Indicates whether the user is disabled")
- privilege: str = Field(description="Permission level") #sysadmin,admin,user
-
- label: Label = Field(
- default=Label(default="User", translations={"en": "User", "fr": "Utilisateur"}),
- description="Label for the class"
- )
-
- # Labels for attributes
- fieldLabels: Dict[str, Label] = {
- "id": Label(default="ID", translations={}),
- "mandateId": Label(default="Mandate ID", translations={"en": "Mandate ID", "fr": "ID de mandat"}),
- "username": Label(default="Username", translations={"en": "Username", "fr": "Nom d'utilisateur"}),
- "email": Label(default="Email", translations={"en": "Email", "fr": "E-mail"}),
- "fullName": Label(default="Full name", translations={"en": "Full name", "fr": "Nom complet"}),
- "language": Label(default="Language", translations={"en": "Language", "fr": "Langue"}),
- "disabled": Label(default="Disabled", translations={"en": "Disabled", "fr": "Désactivé"}),
- "privilege": Label(default="Permission level", translations={"en": "Access level", "fr": "Niveau d'accès"}),
- }
-
-
-class UserInDB(User):
- """Extended user class with password hash"""
- hashedPassword: str = Field(description="Hash of the user password")
-
- label: Label = Field(
- default=Label(default="User Access", translations={"en": "User Access", "fr": "Accès de l'utilisateur"}),
- description="Label for the class"
- )
-
- # Additional label for the password field
- fieldLabels: Dict[str, Label] = {
- "hashedPassword": Label(default="Password hash", translations={"en": "Password hash", "fr": "Hachage de mot de passe"})
- }
-
-
-class Token(BaseModel):
- """Data model for an authentication token"""
- accessToken: str = Field(description="The issued access token")
- tokenType: str = Field(description="Type of token (usually 'bearer')")
- label: Label = Field(
- default=Label(default="Token", translations={"en": "Token", "fr": "Jeton"}),
- description="Label for the class"
- )
-
- # Labels for attributes
- fieldLabels: Dict[str, Label] = {
- "accessToken": Label(default="Access token", translations={"en": "Access token", "fr": "Jeton d'accès"}),
- "tokenType": Label(default="Token type", translations={"en": "Token type", "fr": "Type de jeton"})
- }
-
-
-class TokenData(BaseModel):
- """Data for token decoding and validation"""
- username: Optional[str] = None
- mandateId: Optional[int] = None
- exp: Optional[datetime] = None
\ No newline at end of file
diff --git a/static/127_documentProcessor.py b/static/127_documentProcessor.py
deleted file mode 100644
index 528109be..00000000
--- a/static/127_documentProcessor.py
+++ /dev/null
@@ -1,933 +0,0 @@
-"""
-Module for extracting content from various file formats.
-Provides specialized functions for processing text, PDF, Office documents, images, etc.
-"""
-
-import logging
-import os
-import io
-from typing import Dict, Any, List, Optional, Union, Tuple
-import base64
-
-# Configure logger
-logger = logging.getLogger(__name__)
-
-# Optional imports - only loaded when needed
-pdfExtractorLoaded = False
-officeExtractorLoaded = False
-imageProcessorLoaded = False
-
-def getDocumentContents(fileMetadata: Dict[str, Any], fileContent: bytes) -> List[Dict[str, Any]]:
- """
- Main function for extracting content from a file based on its MIME type.
- Delegates to specialized extraction functions.
-
- Args:
- fileMetadata: File metadata (Name, MIME type, etc.)
- fileContent: Binary data of the file
-
- Returns:
- List of Document-Content objects with metadata and base64Encoded flag
- """
- try:
- mimeType = fileMetadata.get("mimeType", "application/octet-stream")
- fileName = fileMetadata.get("name", "unknown")
-
- logger.info(f"Extracting content from file '{fileName}' (MIME type: {mimeType})")
-
- # Extract content based on MIME type
- contents = []
-
- # Text-based formats (excluding CSV which has its own handler)
- if mimeType == "text/csv":
- contents.extend(extractCsvContent(fileName, fileContent))
-
- # Then handle other text-based formats
- elif mimeType.startswith("text/") or mimeType in [
- "application/json",
- "application/xml",
- "application/javascript",
- "application/x-python"
- ]:
- contents.extend(extractTextContent(fileName, fileContent, mimeType))
-
- # SVG Files
- elif mimeType == "image/svg+xml":
- contents.extend(extractSvgContent(fileName, fileContent))
-
- # Images
- elif mimeType.startswith("image/"):
- contents.extend(extractImageContent(fileName, fileContent, mimeType))
-
- # PDF Documents
- elif mimeType == "application/pdf":
- contents.extend(extractPdfContent(fileName, fileContent))
-
- # Word Documents
- elif mimeType in [
- "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
- "application/msword"
- ]:
- contents.extend(extractWordContent(fileName, fileContent, mimeType))
-
- # Excel Documents
- elif mimeType in [
- "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
- "application/vnd.ms-excel"
- ]:
- contents.extend(extractExcelContent(fileName, fileContent, mimeType))
-
- # PowerPoint Documents
- elif mimeType in [
- "application/vnd.openxmlformats-officedocument.presentationml.presentation",
- "application/vnd.ms-powerpoint"
- ]:
- contents.extend(extractPowerpointContent(fileName, fileContent, mimeType))
-
- # Binary data as fallback for unknown formats
- else:
- contents.extend(extractBinaryContent(fileName, fileContent, mimeType))
-
- # Fallback when no content could be extracted
- if not contents:
- logger.warning(f"No content extracted from file '{fileName}', using binary fallback")
-
- # Convert binary content to base64
- encoded_data = base64.b64encode(fileContent).decode('utf-8')
-
- contents.append({
- "sequenceNr": 1,
- "name": '1_undefined',
- "ext": os.path.splitext(fileName)[1][1:] if os.path.splitext(fileName)[1] else "bin",
- "contentType": mimeType,
- "data": encoded_data,
- "base64Encoded": True,
- "metadata": {
- "isText": False
- }
- })
-
- # Add generic attributes for all documents
- for content in contents:
- # Make sure all content items have the base64Encoded flag
- if "base64Encoded" not in content:
- if isinstance(content.get("data"), bytes):
- # Convert bytes to base64
- content["data"] = base64.b64encode(content["data"]).decode('utf-8')
- content["base64Encoded"] = True
- else:
- # Assume text content if not explicitly marked
- content["base64Encoded"] = False
-
- # Maintain backward compatibility with old "base64Encoded" flag in metadata
- if "metadata" not in content:
- content["metadata"] = {}
-
- # Set base64Encoded in metadata for backward compatibility
- content["metadata"]["base64Encoded"] = content["base64Encoded"]
-
- logger.info(f"Successfully extracted {len(contents)} content items from file '{fileName}'")
- return contents
-
- except Exception as e:
- logger.error(f"Error during content extraction: {str(e)}")
- # Fallback on error - return original data
- return [{
- "sequenceNr": 1,
- "name": fileMetadata.get("name", "unknown"),
- "ext": os.path.splitext(fileMetadata.get("name", ""))[1][1:] if os.path.splitext(fileMetadata.get("name", ""))[1] else "bin",
- "contentType": fileMetadata.get("mimeType", "application/octet-stream"),
- "data": base64.b64encode(fileContent).decode('utf-8'),
- "base64Encoded": True,
- "metadata": {
- "isText": False,
- "base64Encoded": True # For backward compatibility
- }
- }]
-
-
-def _loadPdfExtractor():
- """Loads PDF extraction libraries when needed"""
- global pdfExtractorLoaded
- if not pdfExtractorLoaded:
- try:
- global PyPDF2, fitz
- import PyPDF2
- import fitz # PyMuPDF for more extensive PDF processing
- pdfExtractorLoaded = True
- logger.info("PDF extraction libraries successfully loaded")
- except ImportError as e:
- logger.warning(f"PDF extraction libraries could not be loaded: {e}")
-
-def _loadOfficeExtractor():
- """Loads Office document extraction libraries when needed"""
- global officeExtractorLoaded
- if not officeExtractorLoaded:
- try:
- global docx, openpyxl
- import docx # python-docx for Word documents
- import openpyxl # for Excel files
- officeExtractorLoaded = True
- logger.info("Office extraction libraries successfully loaded")
- except ImportError as e:
- logger.warning(f"Office extraction libraries could not be loaded: {e}")
-
-def _loadImageProcessor():
- """Loads image processing libraries when needed"""
- global imageProcessorLoaded
- if not imageProcessorLoaded:
- try:
- global PIL, Image
- from PIL import Image
- imageProcessorLoaded = True
- logger.info("Image processing libraries successfully loaded")
- except ImportError as e:
- logger.warning(f"Image processing libraries could not be loaded: {e}")
-
-def extractTextContent(fileName: str, fileContent: bytes, mimeType: str) -> List[Dict[str, Any]]:
- """
- Extracts text from text files.
-
- Args:
- fileName: Name of the file
- fileContent: Binary data of the file
- mimeType: MIME type of the file
-
- Returns:
- List of Text-Content objects with base64Encoded = False
- """
- try:
- # Keep original file extension
- fileExtension = os.path.splitext(fileName)[1][1:] if os.path.splitext(fileName)[1] else "txt"
-
- # Extract text content
- textContent = fileContent.decode('utf-8')
- return [{
- "sequenceNr": 1,
- "name": "1_text", # Simplified naming
- "ext": fileExtension,
- "contentType": "text",
- "data": textContent,
- "base64Encoded": False,
- "metadata": {
- "isText": True
- }
- }]
- except UnicodeDecodeError:
- logger.warning(f"Could not decode text from file '{fileName}' as UTF-8, trying alternative encodings")
- try:
- # Try alternative encodings
- for encoding in ['latin-1', 'cp1252', 'iso-8859-1']:
- try:
- textContent = fileContent.decode(encoding)
- logger.info(f"Text successfully decoded with encoding {encoding}")
- return [{
- "sequenceNr": 1,
- "name": "1_text", # Simplified naming
- "ext": fileExtension,
- "contentType": "text",
- "data": textContent,
- "base64Encoded": False,
- "metadata": {
- "isText": True,
- "encoding": encoding
- }
- }]
- except UnicodeDecodeError:
- continue
-
- # Fallback to binary data if no encoding works
- logger.warning(f"Could not decode text, using binary data")
- return [{
- "sequenceNr": 1,
- "name": "1_binary", # Simplified naming
- "ext": fileExtension,
- "contentType": mimeType,
- "data": base64.b64encode(fileContent).decode('utf-8'),
- "base64Encoded": True,
- "metadata": {
- "isText": False
- }
- }]
- except Exception as e:
- logger.error(f"Error in alternative text decoding: {str(e)}")
- # Return binary data as fallback
- return [{
- "sequenceNr": 1,
- "name": "1_binary", # Simplified naming
- "ext": fileExtension,
- "contentType": mimeType,
- "data": base64.b64encode(fileContent).decode('utf-8'),
- "base64Encoded": True,
- "metadata": {
- "isText": False
- }
- }]
-
-def extractCsvContent(fileName: str, fileContent: bytes) -> List[Dict[str, Any]]:
- """
- Extracts content from CSV files.
-
- Args:
- fileName: Name of the file
- fileContent: Binary data of the file
-
- Returns:
- List of CSV-Content objects with base64Encoded = False
- """
- try:
- # Extract text content
- csvContent = fileContent.decode('utf-8')
- return [{
- "sequenceNr": 1,
- "name": "1_csv", # Simplified naming
- "ext": "csv",
- "contentType": "csv",
- "data": csvContent,
- "base64Encoded": False,
- "metadata": {
- "isText": True,
- "format": "csv"
- }
- }]
- except UnicodeDecodeError:
- logger.warning(f"Could not decode CSV from file '{fileName}' as UTF-8, trying alternative encodings")
- try:
- # Try alternative encodings for CSV
- for encoding in ['latin-1', 'cp1252', 'iso-8859-1']:
- try:
- csvContent = fileContent.decode(encoding)
- logger.info(f"CSV successfully decoded with encoding {encoding}")
- return [{
- "sequenceNr": 1,
- "name": "1_csv", # Simplified naming
- "ext": "csv",
- "contentType": "csv",
- "data": csvContent,
- "base64Encoded": False,
- "metadata": {
- "isText": True,
- "encoding": encoding,
- "format": "csv"
- }
- }]
- except UnicodeDecodeError:
- continue
-
- # Fallback to binary data
- return [{
- "sequenceNr": 1,
- "name": "1_binary", # Simplified naming
- "ext": "csv",
- "contentType": "text/csv",
- "data": base64.b64encode(fileContent).decode('utf-8'),
- "base64Encoded": True,
- "metadata": {
- "isText": False
- }
- }]
- except Exception as e:
- logger.error(f"Error in alternative CSV decoding: {str(e)}")
- return [{
- "sequenceNr": 1,
- "name": "1_binary", # Simplified naming
- "ext": "csv",
- "contentType": "text/csv",
- "data": base64.b64encode(fileContent).decode('utf-8'),
- "base64Encoded": True,
- "metadata": {
- "isText": False
- }
- }]
-
-def extractSvgContent(fileName: str, fileContent: bytes) -> List[Dict[str, Any]]:
- """
- Extracts content from SVG files.
-
- Args:
- fileName: Name of the file
- fileContent: Binary data of the file
-
- Returns:
- List of SVG-Content objects with dual text/image metadata
- """
- contents = []
-
- try:
- # Extract SVG as text content (XML)
- svgText = fileContent.decode('utf-8')
-
- # Check if it's actually SVG by looking for the SVG tag
- if "