prod azure 1.0.8

This commit is contained in:
ValueOn AG 2025-05-05 01:11:46 +02:00
parent 0b0d4f4fd8
commit c22b4af1ac
96 changed files with 801 additions and 23074 deletions

1
.gitignore vendored
View file

@ -167,3 +167,4 @@ cython_debug/
# local data # local data
gwserver/_database* gwserver/_database*
gwserver/results/* gwserver/results/*
*.log.*

View file

@ -33,13 +33,13 @@ Security_FAILED_LOGIN_LIMIT = 5
Security_LOCK_DURATION_MINUTES = 30 Security_LOCK_DURATION_MINUTES = 30
# Agent Webcrawler configuration # Agent Webcrawler configuration
Agent_Webcrawler_TIMEOUT = 10 Agent_Webcrawler_SERPAPI_ENGINE = google
Agent_Webcrawler_MAX_URLS = 3 Agent_Webcrawler_SERPAPI_APIKEY = <REDACTED — a live SerpAPI key was committed to version control here; rotate the key immediately and load it from an environment variable or secrets store instead of the config file>
Agent_Webcrawler_MAX_TOKENS = 30000 Agent_Webcrawler_SERPAPI_MAX_URLS = 3
Agent_Webcrawler_USER_AGENT = Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36 Agent_Webcrawler_SERPAPI_MAX_SEARCH_KEYWORDS = 3
Agent_Webcrawler_SEARCH_ENGINE = https://html.duckduckgo.com/html/?q= Agent_Webcrawler_SERPAPI_MAX_SEARCH_RESULTS = 5
Agent_Webcrawler_MAX_SEARCH_KEYWORDS = 3 Agent_Webcrawler_SERPAPI_TIMEOUT = 10
Agent_Webcrawler_MAX_SEARCH_RESULTS = 5 Agent_Webcrawler_SERPAPI_USER_AGENT = Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36
# Agent Coder configuration # Agent Coder configuration
Agent_Coder_INSTALL_TIMEOUT = 180 Agent_Coder_INSTALL_TIMEOUT = 180

View file

@ -25,7 +25,7 @@ APP_TOKEN_EXPIRY=300
APP_ALLOWED_ORIGINS=http://localhost:8080,https://playground.poweron-center.net APP_ALLOWED_ORIGINS=http://localhost:8080,https://playground.poweron-center.net
# Logging configuration # Logging configuration
APP_LOGGING_LOG_LEVEL = DEBUG APP_LOGGING_LOG_LEVEL = WARNING
APP_LOGGING_LOG_FILE = /home/poweron.log APP_LOGGING_LOG_FILE = /home/poweron.log
APP_LOGGING_FORMAT = %(asctime)s - %(levelname)s - %(name)s - %(message)s APP_LOGGING_FORMAT = %(asctime)s - %(levelname)s - %(name)s - %(message)s
APP_LOGGING_DATE_FORMAT = %Y-%m-%d %H:%M:%S APP_LOGGING_DATE_FORMAT = %Y-%m-%d %H:%M:%S

View file

@ -131,12 +131,13 @@ class AgentCoder(AgentBase):
# 4. Generate code using AI # 4. Generate code using AI
code, requirements = await self._generateCode(prompt, outputSpecs) code, requirements = await self._generateCode(prompt, outputSpecs)
if not code: if not code:
return { return {
"feedback": "Failed to generate code for the task.", "feedback": "Failed to generate code for the task.",
"documents": [] "documents": []
} }
# Store the original code without document data
original_clean_code = code # Save clean code for later use in improvement
# 5. Replace the placeholder with actual inputFiles data # 5. Replace the placeholder with actual inputFiles data
documentDataJson = repr(documentData) documentDataJson = repr(documentData)
@ -171,7 +172,7 @@ class AgentCoder(AgentBase):
# Generate improved code based on error # Generate improved code based on error
improvedCode, improvedRequirements = await self._improveCode( improvedCode, improvedRequirements = await self._improveCode(
originalCode=codeWithData, originalCode=original_clean_code, # Use clean code without document data
error=error, error=error,
executionResult=executionResult, executionResult=executionResult,
attempt=retryCount + 1, attempt=retryCount + 1,
@ -179,7 +180,9 @@ class AgentCoder(AgentBase):
) )
if improvedCode: if improvedCode:
codeWithData = improvedCode # Inject document data into improved code
original_clean_code = improvedCode # Update clean code for next potential improvement
codeWithData = improvedCode.replace("inputFiles = \"=== JSONLOAD ===\"", f"inputFiles = {documentDataJson}")
requirements = improvedRequirements requirements = improvedRequirements
logger.info(f"Code improved for retry {retryCount + 2}") logger.info(f"Code improved for retry {retryCount + 2}")
else: else:
@ -313,14 +316,15 @@ STDOUT:
{outputSpecsStr} {outputSpecsStr}
INSTRUCTIONS: INSTRUCTIONS:
1. Fix all errors identified in the error message 1. Fix all errors identified in the error message
2. Diagnose and fix any logical issues 2. If there is a requirements error for missing or failed modules, then create alternate code with other modules
3. Pay special attention to: 3. Diagnose and fix any logical issues
4. Pay special attention to:
- Type conversions and data handling - Type conversions and data handling
- Error handling and edge cases - Error handling and edge cases
- Resource management (file handles, etc.) - Resource management (file handles, etc.)
- Syntax errors and typos - Syntax errors and typos
4. Keep the inputFiles handling logic intact 5. Keep the inputFiles handling logic intact
5. Maintain the same overall structure and purpose 6. Maintain the same overall structure and purpose
OUTPUT REQUIREMENTS (VERY IMPORTANT): OUTPUT REQUIREMENTS (VERY IMPORTANT):
- Your code MUST define a 'result' variable as a dictionary to store ALL outputs - Your code MUST define a 'result' variable as a dictionary to store ALL outputs

View file

@ -36,12 +36,18 @@ class AgentWebcrawler(AgentBase):
] ]
# Web crawling configuration # Web crawling configuration
self.maxUrl = int(APP_CONFIG.get("Agent_Webcrawler_MAX_URLS", "5")) self.srcApikey = APP_CONFIG.get("Agent_Webcrawler_SERPAPI_APIKEY","")
self.maxSearchTerms = int(APP_CONFIG.get("Agent_Webcrawler_MAX_SEARCH_KEYWORDS", "3")) self.srcEngine = APP_CONFIG.get("Agent_Webcrawler_SERPAPI_ENGINE","google")
self.maxResults = int(APP_CONFIG.get("Agent_Webcrawler_MAX_SEARCH_RESULTS", "5")) self.srcCountry = APP_CONFIG.get("Agent_Webcrawler_SERPAPI_COUNTRY","auto")
self.timeout = int(APP_CONFIG.get("Agent_Webcrawler_TIMEOUT", "30")) self.maxUrl = int(APP_CONFIG.get("Agent_Webcrawler_SERPAPI_MAX_URLS", "5"))
self.searchEngine = APP_CONFIG.get("Agent_Webcrawler_SEARCH_ENGINE", "https://html.duckduckgo.com/html/?q=") self.maxSearchTerms = int(APP_CONFIG.get("Agent_Webcrawler_SERPAPI_MAX_SEARCH_KEYWORDS", "3"))
self.userAgent = APP_CONFIG.get("Agent_Webcrawler_USER_AGENT", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36") self.maxResults = int(APP_CONFIG.get("Agent_Webcrawler_SERPAPI_MAX_SEARCH_RESULTS", "5"))
self.timeout = int(APP_CONFIG.get("Agent_Webcrawler_SERPAPI_TIMEOUT", "30"))
self.userAgent = APP_CONFIG.get("Agent_Webcrawler_SERPAPI_USER_AGENT", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36")
if not self.srcApikey:
logger.error("SerpAPI key not configured")
def setDependencies(self, mydom=None): def setDependencies(self, mydom=None):
"""Set external dependencies for the agent.""" """Set external dependencies for the agent."""
@ -589,7 +595,7 @@ class AgentWebcrawler(AgentBase):
def _searchWeb(self, query: str) -> List[Dict[str, str]]: def _searchWeb(self, query: str) -> List[Dict[str, str]]:
""" """
Conduct a web search and return the results. Conduct a web search using SerpAPI and return the results.
Args: Args:
query: The search query query: The search query
@ -597,60 +603,56 @@ class AgentWebcrawler(AgentBase):
Returns: Returns:
List of search results List of search results
""" """
formattedQuery = quote_plus(query) if not self.srcApikey:
url = f"{self.searchEngine}{formattedQuery}"
searchResultsSoup = self._readUrl(url)
if not searchResultsSoup or not searchResultsSoup.select('.result'):
logger.warning(f"No search results found for: {query}")
return [] return []
# Extract search results # Get user language from mydom if available
userLanguage = "en" # Default language
if self.mydom.userLanguage:
userLanguage = self.mydom.userLanguage
try:
# Format the search request for SerpAPI
params = {
"engine": self.srcEngine,
"q": query,
"api_key": self.srcApikey,
"num": self.maxResults, # Number of results to return
"hl": userLanguage # Identified user language
}
# Make the API request
response = requests.get("https://serpapi.com/search", params=params, timeout=self.timeout)
response.raise_for_status()
# Parse JSON response
search_results = response.json()
# Extract organic results
results = [] results = []
# Find all result containers if "organic_results" in search_results:
resultElements = searchResultsSoup.select('.result') for result in search_results["organic_results"][:self.maxResults]:
for result in resultElements:
# Extract title # Extract title
titleElement = result.select_one('.result__a') title = result.get("title", "No title")
title = titleElement.text.strip() if titleElement else 'No title'
# Extract URL (DuckDuckGo uses redirects) # Extract URL
urlElement = titleElement.get('href') if titleElement else '' url = result.get("link", "No URL")
extractedUrl = 'No URL'
if urlElement: # Extract snippet
# Extract actual URL from DuckDuckGo's redirect snippet = result.get("snippet", "No description")
if urlElement.startswith('/d.js?q='):
start = urlElement.find('?q=') + 3
end = urlElement.find('&', start) if '&' in urlElement[start:] else None
extractedUrl = unquote(urlElement[start:end])
# Ensure URL has correct protocol prefix
if not extractedUrl.startswith(('http://', 'https://')):
if not extractedUrl.startswith('//'):
extractedUrl = 'https://' + extractedUrl
else:
extractedUrl = 'https:' + extractedUrl
else:
extractedUrl = urlElement
# Extract snippet directly from search results page
snippetElement = result.select_one('.result__snippet')
snippet = snippetElement.text.strip() if snippetElement else 'No description'
# Get actual page content # Get actual page content
try: try:
targetPageSoup = self._readUrl(extractedUrl) targetPageSoup = self._readUrl(url)
content = self._extractMainContent(targetPageSoup) content = self._extractMainContent(targetPageSoup)
except Exception as e: except Exception as e:
logger.warning(f"Error extracting content from {extractedUrl}: {str(e)}") logger.warning(f"Error extracting content from {url}: {str(e)}")
content = f"Error extracting content: {str(e)}" content = f"Error extracting content: {str(e)}"
results.append({ results.append({
'title': title, 'title': title,
'url': extractedUrl, 'url': url,
'snippet': snippet, 'snippet': snippet,
'data': content 'data': content
}) })
@ -658,9 +660,15 @@ class AgentWebcrawler(AgentBase):
# Limit number of results # Limit number of results
if len(results) >= self.maxResults: if len(results) >= self.maxResults:
break break
else:
logger.warning(f"No organic results found in SerpAPI response for: {query}")
return results return results
except Exception as e:
logger.error(f"Error searching with SerpAPI for {query}: {str(e)}")
return []
def _readUrl(self, url: str) -> BeautifulSoup: def _readUrl(self, url: str) -> BeautifulSoup:
""" """
Read a URL and return a BeautifulSoup parser for the content. Read a URL and return a BeautifulSoup parser for the content.

View file

@ -229,14 +229,15 @@ class LucyDOMInterface:
# Regular users can create in most tables # Regular users can create in most tables
return True return True
# Language support methods # Language support method
def setUserLanguage(self, languageCode: str): def setUserLanguage(self, languageCode: str):
"""Set the user's preferred language""" """Set the user's preferred language"""
self.userLanguage = languageCode self.userLanguage = languageCode
self.currentUser["language"] = languageCode
logger.info(f"User language set to: {languageCode}") logger.info(f"User language set to: {languageCode}")
# AI Call Root Function
async def callAi(self, messages: List[Dict[str, str]], produceUserAnswer: bool = False, temperature: float = None) -> str: async def callAi(self, messages: List[Dict[str, str]], produceUserAnswer: bool = False, temperature: float = None) -> str:
"""Enhanced AI service call with language support.""" """Enhanced AI service call with language support."""
if not self.aiService: if not self.aiService:
@ -679,7 +680,7 @@ class LucyDOMInterface:
self.createFileData(dbFile["id"], fileContent) self.createFileData(dbFile["id"], fileContent)
# Debug: Export file to static folder # Debug: Export file to static folder
self._exportFileToStatic(fileContent, dbFile["id"], fileName) # self._exportFileToStatic(fileContent, dbFile["id"], fileName)
logger.info(f"File upload process completed for: {fileName}") logger.info(f"File upload process completed for: {fileName}")
return dbFile return dbFile

View file

@ -860,6 +860,13 @@ filesDelivered = {self.parseJson2text(matchingDocuments)}
return preparedInputs return preparedInputs
async def messageSummarizeContent(self, content: Dict[str, Any]) -> str:
return await self.getContentExtraction(
content,
"Create a very concise summary (1-2 sentences, maximum 200 characters) about this content."
)
async def processDocumentForAgent(self, document: Dict[str, Any], docSpec: Dict[str, Any]) -> Dict[str, Any]: async def processDocumentForAgent(self, document: Dict[str, Any], docSpec: Dict[str, Any]) -> Dict[str, Any]:
""" """
Processes a document for an agent based on the document specification. Processes a document for an agent based on the document specification.
@ -880,83 +887,72 @@ filesDelivered = {self.parseJson2text(matchingDocuments)}
processedContents = [] processedContents = []
for content in processedDoc["contents"]: for content in processedDoc["contents"]:
# Check if part required # Check if part required
if partSpec != "" and partSpec != content.get("name"): if partSpec != "" and partSpec != content.get("name"):
continue continue
# Get the data from the content # Get the prompt from the document specification
data = content.get("data", "")
processedContent = content.copy()
# Check if content data is base64 encoded
isBase64 = content.get("metadata", {}).get("base64Encoded", False)
try:
# Use the AI service to process the document content according to the prompt from the project manager for the document specification
summary = docSpec.get("prompt", "Extract the relevant information from this document") summary = docSpec.get("prompt", "Extract the relevant information from this document")
aiPrompt = f"""
# Please process the following document content according to this instruction:
<instruction>
{summary}
</instruction>
# Document content: # Process content using the shared helper function
<data> processedContent = content.copy()
{data} processedContent["dataExtracted"] = await self.getContentExtraction(content, summary)
</data>
# Extract and provide only the relevant information as requested.
"""
# Call the AI service through mydom for language support
processedData = await self.mydom.callAi([
{"role": "system", "content": "You are a document processing assistant. Extract only the relevant information as requested."},
{"role": "user", "content": aiPrompt}
])
# DO NOT change the original data field
# processedContent["data"] unchanged
processedContent["dataExtracted"] = processedData
processedContent["metadata"]["aiProcessed"] = True processedContent["metadata"]["aiProcessed"] = True
except Exception as e:
logger.error(f"Error processing document content with AI: {str(e)}")
# Fall back to original content if AI processing fails
processedContent["dataExtracted"] = "(no information)"
processedContents.append(processedContent) processedContents.append(processedContent)
processedDoc["contents"] = processedContents processedDoc["contents"] = processedContents
return processedDoc return processedDoc
async def messageSummarizeContent(self, content: Dict[str, Any]) -> str: async def getContentExtraction(self, content: Dict[str, Any], prompt: str = None) -> str:
""" """
Generates a summary for a content item using AI. Helper function that extracts or summarizes content based on its type (text/image/binary).
Args: Args:
content: Content item to summarize (already processed by getDocumentContents) content: Content item to analyze
prompt: Optional custom prompt for extraction (default prompts used if not provided)
Returns: Returns:
Brief summary of the content Extracted or summarized content as text
""" """
# Extract relevant information # Extract relevant information
data = content.get("data", "") data = content.get("data", "")
contentType = content.get("contentType", "text/plain") contentType = content.get("contentType", "text/plain")
isText = content.get("metadata", {}).get("isText", False) base64Encoded = content.get("base64Encoded", False)
# Default prompts if none provided
if prompt is None:
text_prompt = "Create a very concise summary (1-2 sentences, maximum 200 characters) about this content."
image_prompt = "Create a very concise summary (1-2 sentences, maximum 200 characters) about this image."
else:
text_prompt = prompt
image_prompt = prompt
try: try:
# Use the mydom for language-aware AI calls # For image content, use the specialized image analysis
summary = await self.mydom.callAi([ if contentType.startswith("image/") or content.get("metadata", {}).get("isImage", False):
{"role": "system", "content": "You are a content summarizer. Create very concise summary (1-2 sentences, maximum 200 characters) about this file."}, # analyzeImage handles base64 encoded data internally
{"role": "user", "content": f"Summarize this {contentType} content briefly:\n\n{data}"} return await self.mydom.analyzeImage(data, contentType, image_prompt)
# For binary data (base64Encoded but not an image), provide a generic description
elif base64Encoded:
metadata = content.get("metadata", {})
format_type = metadata.get("format", "unknown")
return f"Binary {format_type} data ({contentType})"
# For text data, use the regular AI processing
else:
return await self.mydom.callAi([
{"role": "system", "content": "You are a content analyzer. Process the provided content as instructed."},
{"role": "user", "content": f"{text_prompt}\n\n{data}"}
]) ])
return summary
except Exception as e: except Exception as e:
logger.error(f"Error generating content summary: {str(e)}") logger.error(f"Error processing content: {str(e)}")
return f"Text content ({contentType})" return f"Content of type {contentType} (processing failed)"
def messageAdd(self, workflow: Dict[str, Any], message: Dict[str, Any]) -> Dict[str, Any]: def messageAdd(self, workflow: Dict[str, Any], message: Dict[str, Any]) -> Dict[str, Any]:
""" """

View file

@ -1,20 +1,15 @@
....................... TASKS ....................... TASKS
UI: Workflow reset does not reset log and messages view
----------------------- OPEN ----------------------- OPEN
PRIO1: PRIO1:
CHECK: If pictures not displayed to check utf-8 encoding in the base64 string!! general file writing and reading (example with svg) CHECK: If pictures not displayed to check utf-8 encoding in the base64 string!! general file writing and reading (example with svg)
STOP File export to static folder (TODO)
add connector to myoutlook add connector to myoutlook
PRIO2: PRIO2:
todo an agent for "code writing and editing" connected to the codebase, working in loops over each document... todo an agent for "code writing and editing" connected to the codebase, working in loops over each document...
@ -30,7 +25,12 @@ frontend to react
frontend: no labels definition frontend: no labels definition
PRIO3:
Tools to transfer incl funds:
- Google SERPAPI (shelly)
- Anthropic Claude (valueon + shelly)
-
----------------------- DONE ----------------------- DONE

643
notes/doc_system.md Normal file
View file

@ -0,0 +1,643 @@
# Agent Chat System Handbook
# Einführung in das Agent Chat System Handbuch
## Zweck und Umfang des Dokuments
Willkommen zum "Agent Chat System Handbook". Dieses Handbuch dient als umfassende Anleitung für die Implementierung und Verwaltung eines Agent Chat Systems unter Verwendung von FastAPI. Es richtet sich an technische Fachkräfte, die für die Einrichtung, Verwaltung und Optimierung von Chat-Systemen verantwortlich sind. Ziel ist es, Ihnen die notwendigen Kenntnisse und Werkzeuge an die Hand zu geben, um ein effizientes und sicheres Chat-System zu entwickeln und zu betreiben.
## Kontext und Hintergrundinformationen
In der heutigen digitalen Welt sind Chat-Systeme ein wesentlicher Bestandteil der Kundenkommunikation und des Supports. Mit der zunehmenden Integration von Künstlicher Intelligenz (KI) in diese Systeme wird es immer wichtiger, robuste und skalierbare Lösungen zu entwickeln. FastAPI bietet eine moderne und leistungsstarke Plattform zur Erstellung von Web-APIs, die sich ideal für die Entwicklung eines solchen Systems eignet. Dieses Handbuch basiert auf den bereitgestellten FastAPI-Anwendungsdateien und bietet eine detaillierte Anleitung zur Implementierung eines Agent Chat Systems.
## Inhalt des Dokuments
Im "Agent Chat System Handbook" finden Sie detaillierte Informationen zu folgenden Themen:
- **FastAPI Setup**: Schritt-für-Schritt-Anleitung zur Einrichtung der FastAPI-Umgebung.
- **Benutzerverwaltung**: Methoden zur Verwaltung von Benutzerkonten und -rollen.
- **KI-Integration**: Implementierung von KI-Funktionen zur Verbesserung der Chat-Interaktionen.
- **Authentifizierung**: Sicherstellung der Sicherheit und Integrität des Systems durch robuste Authentifizierungsmechanismen.
- **Mandatsverwaltung**: Verwaltung von Benutzerrechten und -mandaten innerhalb des Systems.
- **Attributverwaltung**: Umgang mit benutzerdefinierten Attributen und deren Verwaltung.
- **Prompt-Management**: Erstellung und Verwaltung von Eingabeaufforderungen für die KI-Interaktion.
- **Dateioperationen**: Verwaltung und Verarbeitung von Dateien innerhalb des Systems.
- **Workflow-Management**: Optimierung und Automatisierung von Arbeitsabläufen im Chat-System.
## Ton und Zielgruppe
Dieses Handbuch ist in einem formellen und technischen Ton verfasst, um den Anforderungen einer professionellen Leserschaft gerecht zu werden. Es richtet sich an Entwickler, Systemadministratoren und technische Projektleiter, die mit der Implementierung und Verwaltung von Chat-Systemen betraut sind. Wir empfehlen, dass die Leser über grundlegende Kenntnisse in FastAPI und Web-API-Entwicklung verfügen, um den maximalen Nutzen aus diesem Handbuch zu ziehen.
Wir hoffen, dass dieses Handbuch Ihnen als wertvolle Ressource dient und Sie bei der erfolgreichen Implementierung Ihres Agent Chat Systems unterstützt.
# Einführung
## Zweck des Handbuchs
Das "Agent Chat System Handbook" dient als umfassende Anleitung zur Implementierung und Nutzung des Agenten-Chat-Systems, das auf der FastAPI-Plattform basiert. Dieses Handbuch richtet sich an technische Anwender, die eine detaillierte Anleitung zur Einrichtung, Verwaltung und Optimierung des Systems benötigen. Es bietet eine strukturierte Übersicht über die verschiedenen Komponenten und Funktionen des Systems, um eine effiziente Nutzung und Anpassung zu gewährleisten. Ziel ist es, den Anwendern ein tiefes Verständnis der Systemarchitektur und der zugrunde liegenden Prozesse zu vermitteln, um eine reibungslose Integration und Verwaltung zu ermöglichen.
## Systemübersicht
Das Agenten-Chat-System ist eine leistungsstarke Plattform, die entwickelt wurde, um die Kommunikation zwischen Agenten und Nutzern zu optimieren. Es nutzt die FastAPI-Technologie, um eine schnelle und skalierbare Lösung zu bieten. Die Hauptkomponenten des Systems umfassen:
- **Anwendungssetup**: Die FastAPI-Anwendung wird mit spezifischen Konfigurationen für Logging, CORS (Cross-Origin Resource Sharing) und Authentifizierung eingerichtet. Diese Konfigurationen sind entscheidend für die Sicherheit und Leistung der Anwendung.
- **Benutzerverwaltung**: Ein robustes Modul zur Verwaltung von Benutzerkonten, das die Erstellung, Aktualisierung und Löschung von Benutzerprofilen ermöglicht. Es stellt sicher, dass nur autorisierte Benutzer Zugriff auf das System haben.
- **Mandatsverwaltung**: Diese Komponente ermöglicht die Verwaltung von Mandaten, die den Zugriff und die Berechtigungen innerhalb des Systems regeln. Sie ist essenziell für die Einhaltung von Sicherheitsrichtlinien.
- **Attributverwaltung**: Ein flexibles System zur Verwaltung von Attributen, die zur Personalisierung und Anpassung der Benutzererfahrung verwendet werden können.
- **Prompt-Management**: Diese Funktion ermöglicht die Verwaltung von Eingabeaufforderungen, die zur Interaktion mit den Nutzern verwendet werden. Sie ist entscheidend für die Anpassung der Kommunikation an spezifische Anforderungen.
- **Dateioperationen**: Ein Modul zur effizienten Handhabung von Dateivorgängen, das das Hochladen, Herunterladen und Verwalten von Dateien innerhalb des Systems unterstützt.
- **Workflow-Management**: Diese Komponente ermöglicht die Definition und Verwaltung von Arbeitsabläufen, um die Effizienz und Konsistenz der Prozesse zu gewährleisten.
- **KI-Integration**: Das System bietet eine nahtlose Integration von KI-Technologien, um die Interaktion und Entscheidungsfindung zu verbessern.
- **Authentifizierung**: Ein sicheres Authentifizierungssystem, das sicherstellt, dass nur berechtigte Benutzer Zugriff auf die Anwendung haben.
Dieses Handbuch wird detaillierte Anleitungen und Beispiele für jede dieser Komponenten bieten, um eine umfassende Unterstützung bei der Implementierung und Verwaltung des Agenten-Chat-Systems zu gewährleisten.
# Application Setup
In diesem Abschnitt des "Agent Chat System Handbook" wird die Einrichtung der Anwendung detailliert beschrieben. Diese Anleitung richtet sich an technische Benutzer und bietet eine umfassende Übersicht über die Initialisierung der FastAPI-Anwendung, die Konfiguration von statischen Dateien und die allgemeinen Endpunkte.
## FastAPI Initialization
Die Initialisierung der FastAPI-Anwendung ist der erste Schritt zur Einrichtung des Agent Chat Systems. Hierbei werden grundlegende Parameter und Konfigurationen festgelegt, die für den Betrieb der Anwendung erforderlich sind.
### Schritte zur Initialisierung:
1. **Anwendungserstellung**:
- Die FastAPI-Anwendung wird mit einem Titel und einer Beschreibung initialisiert. Diese Informationen sind nützlich für die Dokumentation und API-Dokumentationsseiten.
- Beispiel:
```python
from fastapi import FastAPI
app = FastAPI(
title="Agent Chat System",
description="Ein System zur Verwaltung von Agenten-Chats"
)
```
2. **Lebenszyklus-Management**:
- Die Anwendung verwendet einen Lebenszyklus-Manager, um Ereignisse beim Start und Herunterfahren der Anwendung zu verwalten. Dies ist entscheidend für die ordnungsgemäße Ressourcenverwaltung.
- Beispiel:
```python
@app.on_event("startup")
async def startup_event():
    # Initialisierungslogik hier
    pass

@app.on_event("shutdown")
async def shutdown_event():
    # Bereinigungslogik hier
    pass
3. **CORS-Konfiguration**:
- Die Cross-Origin Resource Sharing (CORS) Einstellungen werden konfiguriert, um den Zugriff von verschiedenen Ursprüngen zu ermöglichen, was besonders wichtig für Webanwendungen ist, die auf verschiedenen Domains gehostet werden.
## Static Files Setup
Die Konfiguration von statischen Dateien ermöglicht es der Anwendung, Ressourcen wie Bilder, CSS-Dateien und JavaScript-Dateien bereitzustellen, die für die Benutzeroberfläche benötigt werden.
### Schritte zur Konfiguration:
1. **Verzeichnis für statische Dateien**:
- Ein Verzeichnis wird definiert, in dem alle statischen Dateien gespeichert werden. Dieses Verzeichnis wird in der Regel relativ zum Projektverzeichnis angegeben.
- Beispiel:
```python
from fastapi.staticfiles import StaticFiles
app.mount("/static", StaticFiles(directory="static"), name="static")
```
2. **Zugriff auf statische Dateien**:
- Die Anwendung stellt sicher, dass die statischen Dateien über einen bestimmten URL-Pfad zugänglich sind, was die Bereitstellung und den Zugriff auf diese Ressourcen erleichtert.
## Endpoints Overview
Die Endpunkte der Anwendung sind die Schnittstellen, über die externe Systeme und Benutzer mit der Anwendung interagieren können. Eine klare Übersicht über die verfügbaren Endpunkte ist entscheidend für die Integration und Nutzung der Anwendung.
### Allgemeine Endpunkte:
1. **Benutzerverwaltung**:
- Endpunkte zur Erstellung, Aktualisierung und Löschung von Benutzern.
- Beispiel:
```python
@app.post("/users/")
async def create_user(user: User):
# Logik zur Benutzererstellung
```
2. **Mandatsverwaltung**:
- Endpunkte zur Verwaltung von Mandaten, einschließlich der Zuweisung und Verwaltung von Berechtigungen.
3. **Attributverwaltung**:
- Endpunkte zur Verwaltung von Attributen, die für die Anpassung und Personalisierung der Agenten-Chats verwendet werden.
4. **Prompt-Management**:
- Endpunkte zur Verwaltung von Eingabeaufforderungen, die für die Interaktion mit Benutzern verwendet werden.
Diese detaillierte Anleitung zur Einrichtung der Anwendung stellt sicher, dass technische Benutzer die FastAPI-Anwendung korrekt initialisieren und konfigurieren können, um eine reibungslose Funktionalität des Agent Chat Systems zu gewährleisten.
# Logging
In diesem Abschnitt des "Agent Chat System Handbook" wird die Konfiguration und Einrichtung des Loggings im Rahmen der FastAPI-Anwendung beschrieben. Eine ordnungsgemäße Protokollierung ist entscheidend für die Überwachung und Fehlerbehebung der Anwendung. Dieser Abschnitt ist in zwei Hauptunterabschnitte unterteilt: Initialisierung und Handler.
## Initialisierung
Die Initialisierung des Loggings ist ein wesentlicher Schritt, um sicherzustellen, dass alle Ereignisse innerhalb der Anwendung korrekt erfasst werden. Die Konfiguration des Loggings erfolgt in der Regel zu Beginn der Anwendung, um sicherzustellen, dass alle nachfolgenden Prozesse und Ereignisse protokolliert werden.
### Beispiel für die Logging-Initialisierung
```python
import logging
def initialize_logging():
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
handlers=[
logging.FileHandler("app.log"),
logging.StreamHandler()
]
)
```
In diesem Beispiel wird das Logging mit einem Basislevel von `INFO` konfiguriert. Die Formatierung der Log-Nachrichten umfasst das Datum und die Uhrzeit, den Namen des Loggers, die Log-Stufe und die eigentliche Nachricht. Zwei Handler werden eingerichtet: ein `FileHandler`, der die Logs in eine Datei schreibt, und ein `StreamHandler`, der die Logs auf der Konsole ausgibt.
## Handler
Handler sind ein wesentlicher Bestandteil des Loggings, da sie bestimmen, wohin die Log-Nachrichten gesendet werden. In der FastAPI-Anwendung können verschiedene Arten von Handlern eingerichtet werden, um die Protokollierung flexibel und anpassbar zu gestalten.
### Einrichtung von Handlers
1. **FileHandler**: Dieser Handler schreibt Log-Nachrichten in eine Datei. Er ist nützlich für die langfristige Speicherung von Logs und die spätere Analyse.
```python
file_handler = logging.FileHandler('app.log')
file_handler.setLevel(logging.INFO)
```
2. **StreamHandler**: Dieser Handler gibt Log-Nachrichten auf der Konsole aus. Er ist besonders nützlich für die Echtzeitüberwachung während der Entwicklung und des Debuggings.
```python
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.DEBUG)
```
3. **Custom Handler**: Bei Bedarf können benutzerdefinierte Handler erstellt werden, um spezielle Anforderungen zu erfüllen, wie z.B. das Senden von Logs an externe Systeme oder Dienste.
### Beispiel für die Handler-Konfiguration
```python
logger = logging.getLogger('agent_chat_system')
logger.setLevel(logging.DEBUG)
# Hinzufügen der Handler zum Logger
logger.addHandler(file_handler)
logger.addHandler(stream_handler)
```
In diesem Beispiel wird ein Logger mit dem Namen `agent_chat_system` erstellt und auf das Level `DEBUG` gesetzt. Die zuvor definierten `FileHandler` und `StreamHandler` werden dem Logger hinzugefügt, um die Log-Nachrichten sowohl in eine Datei als auch auf der Konsole auszugeben.
Durch die sorgfältige Konfiguration von Logging und Handlers kann die FastAPI-Anwendung effektiv überwacht und gewartet werden, was zu einer verbesserten Stabilität und Fehlerbehebung führt.
# Benutzerverwaltung
In diesem Abschnitt des "Agent Chat System Handbook" wird die Benutzerverwaltung detailliert beschrieben. Die Benutzerverwaltung ist ein zentraler Bestandteil des Systems, da sie die Zuweisung von Rollen und Berechtigungen sowie die Authentifizierungsmechanismen umfasst. Diese Aspekte sind entscheidend für die Sicherheit und Effizienz des Systems.
## Rollen und Berechtigungen
### Benutzerrollen
Benutzerrollen definieren die verschiedenen Zugriffsebenen und Verantwortlichkeiten innerhalb des Agent Chat Systems. Jede Rolle hat spezifische Berechtigungen, die den Zugriff auf bestimmte Funktionen und Daten steuern. Die Hauptrollen sind:
- **Administrator**: Hat umfassende Berechtigungen, einschließlich der Verwaltung von Benutzern, Rollen und Systemeinstellungen.
- **Agent**: Kann auf die Chat-Funktionalitäten zugreifen und mit Kunden interagieren.
- **Supervisor**: Überwacht die Aktivitäten der Agenten und hat Zugriff auf Berichte und Analysen.
### Berechtigungen
Berechtigungen sind spezifische Rechte, die einer Rolle zugewiesen werden. Sie bestimmen, welche Aktionen ein Benutzer innerhalb des Systems ausführen kann. Beispiele für Berechtigungen sind:
- Zugriff auf das Dashboard
- Verwaltung von Benutzerkonten
- Einsicht in Berichte und Statistiken
- Konfiguration von Systemeinstellungen
Die Zuweisung von Rollen und Berechtigungen erfolgt über die Administrationsoberfläche des Systems, wo Administratoren die Möglichkeit haben, Benutzerkonten zu erstellen und zu verwalten.
## Authentifizierung
### Authentifizierungsmechanismen
Die Authentifizierung ist ein kritischer Sicherheitsaspekt des Agent Chat Systems. Sie stellt sicher, dass nur autorisierte Benutzer Zugriff auf das System erhalten. Die gängigen Authentifizierungsmechanismen umfassen:
- **Passwortbasierte Authentifizierung**: Benutzer melden sich mit einem Benutzernamen und einem Passwort an. Es wird empfohlen, starke Passwörter zu verwenden und regelmäßige Passwortänderungen durchzuführen.
- **Zwei-Faktor-Authentifizierung (2FA)**: Erhöht die Sicherheit, indem ein zusätzlicher Verifizierungsschritt hinzugefügt wird, z.B. ein einmaliger Code, der an das Mobiltelefon des Benutzers gesendet wird.
- **OAuth 2.0**: Ermöglicht die Authentifizierung über Drittanbieter, wie Google oder Facebook, was den Anmeldeprozess für Benutzer vereinfacht und die Sicherheit erhöht.
### Implementierung in FastAPI
Die FastAPI-Anwendung implementiert diese Authentifizierungsmechanismen durch die Integration von Sicherheitsprotokollen und Middleware. Die Konfiguration erfolgt in der `app.py` Datei, wo die Authentifizierungslogik definiert ist. Hier ein Beispiel für die Implementierung der passwortbasierten Authentifizierung:
```python
from fastapi import FastAPI, Depends
from fastapi.security import OAuth2PasswordBearer, OAuth2PasswordRequestForm
app = FastAPI()
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="token")
@app.post("/token")
async def login(form_data: OAuth2PasswordRequestForm = Depends()):
# Authentifizierungslogik hier
return {"access_token": "token", "token_type": "bearer"}
```
Diese Struktur ermöglicht eine flexible und sichere Verwaltung der Benutzerzugriffe und gewährleistet, dass das System den aktuellen Sicherheitsstandards entspricht.
Durch die sorgfältige Verwaltung von Rollen, Berechtigungen und Authentifizierungsmechanismen wird sichergestellt, dass das Agent Chat System sowohl sicher als auch effizient betrieben werden kann.
# Mandate Management
In diesem Abschnitt des "Agent Chat System Handbook" wird das Mandatsmanagement detailliert beschrieben. Das Mandatsmanagement ist ein wesentlicher Bestandteil des Systems, der die Erstellung und Verwaltung von Mandaten umfasst. Diese Funktionen sind entscheidend für die Organisation und den Betrieb des Agenten-Chat-Systems.
## Mandate Creation
### Erstellung von Mandaten
Die Erstellung von Mandaten ist der erste Schritt im Mandatsmanagement. Ein Mandat definiert die spezifischen Aufgaben und Verantwortlichkeiten, die einem Agenten oder einer Gruppe von Agenten zugewiesen werden. Die Erstellung eines Mandats erfolgt in mehreren Schritten:
1. **Initialisierung**: Beginnen Sie mit der Definition der grundlegenden Parameter des Mandats, einschließlich des Titels, der Beschreibung und der beteiligten Agenten.
2. **Zuweisung von Aufgaben**: Weisen Sie spezifische Aufgaben oder Ziele zu, die im Rahmen des Mandats erreicht werden sollen. Diese Aufgaben sollten klar definiert und messbar sein.
3. **Festlegung von Fristen**: Bestimmen Sie die zeitlichen Rahmenbedingungen für das Mandat, einschließlich Start- und Enddatum sowie Meilensteine.
4. **Ressourcenzuweisung**: Stellen Sie sicher, dass die notwendigen Ressourcen, wie z.B. technische Tools oder Daten, den Agenten zur Verfügung stehen.
5. **Genehmigung**: Das Mandat muss von einer autorisierten Person oder einem Gremium genehmigt werden, bevor es aktiv wird.
## Mandate Lifecycle
### Lebenszyklus eines Mandats
Der Lebenszyklus eines Mandats umfasst mehrere Phasen, die sicherstellen, dass das Mandat effektiv verwaltet und abgeschlossen wird:
1. **Initiierung**: Nach der Erstellung wird das Mandat offiziell gestartet. Alle beteiligten Parteien werden informiert und die notwendigen Ressourcen bereitgestellt.
2. **Durchführung**: In dieser Phase arbeiten die Agenten an den zugewiesenen Aufgaben. Fortschritte werden regelmäßig überwacht und dokumentiert.
3. **Überwachung und Anpassung**: Der Fortschritt des Mandats wird kontinuierlich überwacht. Bei Bedarf werden Anpassungen vorgenommen, um sicherzustellen, dass die Ziele erreicht werden.
4. **Abschluss**: Nach Erreichen der Ziele oder dem Ende der Laufzeit wird das Mandat abgeschlossen. Eine abschließende Bewertung wird durchgeführt, um den Erfolg zu messen und Erkenntnisse für zukünftige Mandate zu gewinnen.
5. **Archivierung**: Alle relevanten Dokumente und Berichte werden archiviert, um eine Nachverfolgbarkeit und Referenz für zukünftige Projekte zu gewährleisten.
## Managing Mandates
### Verwaltung von Mandaten
Die Verwaltung von Mandaten erfordert eine kontinuierliche Überwachung und Anpassung, um sicherzustellen, dass die gesetzten Ziele erreicht werden. Zu den wichtigsten Verwaltungsaufgaben gehören:
- **Statusüberprüfung**: Regelmäßige Überprüfung des Status und Fortschritts des Mandats.
- **Kommunikation**: Sicherstellen, dass alle Beteiligten über den Fortschritt und etwaige Änderungen informiert sind.
- **Risikomanagement**: Identifizierung und Management potenzieller Risiken, die den Erfolg des Mandats gefährden könnten.
- **Berichterstattung**: Erstellung regelmäßiger Berichte, um den Fortschritt zu dokumentieren und Transparenz zu gewährleisten.
Durch die sorgfältige Erstellung und Verwaltung von Mandaten wird sichergestellt, dass das Agenten-Chat-System effizient und effektiv arbeitet, um die gesteckten Ziele zu erreichen.
# Attribute Handling
In diesem Abschnitt des "Agent Chat System Handbook" wird die Handhabung von Attributen im System detailliert beschrieben. Attribute sind wesentliche Komponenten, die zur Verwaltung und Organisation von Daten innerhalb des Systems verwendet werden. Dieser Abschnitt behandelt die verschiedenen Typen von Attributen und die Operationen, die auf ihnen ausgeführt werden können.
## Typen von Attributen
Attribute im Agent Chat System sind in verschiedene Kategorien unterteilt, die jeweils spezifische Funktionen und Anwendungsbereiche haben. Die wichtigsten Attributtypen sind:
### 1. Systemattribute
Systemattribute sind vordefinierte Attribute, die für die grundlegende Funktionalität des Systems erforderlich sind. Sie werden automatisch vom System verwaltet und können nicht vom Benutzer geändert werden. Beispiele für Systemattribute sind Benutzer-ID, Erstellungsdatum und Änderungsdatum.
### 2. Benutzerdefinierte Attribute
Benutzerdefinierte Attribute werden von den Benutzern erstellt, um spezifische Anforderungen zu erfüllen. Diese Attribute bieten Flexibilität und Anpassungsfähigkeit, indem sie es den Benutzern ermöglichen, zusätzliche Informationen zu speichern, die nicht durch Systemattribute abgedeckt sind. Ein Beispiel könnte ein Attribut für die bevorzugte Sprache eines Benutzers sein.
### 3. Temporäre Attribute
Temporäre Attribute werden für kurzfristige Zwecke erstellt und haben eine begrenzte Lebensdauer. Sie werden häufig in Sitzungen oder für bestimmte Operationen verwendet, bei denen die Daten nicht dauerhaft gespeichert werden müssen. Ein Beispiel wäre ein Attribut, das während einer Chat-Sitzung verwendet wird, um den aktuellen Status eines Gesprächs zu verfolgen.
## Operationen auf Attributen
Die Verwaltung von Attributen umfasst eine Vielzahl von Operationen, die es ermöglichen, Attribute zu erstellen, zu ändern, zu löschen und zu analysieren. Die wichtigsten Operationen sind:
### 1. Erstellung von Attributen
Die Erstellung von Attributen erfolgt entweder automatisch durch das System (für Systemattribute) oder manuell durch den Benutzer (für benutzerdefinierte Attribute). Bei der Erstellung eines Attributs müssen der Attributtyp, der Name und der Datentyp spezifiziert werden.
### 2. Aktualisierung von Attributen
Attribute können aktualisiert werden, um Änderungen in den Daten widerzuspiegeln. Dies umfasst das Ändern von Attributwerten oder das Aktualisieren von Attributmetadaten. Beispielsweise kann ein Benutzer das Attribut "Telefonnummer" aktualisieren, um eine neue Nummer zu speichern.
### 3. Löschung von Attributen
Nicht mehr benötigte Attribute können gelöscht werden. Bei der Löschung von Attributen ist Vorsicht geboten, da dies irreversible Änderungen an den gespeicherten Daten zur Folge haben kann. Systemattribute können in der Regel nicht gelöscht werden, um die Integrität des Systems zu gewährleisten.
### 4. Abfrage von Attributen
Das System ermöglicht die Abfrage von Attributen, um Informationen zu extrahieren und Berichte zu erstellen. Dies ist besonders nützlich für die Analyse von Daten und die Generierung von Einblicken. Beispielsweise kann ein Administrator eine Abfrage durchführen, um alle Benutzer mit einem bestimmten Attributwert zu identifizieren.
### 5. Validierung von Attributen
Die Validierung von Attributen stellt sicher, dass die eingegebenen Daten den festgelegten Kriterien entsprechen. Dies umfasst die Überprüfung von Datentypen, Wertebereichen und anderen Einschränkungen. Eine korrekte Validierung ist entscheidend, um Datenintegrität und -konsistenz zu gewährleisten.
Durch das Verständnis der verschiedenen Attributtypen und der auf ihnen ausführbaren Operationen können Benutzer das Agent Chat System effektiver nutzen und an ihre spezifischen Bedürfnisse anpassen.
# Prompt Management
In diesem Abschnitt des "Agent Chat System Handbook" wird die Verwaltung von Prompts behandelt. Prompts sind wesentliche Bestandteile des Agent Chat Systems, da sie die Interaktion zwischen Benutzern und dem System steuern. Dieser Abschnitt bietet eine detaillierte Anleitung zur Erstellung und Nutzung von Prompts.
## Erstellen von Prompts
Die Erstellung von Prompts ist ein zentraler Bestandteil der Systemkonfiguration und ermöglicht es, spezifische Anfragen oder Anweisungen für die Interaktion mit dem System zu definieren.
### Schritte zur Erstellung von Prompts
1. **Identifikation des Bedarfs**: Bestimmen Sie den spezifischen Bedarf oder das Szenario, für das ein Prompt erforderlich ist. Dies könnte eine häufig gestellte Frage oder eine spezifische Anweisung sein, die regelmäßig benötigt wird.
2. **Definition des Inhalts**: Formulieren Sie den Inhalt des Prompts klar und präzise. Der Inhalt sollte direkt und verständlich sein, um Missverständnisse zu vermeiden.
3. **Formatierung**: Achten Sie darauf, dass der Prompt in einem konsistenten Format erstellt wird, das mit den anderen Systemkomponenten kompatibel ist. Nutzen Sie Markdown oder andere unterstützte Formate, um die Lesbarkeit zu verbessern.
4. **Implementierung im System**: Integrieren Sie den erstellten Prompt in das System. Dies kann durch die Anpassung der entsprechenden Konfigurationsdateien oder durch die Nutzung der API-Schnittstellen erfolgen.
### Beispiel
```json
{
"prompt_id": "faq_shipping",
"content": "Wie lange dauert der Versand?",
"response": "Der Versand dauert in der Regel 3-5 Werktage."
}
```
## Nutzung von Prompts
Die Nutzung von Prompts ist entscheidend, um eine effiziente und konsistente Kommunikation innerhalb des Agent Chat Systems sicherzustellen.
### Schritte zur Nutzung von Prompts
1. **Abrufen von Prompts**: Verwenden Sie die API-Endpunkte, um verfügbare Prompts abzurufen. Dies ermöglicht es Agenten, schnell auf vorgefertigte Antworten zuzugreifen.
2. **Anpassung an den Kontext**: Stellen Sie sicher, dass der ausgewählte Prompt dem aktuellen Kontext der Benutzeranfrage entspricht. Passen Sie den Inhalt gegebenenfalls an, um spezifische Details oder Variationen zu berücksichtigen.
3. **Feedback und Optimierung**: Sammeln Sie regelmäßig Feedback zur Effektivität der Prompts und optimieren Sie diese basierend auf den Rückmeldungen. Dies kann durch die Analyse von Benutzerinteraktionen und die Anpassung der Inhalte erfolgen.
### Beispiel
Ein Agent erhält eine Anfrage zu den Versandzeiten. Anstatt die Antwort manuell zu formulieren, ruft der Agent den entsprechenden Prompt ab und liefert eine konsistente und schnelle Antwort.
```json
{
"user_query": "Wann kommt meine Bestellung an?",
"prompt_used": "faq_shipping",
"response": "Der Versand dauert in der Regel 3-5 Werktage."
}
```
Durch die strukturierte Verwaltung und Nutzung von Prompts wird die Effizienz des Agent Chat Systems erheblich gesteigert, was zu einer verbesserten Benutzererfahrung führt.
# File Operations
In diesem Abschnitt des "Agent Chat System Handbook" werden die wesentlichen Aspekte der Dateiverwaltung und -speicherung im Rahmen des Agent Chat Systems behandelt. Diese Informationen sind entscheidend für die technische Verwaltung und den Betrieb des Systems. Der Abschnitt ist in zwei Hauptunterabschnitte unterteilt: "Handling Files" und "Storing Files".
## Handling Files
Die Handhabung von Dateien ist ein zentraler Bestandteil des Agent Chat Systems, da es die Interaktion mit verschiedenen Dateitypen ermöglicht, die für die Funktionalität des Systems erforderlich sind.
### Dateiverwaltung
- **Öffnen und Schließen von Dateien**: Dateien sollten mit geeigneten Methoden geöffnet und geschlossen werden, um Datenverlust oder -beschädigung zu vermeiden. In Python wird dies häufig mit dem `with`-Statement erreicht, das sicherstellt, dass Dateien nach der Verwendung ordnungsgemäß geschlossen werden.
```python
with open('datei.txt', 'r') as file:
inhalt = file.read()
```
- **Lesen und Schreiben von Dateien**: Das System unterstützt sowohl das Lesen als auch das Schreiben von Dateien. Es ist wichtig, die korrekten Modi (`'r'` für Lesen, `'w'` für Schreiben, `'a'` für Anhängen) zu verwenden, um die Integrität der Daten zu gewährleisten.
- **Fehlerbehandlung**: Beim Umgang mit Dateien können verschiedene Fehler auftreten, wie z.B. `FileNotFoundError` oder `IOError`. Eine robuste Fehlerbehandlung ist notwendig, um das System vor unerwarteten Abstürzen zu schützen.
```python
try:
with open('datei.txt', 'r') as file:
inhalt = file.read()
except FileNotFoundError:
print("Die Datei wurde nicht gefunden.")
```
## Storing Files
Die Speicherung von Dateien ist ein weiterer kritischer Aspekt, der sicherstellt, dass Daten sicher und effizient abgelegt werden.
### Dateispeicherung
- **Verzeichnisstruktur**: Eine gut organisierte Verzeichnisstruktur ist entscheidend für die effiziente Speicherung und den schnellen Zugriff auf Dateien. Es wird empfohlen, Dateien in thematisch geordneten Unterverzeichnissen zu speichern.
- **Datenbankintegration**: In einigen Fällen kann es notwendig sein, Dateien in einer Datenbank zu speichern, insbesondere wenn Metadaten oder eine hohe Zugriffsgeschwindigkeit erforderlich sind. Das System kann Datenbanklösungen wie PostgreSQL oder MongoDB integrieren, um Dateien und ihre Metadaten zu verwalten.
- **Sicherheitsaspekte**: Bei der Speicherung von Dateien müssen Sicherheitsaspekte berücksichtigt werden, insbesondere wenn es sich um sensible Daten handelt. Dies umfasst die Verschlüsselung von Dateien und die Implementierung von Zugriffsberechtigungen.
- **Backup und Wiederherstellung**: Regelmäßige Backups sind unerlässlich, um Datenverlust zu vermeiden. Das System sollte über Mechanismen zur automatisierten Sicherung und Wiederherstellung von Dateien verfügen.
Durch die Beachtung dieser Richtlinien und Praktiken wird sichergestellt, dass das Agent Chat System Dateien effizient und sicher handhabt und speichert, was zu einem reibungslosen Betrieb und einer hohen Zuverlässigkeit des Systems beiträgt.
# Workflow Management
In diesem Abschnitt des "Agent Chat System Handbook" wird das Workflow-Management detailliert beschrieben. Das Ziel ist es, den technischen Benutzern ein umfassendes Verständnis für die Erstellung und Ausführung von Workflows innerhalb des Agent Chat Systems zu vermitteln.
## Inhaltsverzeichnis
1. [Erstellung von Workflows](#erstellung-von-workflows)
2. [Ausführung von Workflows](#ausführung-von-workflows)
## Erstellung von Workflows
Die Erstellung von Workflows ist ein zentraler Bestandteil des Workflow-Managements im Agent Chat System. Ein Workflow definiert eine Abfolge von Schritten, die automatisiert oder manuell ausgeführt werden können, um spezifische Aufgaben oder Prozesse zu steuern.
### Schritte zur Erstellung eines Workflows
1. **Identifikation der Anforderungen**: Bestimmen Sie die spezifischen Anforderungen und Ziele des Workflows. Dies könnte die Automatisierung von Kundenanfragen oder die Verwaltung von Support-Tickets umfassen.
2. **Definition der Schritte**: Listen Sie die einzelnen Schritte auf, die zur Erreichung des Workflows erforderlich sind. Jeder Schritt sollte klar definiert und in einer logischen Reihenfolge angeordnet sein.
3. **Konfiguration der Aktionen**: Weisen Sie jedem Schritt spezifische Aktionen zu. Diese Aktionen könnten API-Aufrufe, Datenbankabfragen oder Benachrichtigungen umfassen.
4. **Erstellung von Bedingungen**: Definieren Sie Bedingungen, die den Fluss des Workflows steuern. Bedingungen können auf Ereignissen, Datenwerten oder Benutzerinteraktionen basieren.
5. **Testen des Workflows**: Vor der Implementierung sollte der Workflow in einer Testumgebung ausgeführt werden, um sicherzustellen, dass alle Schritte korrekt funktionieren.
6. **Dokumentation**: Dokumentieren Sie den Workflow umfassend, einschließlich der Ziele, Schritte, Bedingungen und erwarteten Ergebnisse.
### Beispiel
```yaml
- name: "Kundenanfrage-Workflow"
steps:
- step: "Anfrage erhalten"
action: "API-Aufruf"
- step: "Anfrage analysieren"
action: "AI-Analyse"
- step: "Antwort generieren"
action: "Textgenerierung"
- step: "Antwort senden"
action: "Benachrichtigung"
conditions:
- if: "Anfrage enthält 'dringend'"
then: "Priorität hochsetzen"
```
## Ausführung von Workflows
Die Ausführung von Workflows ist der Prozess, bei dem die definierten Schritte eines Workflows in der Praxis umgesetzt werden. Dies kann manuell durch einen Benutzer oder automatisch durch das System erfolgen.
### Schritte zur Ausführung eines Workflows
1. **Initiierung**: Der Workflow wird entweder durch ein Ereignis, eine Benutzeraktion oder einen Zeitplan initiiert.
2. **Verarbeitung der Schritte**: Jeder Schritt des Workflows wird in der festgelegten Reihenfolge ausgeführt. Das System überwacht den Fortschritt und stellt sicher, dass alle Bedingungen erfüllt sind, bevor zum nächsten Schritt übergegangen wird.
3. **Überwachung und Protokollierung**: Während der Ausführung werden alle Aktionen und Ergebnisse protokolliert. Dies ermöglicht eine spätere Analyse und Fehlerbehebung.
4. **Fehlerbehandlung**: Bei Auftreten eines Fehlers wird der Workflow entweder pausiert oder abgebrochen, je nach Konfiguration. Fehlerprotokolle werden erstellt, um die Ursache zu identifizieren und zu beheben.
5. **Abschluss**: Nach erfolgreicher Ausführung aller Schritte wird der Workflow abgeschlossen und eine Zusammenfassung der Ergebnisse erstellt.
### Beispiel
```json
{
"workflow_id": "12345",
"status": "in_progress",
"current_step": "Anfrage analysieren",
"logs": [
{"timestamp": "2023-10-01T10:00:00Z", "message": "Anfrage erhalten"},
{"timestamp": "2023-10-01T10:01:00Z", "message": "Anfrage analysieren gestartet"}
]
}
```
Durch die sorgfältige Erstellung und Ausführung von Workflows können Unternehmen die Effizienz und Genauigkeit ihrer Prozesse erheblich verbessern. Das Agent Chat System bietet die Flexibilität und Kontrolle, die erforderlich sind, um komplexe Workflows effektiv zu verwalten.
# AI Integration
In diesem Abschnitt wird die Integration von Künstlicher Intelligenz (KI) in das Agent Chat System detailliert beschrieben. Der Fokus liegt auf den verwendeten KI-Modellen und den Integrationspunkten innerhalb des Systems. Diese Informationen sind entscheidend für das Verständnis der technischen Architektur und der Funktionsweise der KI-Komponenten.
## AI Models
### Verwendete KI-Modelle
Das Agent Chat System nutzt fortschrittliche KI-Modelle, um die Interaktion zwischen Agenten und Nutzern zu optimieren. Diese Modelle sind darauf ausgelegt, natürliche Sprachverarbeitung (NLP) zu unterstützen und kontextbezogene Antworten zu generieren. Die wichtigsten Modelle umfassen:
- **GPT-3**: Ein leistungsstarkes Sprachmodell, das für die Generierung von menschenähnlichen Texten verwendet wird. Es ist in der Lage, komplexe Anfragen zu verstehen und relevante Antworten zu liefern.
- **BERT**: Ein Modell, das für Aufgaben der Sprachverständnisoptimierung eingesetzt wird, insbesondere bei der Analyse von Benutzeranfragen und der Extraktion von Schlüsselinformationen.
- **Custom Sentiment Analysis Model**: Ein speziell entwickeltes Modell zur Analyse der Stimmung in Benutzeranfragen, um die Reaktionen der Agenten entsprechend anzupassen.
Diese Modelle werden kontinuierlich aktualisiert und optimiert, um die Effizienz und Genauigkeit der Interaktionen zu verbessern.
## Integration
### Integrationspunkte
Die Integration der KI-Modelle erfolgt an mehreren strategischen Punkten innerhalb des Agent Chat Systems. Diese Integrationspunkte sind entscheidend für die nahtlose Funktionalität und umfassen:
- **Anfrageverarbeitung**: Bei der Eingabe einer Benutzeranfrage wird diese zunächst durch das NLP-Modul geleitet, das die Anfrage analysiert und an das entsprechende KI-Modell weiterleitet.
- **Antwortgenerierung**: Die generierten Antworten werden durch das GPT-3-Modell erstellt und anschließend durch das Sentiment Analysis Model überprüft, um sicherzustellen, dass die Antwort dem emotionalen Kontext des Benutzers entspricht.
- **Datenanalyse**: Die gesammelten Daten aus den Interaktionen werden durch BERT analysiert, um Muster und Trends zu identifizieren, die zur Verbesserung der Systemleistung beitragen können.
- **Feedback-Schleife**: Eine kontinuierliche Feedback-Schleife ermöglicht es, die Modelle basierend auf Benutzerinteraktionen und Agenten-Feedback zu verfeinern und anzupassen.
### Technische Implementierung
Die Implementierung der KI-Integration erfolgt über spezialisierte APIs, die in die FastAPI-Anwendung eingebettet sind. Diese APIs ermöglichen eine effiziente Kommunikation zwischen den verschiedenen Modulen und den KI-Modellen. Die Integration ist so gestaltet, dass sie skalierbar und erweiterbar ist, um zukünftige Anforderungen und technologische Fortschritte zu berücksichtigen.
Durch die sorgfältige Auswahl und Integration dieser KI-Modelle wird sichergestellt, dass das Agent Chat System nicht nur effizient, sondern auch flexibel und anpassungsfähig bleibt, um den sich ständig ändernden Anforderungen der Benutzer gerecht zu werden.
## Authentication
```md
# Authentication
In diesem Abschnitt des "Agent Chat System Handbook" wird das Authentifizierungssystem detailliert beschrieben. Die Authentifizierung ist ein kritischer Bestandteil des Systems, der sicherstellt, dass nur autorisierte Benutzer Zugriff auf die Anwendung und ihre Funktionen haben. Wir werden die verschiedenen Authentifizierungsmethoden sowie die Sicherheitsmaßnahmen, die implementiert wurden, um die Integrität und Vertraulichkeit der Benutzerdaten zu gewährleisten, untersuchen.
## Methoden
### Authentifizierungsmethoden
Das Agent Chat System unterstützt mehrere Authentifizierungsmethoden, um Flexibilität und Sicherheit zu bieten. Die wichtigsten Methoden sind:
1. **Token-basierte Authentifizierung**:
- **Beschreibung**: Diese Methode verwendet JSON Web Tokens (JWT), um Benutzer zu authentifizieren. Nach erfolgreicher Anmeldung erhält der Benutzer ein Token, das bei jeder Anfrage an den Server gesendet wird.
- **Vorteile**: Erhöhte Sicherheit durch zeitlich begrenzte Token und die Möglichkeit, Token zu widerrufen.
- **Implementierung**: Der Token wird im Header der HTTP-Anfrage übermittelt und vom Server validiert.
2. **OAuth 2.0**:
- **Beschreibung**: OAuth 2.0 ist ein weit verbreitetes Protokoll, das es Benutzern ermöglicht, sich mit ihren bestehenden Konten von Drittanbietern (z.B. Google, Facebook) anzumelden.
- **Vorteile**: Benutzerfreundlichkeit und erhöhte Sicherheit, da keine Passwörter direkt im System gespeichert werden müssen.
- **Implementierung**: Die Anwendung leitet den Benutzer zur Authentifizierungsseite des Drittanbieters weiter und erhält nach erfolgreicher Authentifizierung ein Zugriffstoken.
3. **Zwei-Faktor-Authentifizierung (2FA)**:
- **Beschreibung**: Diese Methode fügt eine zusätzliche Sicherheitsebene hinzu, indem sie einen zweiten Authentifizierungsfaktor erfordert, z.B. einen SMS-Code oder eine Authentifizierungs-App.
- **Vorteile**: Erhöhte Sicherheit durch die Kombination von etwas, das der Benutzer kennt (Passwort) und etwas, das der Benutzer hat (zweiter Faktor).
- **Implementierung**: Nach der Eingabe des Passworts wird der Benutzer aufgefordert, den zweiten Faktor einzugeben, bevor der Zugriff gewährt wird.
## Sicherheit
### Sicherheitsmaßnahmen
Um die Sicherheit der Authentifizierung im Agent Chat System zu gewährleisten, wurden mehrere Maßnahmen implementiert:
1. **Datenverschlüsselung**:
- Alle sensiblen Daten, einschließlich Passwörter und Token, werden mit starken Verschlüsselungsalgorithmen gespeichert und übertragen. Dies schützt die Daten vor unbefugtem Zugriff und Manipulation.
2. **Sichere Passwortspeicherung**:
- Passwörter werden nicht im Klartext gespeichert. Stattdessen werden sie mit einem sicheren Hashing-Algorithmus (z.B. bcrypt) gehasht, bevor sie in der Datenbank gespeichert werden.
3. **Regelmäßige Sicherheitsüberprüfungen**:
- Das System wird regelmäßig auf Sicherheitslücken überprüft, und es werden Patches und Updates angewendet, um bekannte Schwachstellen zu beheben.
4. **Sitzungsverwaltung**:
- Sitzungen werden überwacht und bei Inaktivität automatisch abgemeldet, um das Risiko von Sitzungsentführungen zu minimieren.
5. **Protokollierung und Überwachung**:
- Alle Authentifizierungsversuche und sicherheitsrelevanten Ereignisse werden protokolliert und überwacht, um verdächtige Aktivitäten frühzeitig zu erkennen und darauf zu reagieren.
Durch die Implementierung dieser Methoden und Sicherheitsmaßnahmen stellt das Agent Chat System sicher, dass die Authentifizierung sowohl benutzerfreundlich als auch sicher ist, und schützt die Integrität und Vertraulichkeit der Benutzerdaten effektiv.
```
## Conclusion
```md
## Fazit
In diesem Handbuch zum "Agent Chat System" haben wir die wesentlichen Komponenten und Prozesse detailliert beschrieben, die für den erfolgreichen Einsatz und die Verwaltung eines Chat-Agenten-Systems erforderlich sind. Die behandelten Themen umfassen die Einrichtung der FastAPI-Anwendung, das Benutzer- und Mandatsmanagement, die Attributverwaltung, das Prompt-Management, Dateioperationen, das Workflow-Management, die Integration von Künstlicher Intelligenz sowie die Authentifizierung.
### Zusammenfassung der Hauptpunkte
1. **FastAPI Setup**: Wir haben die Schritte zur Einrichtung und Konfiguration der FastAPI-Anwendung erläutert, um eine stabile Grundlage für das Agentensystem zu schaffen.
2. **Benutzerverwaltung**: Die Verwaltung von Benutzern und deren Rollen ist entscheidend für die Sicherheit und Effizienz des Systems. Wir haben die Methoden zur Erstellung, Aktualisierung und Löschung von Benutzerkonten behandelt.
3. **AI-Integration**: Die Integration von KI-Technologien ermöglicht es dem System, intelligentere und kontextbezogene Antworten zu generieren. Wir haben die Implementierung und Optimierung dieser Funktionalität beschrieben.
4. **Authentifizierung**: Sicherheit ist ein zentrales Element jeder Anwendung. Wir haben die Authentifizierungsmechanismen und deren Implementierung im System detailliert dargestellt.
### Empfehlungen und nächste Schritte
- **Regelmäßige Updates**: Stellen Sie sicher, dass alle Systemkomponenten regelmäßig aktualisiert werden, um Sicherheitslücken zu schließen und die Leistung zu optimieren.
- **Erweiterung der AI-Funktionalitäten**: Erwägen Sie die Implementierung fortschrittlicherer KI-Modelle, um die Interaktionsqualität weiter zu verbessern.
- **Benutzerfeedback einholen**: Nutzen Sie das Feedback der Benutzer, um kontinuierlich Verbesserungen am System vorzunehmen.
### Bedeutung des Dokuments
Dieses Handbuch dient als umfassende Ressource für technische Fachleute, die für die Implementierung und Wartung des Agent Chat Systems verantwortlich sind. Es bietet nicht nur eine detaillierte Anleitung zur Einrichtung und Verwaltung des Systems, sondern auch wertvolle Einblicke in die Optimierung der Benutzererfahrung und der Systemleistung. Mit diesem Wissen sind Sie bestens gerüstet, um ein effizientes und sicheres Chat-Agenten-System zu betreiben.
Wir hoffen, dass dieses Handbuch Ihnen als wertvolle Referenz dient und Sie bei der erfolgreichen Implementierung und Verwaltung Ihres Agent Chat Systems unterstützt.
```

View file

@ -592,16 +592,16 @@ async def previewFile(
base64Encoded = not isText base64Encoded = not isText
if isText: if isText:
# Convert to string and limit to 1000 chars for preview # Convert to string without trim for preview
if isinstance(fileData, bytes): if isinstance(fileData, bytes):
try: try:
filePreview = fileData.decode('utf-8')[:1000] filePreview = fileData.decode('utf-8')
previewData = filePreview previewData = filePreview
except UnicodeDecodeError: except UnicodeDecodeError:
# Try other encodings # Try other encodings
for encoding in ['latin-1', 'cp1252', 'iso-8859-1']: for encoding in ['latin-1', 'cp1252', 'iso-8859-1']:
try: try:
filePreview = fileData.decode(encoding)[:1000] filePreview = fileData.decode(encoding)
previewData = filePreview previewData = filePreview
break break
except UnicodeDecodeError: except UnicodeDecodeError:
@ -613,7 +613,7 @@ async def previewFile(
previewData = base64.b64encode(fileData).decode('utf-8') previewData = base64.b64encode(fileData).decode('utf-8')
base64Encoded = True base64Encoded = True
# Return file metadata with limited preview and base64Encoded flag # Return file metadata with preview and base64Encoded flag
return { return {
"id": fileId, "id": fileId,
"name": file.get("name"), "name": file.get("name"),

View file

@ -1,77 +0,0 @@
2
3
5
7
11
13
17
19
23
29
31
37
41
43
47
53
59
61
67
71
73
79
83
89
97
101
103
107
109
113
127
131
137
139
149
151
157
163
167
173
179
181
191
193
197
199
211
223
227
229
233
239
241
251
257
263
269
271
277
281
283
293
307
311
313
317
331
337
347
349
353
359
367
373
379
383
389

View file

@ -1,38 +0,0 @@
inputFiles = [] # DO NOT CHANGE THIS LINE

def is_prime(n):
    """Return True if n is prime, via trial division with the 6k±1 optimisation."""
    if n <= 1:
        return False
    if n <= 3:
        return True
    # Any prime > 3 is of the form 6k±1; 2 and 3 divide everything else.
    if n % 2 == 0 or n % 3 == 0:
        return False
    divisor = 5
    while divisor * divisor <= n:
        if n % divisor == 0 or n % (divisor + 2) == 0:
            return False
        divisor += 6
    return True

def generate_primes(count):
    """Return a list of the first `count` prime numbers in ascending order."""
    collected = []
    candidate = 2
    while len(collected) < count:
        if is_prime(candidate):
            collected.append(candidate)
        candidate += 1
    return collected

primes = generate_primes(123)
prime_numbers_content = "\n".join(str(p) for p in primes)

# Describe the generated file in the JSON envelope expected by the caller.
result = {
    "prime_numbers.txt": {
        "content": prime_numbers_content,
        "base64Encoded": False,
        "contentType": "text/plain"
    }
}

import json
print(json.dumps(result))

View file

@ -1,19 +0,0 @@
[
{
"attempt": 1,
"code": "inputFiles = [] # DO NOT CHANGE THIS LINE\n\ndef is_prime(n):\n if n <= 1:\n return False\n if n <= 3:\n return True\n if n % 2 == 0 or n % 3 == 0:\n return False\n i = 5\n while i * i <= n:\n if n % i == 0 or n % (i + 2) == 0:\n return False\n i += 6\n return True\n\ndef generate_primes(count):\n primes = []\n num = 2\n while len(primes) < count:\n if is_prime(num):\n primes.append(num)\n num += 1\n return primes\n\nprimes = generate_primes(123)\nprime_numbers_content = \"\\n\".join(map(str, primes))\n\nresult = {\n \"prime_numbers.txt\": {\n \"content\": prime_numbers_content,\n \"base64Encoded\": False,\n \"contentType\": \"text/plain\"\n }\n}\n\nimport json\nprint(json.dumps(result))",
"result": {
"success": true,
"output": "{\"prime_numbers.txt\": {\"content\": \"2\\n3\\n5\\n7\\n11\\n13\\n17\\n19\\n23\\n29\\n31\\n37\\n41\\n43\\n47\\n53\\n59\\n61\\n67\\n71\\n73\\n79\\n83\\n89\\n97\\n101\\n103\\n107\\n109\\n113\\n127\\n131\\n137\\n139\\n149\\n151\\n157\\n163\\n167\\n173\\n179\\n181\\n191\\n193\\n197\\n199\\n211\\n223\\n227\\n229\\n233\\n239\\n241\\n251\\n257\\n263\\n269\\n271\\n277\\n281\\n283\\n293\\n307\\n311\\n313\\n317\\n331\\n337\\n347\\n349\\n353\\n359\\n367\\n373\\n379\\n383\\n389\\n397\\n401\\n409\\n419\\n421\\n431\\n433\\n439\\n443\\n449\\n457\\n461\\n463\\n467\\n479\\n487\\n491\\n499\\n503\\n509\\n521\\n523\\n541\\n547\\n557\\n563\\n569\\n571\\n577\\n587\\n593\\n599\\n601\\n607\\n613\\n617\\n619\\n631\\n641\\n643\\n647\\n653\\n659\\n661\\n673\\n677\", \"base64Encoded\": false, \"contentType\": \"text/plain\"}}\n",
"error": "",
"result": {
"prime_numbers.txt": {
"content": "2\n3\n5\n7\n11\n13\n17\n19\n23\n29\n31\n37\n41\n43\n47\n53\n59\n61\n67\n71\n73\n79\n83\n89\n97\n101\n103\n107\n109\n113\n127\n131\n137\n139\n149\n151\n157\n163\n167\n173\n179\n181\n191\n193\n197\n199\n211\n223\n227\n229\n233\n239\n241\n251\n257\n263\n269\n271\n277\n281\n283\n293\n307\n311\n313\n317\n331\n337\n347\n349\n353\n359\n367\n373\n379\n383\n389\n397\n401\n409\n419\n421\n431\n433\n439\n443\n449\n457\n461\n463\n467\n479\n487\n491\n499\n503\n509\n521\n523\n541\n547\n557\n563\n569\n571\n577\n587\n593\n599\n601\n607\n613\n617\n619\n631\n641\n643\n647\n653\n659\n661\n673\n677",
"base64Encoded": false,
"contentType": "text/plain"
}
},
"exitCode": 0
}
}
]

View file

@ -1,40 +0,0 @@
inputFiles = [] # DO NOT CHANGE THIS LINE

# REQUIREMENTS:

def is_prime(n):
    """Trial-division primality test; skips multiples of 2 and 3 (6k±1 form)."""
    if n < 2:
        return False
    if n in (2, 3):
        return True
    if n % 2 == 0 or n % 3 == 0:
        return False
    k = 5
    while k * k <= n:
        if n % k == 0 or n % (k + 2) == 0:
            return False
        k += 6
    return True

def generate_primes(count):
    """Collect the first `count` primes by testing successive integers."""
    found = []
    n = 2
    while len(found) < count:
        if is_prime(n):
            found.append(n)
        n += 1
    return found

primes = generate_primes(202)
prime_numbers_content = "\n".join(map(str, primes))

# Package the newline-separated primes as the expected JSON file descriptor.
result = {
    "prime_numbers.txt": {
        "content": prime_numbers_content,
        "base64Encoded": False,
        "contentType": "text/plain"
    }
}

import json
print(json.dumps(result))

View file

@ -1,19 +0,0 @@
[
{
"attempt": 1,
"code": "inputFiles = [] # DO NOT CHANGE THIS LINE\n\n# REQUIREMENTS: \n\ndef is_prime(n):\n if n <= 1:\n return False\n if n <= 3:\n return True\n if n % 2 == 0 or n % 3 == 0:\n return False\n i = 5\n while i * i <= n:\n if n % i == 0 or n % (i + 2) == 0:\n return False\n i += 6\n return True\n\ndef generate_primes(count):\n primes = []\n num = 2\n while len(primes) < count:\n if is_prime(num):\n primes.append(num)\n num += 1\n return primes\n\nprimes = generate_primes(202)\nprime_numbers_content = \"\\n\".join(map(str, primes))\n\nresult = {\n \"prime_numbers.txt\": {\n \"content\": prime_numbers_content,\n \"base64Encoded\": False,\n \"contentType\": \"text/plain\"\n }\n}\n\nimport json\nprint(json.dumps(result))",
"result": {
"success": true,
"output": "{\"prime_numbers.txt\": {\"content\": \"2\\n3\\n5\\n7\\n11\\n13\\n17\\n19\\n23\\n29\\n31\\n37\\n41\\n43\\n47\\n53\\n59\\n61\\n67\\n71\\n73\\n79\\n83\\n89\\n97\\n101\\n103\\n107\\n109\\n113\\n127\\n131\\n137\\n139\\n149\\n151\\n157\\n163\\n167\\n173\\n179\\n181\\n191\\n193\\n197\\n199\\n211\\n223\\n227\\n229\\n233\\n239\\n241\\n251\\n257\\n263\\n269\\n271\\n277\\n281\\n283\\n293\\n307\\n311\\n313\\n317\\n331\\n337\\n347\\n349\\n353\\n359\\n367\\n373\\n379\\n383\\n389\\n397\\n401\\n409\\n419\\n421\\n431\\n433\\n439\\n443\\n449\\n457\\n461\\n463\\n467\\n479\\n487\\n491\\n499\\n503\\n509\\n521\\n523\\n541\\n547\\n557\\n563\\n569\\n571\\n577\\n587\\n593\\n599\\n601\\n607\\n613\\n617\\n619\\n631\\n641\\n643\\n647\\n653\\n659\\n661\\n673\\n677\\n683\\n691\\n701\\n709\\n719\\n727\\n733\\n739\\n743\\n751\\n757\\n761\\n769\\n773\\n787\\n797\\n809\\n811\\n821\\n823\\n827\\n829\\n839\\n853\\n857\\n859\\n863\\n877\\n881\\n883\\n887\\n907\\n911\\n919\\n929\\n937\\n941\\n947\\n953\\n967\\n971\\n977\\n983\\n991\\n997\\n1009\\n1013\\n1019\\n1021\\n1031\\n1033\\n1039\\n1049\\n1051\\n1061\\n1063\\n1069\\n1087\\n1091\\n1093\\n1097\\n1103\\n1109\\n1117\\n1123\\n1129\\n1151\\n1153\\n1163\\n1171\\n1181\\n1187\\n1193\\n1201\\n1213\\n1217\\n1223\\n1229\\n1231\", \"base64Encoded\": false, \"contentType\": \"text/plain\"}}\n",
"error": "",
"result": {
"prime_numbers.txt": {
"content": "2\n3\n5\n7\n11\n13\n17\n19\n23\n29\n31\n37\n41\n43\n47\n53\n59\n61\n67\n71\n73\n79\n83\n89\n97\n101\n103\n107\n109\n113\n127\n131\n137\n139\n149\n151\n157\n163\n167\n173\n179\n181\n191\n193\n197\n199\n211\n223\n227\n229\n233\n239\n241\n251\n257\n263\n269\n271\n277\n281\n283\n293\n307\n311\n313\n317\n331\n337\n347\n349\n353\n359\n367\n373\n379\n383\n389\n397\n401\n409\n419\n421\n431\n433\n439\n443\n449\n457\n461\n463\n467\n479\n487\n491\n499\n503\n509\n521\n523\n541\n547\n557\n563\n569\n571\n577\n587\n593\n599\n601\n607\n613\n617\n619\n631\n641\n643\n647\n653\n659\n661\n673\n677\n683\n691\n701\n709\n719\n727\n733\n739\n743\n751\n757\n761\n769\n773\n787\n797\n809\n811\n821\n823\n827\n829\n839\n853\n857\n859\n863\n877\n881\n883\n887\n907\n911\n919\n929\n937\n941\n947\n953\n967\n971\n977\n983\n991\n997\n1009\n1013\n1019\n1021\n1031\n1033\n1039\n1049\n1051\n1061\n1063\n1069\n1087\n1091\n1093\n1097\n1103\n1109\n1117\n1123\n1129\n1151\n1153\n1163\n1171\n1181\n1187\n1193\n1201\n1213\n1217\n1223\n1229\n1231",
"base64Encoded": false,
"contentType": "text/plain"
}
},
"exitCode": 0
}
}
]

View file

@ -1,202 +0,0 @@
2
3
5
7
11
13
17
19
23
29
31
37
41
43
47
53
59
61
67
71
73
79
83
89
97
101
103
107
109
113
127
131
137
139
149
151
157
163
167
173
179
181
191
193
197
199
211
223
227
229
233
239
241
251
257
263
269
271
277
281
283
293
307
311
313
317
331
337
347
349
353
359
367
373
379
383
389
397
401
409
419
421
431
433
439
443
449
457
461
463
467
479
487
491
499
503
509
521
523
541
547
557
563
569
571
577
587
593
599
601
607
613
617
619
631
641
643
647
653
659
661
673
677
683
691
701
709
719
727
733
739
743
751
757
761
769
773
787
797
809
811
821
823
827
829
839
853
857
859
863
877
881
883
887
907
911
919
929
937
941
947
953
967
971
977
983
991
997
1009
1013
1019
1021
1031
1033
1039
1049
1051
1061
1063
1069
1087
1091
1093
1097
1103
1109
1117
1123
1129
1151
1153
1163
1171
1181
1187
1193
1201
1213
1217
1223
1229
1231

View file

@ -1,38 +0,0 @@
inputFiles = []  # DO NOT CHANGE THIS LINE

import json


def is_prime(n):
    """Return True if n is prime, using deterministic 6k±1 trial division."""
    if n <= 1:
        return False
    if n <= 3:
        return True
    if n % 2 == 0 or n % 3 == 0:
        return False
    i = 5
    while i * i <= n:
        if n % i == 0 or n % (i + 2) == 0:
            return False
        i += 6
    return True


def generate_primes(limit):
    """Return a list of the first `limit` prime numbers in ascending order."""
    primes = []
    num = 2
    while len(primes) < limit:
        if is_prime(num):
            primes.append(num)
        num += 1
    return primes


# Build the requested output file payload and emit it as JSON on stdout.
primes = generate_primes(1000)
primes_content = "\n".join(map(str, primes))

result = {
    "prime_numbers.txt": {
        "content": primes_content,
        "base64Encoded": False,
        "contentType": "text/plain"
    }
}

print(json.dumps(result))

File diff suppressed because one or more lines are too long

View file

@ -1,38 +0,0 @@
inputFiles = []  # DO NOT CHANGE THIS LINE

import json


def is_prime(n):
    """Return True if n is prime, using deterministic 6k±1 trial division."""
    if n <= 1:
        return False
    if n <= 3:
        return True
    if n % 2 == 0 or n % 3 == 0:
        return False
    i = 5
    while i * i <= n:
        if n % i == 0 or n % (i + 2) == 0:
            return False
        i += 6
    return True


def generate_primes(count):
    """Return a list of the first `count` prime numbers in ascending order."""
    primes = []
    num = 2
    while len(primes) < count:
        if is_prime(num):
            primes.append(num)
        num += 1
    return primes


# Build the requested output file payload and emit it as JSON on stdout.
primes = generate_primes(999)
prime_numbers_content = "\n".join(map(str, primes))

result = {
    "prime_numbers.txt": {
        "content": prime_numbers_content,
        "base64Encoded": False,
        "contentType": "text/plain"
    }
}

print(json.dumps(result))

File diff suppressed because one or more lines are too long

View file

@ -1,999 +0,0 @@
2
3
5
7
11
13
17
19
23
29
31
37
41
43
47
53
59
61
67
71
73
79
83
89
97
101
103
107
109
113
127
131
137
139
149
151
157
163
167
173
179
181
191
193
197
199
211
223
227
229
233
239
241
251
257
263
269
271
277
281
283
293
307
311
313
317
331
337
347
349
353
359
367
373
379
383
389
397
401
409
419
421
431
433
439
443
449
457
461
463
467
479
487
491
499
503
509
521
523
541
547
557
563
569
571
577
587
593
599
601
607
613
617
619
631
641
643
647
653
659
661
673
677
683
691
701
709
719
727
733
739
743
751
757
761
769
773
787
797
809
811
821
823
827
829
839
853
857
859
863
877
881
883
887
907
911
919
929
937
941
947
953
967
971
977
983
991
997
1009
1013
1019
1021
1031
1033
1039
1049
1051
1061
1063
1069
1087
1091
1093
1097
1103
1109
1117
1123
1129
1151
1153
1163
1171
1181
1187
1193
1201
1213
1217
1223
1229
1231
1237
1249
1259
1277
1279
1283
1289
1291
1297
1301
1303
1307
1319
1321
1327
1361
1367
1373
1381
1399
1409
1423
1427
1429
1433
1439
1447
1451
1453
1459
1471
1481
1483
1487
1489
1493
1499
1511
1523
1531
1543
1549
1553
1559
1567
1571
1579
1583
1597
1601
1607
1609
1613
1619
1621
1627
1637
1657
1663
1667
1669
1693
1697
1699
1709
1721
1723
1733
1741
1747
1753
1759
1777
1783
1787
1789
1801
1811
1823
1831
1847
1861
1867
1871
1873
1877
1879
1889
1901
1907
1913
1931
1933
1949
1951
1973
1979
1987
1993
1997
1999
2003
2011
2017
2027
2029
2039
2053
2063
2069
2081
2083
2087
2089
2099
2111
2113
2129
2131
2137
2141
2143
2153
2161
2179
2203
2207
2213
2221
2237
2239
2243
2251
2267
2269
2273
2281
2287
2293
2297
2309
2311
2333
2339
2341
2347
2351
2357
2371
2377
2381
2383
2389
2393
2399
2411
2417
2423
2437
2441
2447
2459
2467
2473
2477
2503
2521
2531
2539
2543
2549
2551
2557
2579
2591
2593
2609
2617
2621
2633
2647
2657
2659
2663
2671
2677
2683
2687
2689
2693
2699
2707
2711
2713
2719
2729
2731
2741
2749
2753
2767
2777
2789
2791
2797
2801
2803
2819
2833
2837
2843
2851
2857
2861
2879
2887
2897
2903
2909
2917
2927
2939
2953
2957
2963
2969
2971
2999
3001
3011
3019
3023
3037
3041
3049
3061
3067
3079
3083
3089
3109
3119
3121
3137
3163
3167
3169
3181
3187
3191
3203
3209
3217
3221
3229
3251
3253
3257
3259
3271
3299
3301
3307
3313
3319
3323
3329
3331
3343
3347
3359
3361
3371
3373
3389
3391
3407
3413
3433
3449
3457
3461
3463
3467
3469
3491
3499
3511
3517
3527
3529
3533
3539
3541
3547
3557
3559
3571
3581
3583
3593
3607
3613
3617
3623
3631
3637
3643
3659
3671
3673
3677
3691
3697
3701
3709
3719
3727
3733
3739
3761
3767
3769
3779
3793
3797
3803
3821
3823
3833
3847
3851
3853
3863
3877
3881
3889
3907
3911
3917
3919
3923
3929
3931
3943
3947
3967
3989
4001
4003
4007
4013
4019
4021
4027
4049
4051
4057
4073
4079
4091
4093
4099
4111
4127
4129
4133
4139
4153
4157
4159
4177
4201
4211
4217
4219
4229
4231
4241
4243
4253
4259
4261
4271
4273
4283
4289
4297
4327
4337
4339
4349
4357
4363
4373
4391
4397
4409
4421
4423
4441
4447
4451
4457
4463
4481
4483
4493
4507
4513
4517
4519
4523
4547
4549
4561
4567
4583
4591
4597
4603
4621
4637
4639
4643
4649
4651
4657
4663
4673
4679
4691
4703
4721
4723
4729
4733
4751
4759
4783
4787
4789
4793
4799
4801
4813
4817
4831
4861
4871
4877
4889
4903
4909
4919
4931
4933
4937
4943
4951
4957
4967
4969
4973
4987
4993
4999
5003
5009
5011
5021
5023
5039
5051
5059
5077
5081
5087
5099
5101
5107
5113
5119
5147
5153
5167
5171
5179
5189
5197
5209
5227
5231
5233
5237
5261
5273
5279
5281
5297
5303
5309
5323
5333
5347
5351
5381
5387
5393
5399
5407
5413
5417
5419
5431
5437
5441
5443
5449
5471
5477
5479
5483
5501
5503
5507
5519
5521
5527
5531
5557
5563
5569
5573
5581
5591
5623
5639
5641
5647
5651
5653
5657
5659
5669
5683
5689
5693
5701
5711
5717
5737
5741
5743
5749
5779
5783
5791
5801
5807
5813
5821
5827
5839
5843
5849
5851
5857
5861
5867
5869
5879
5881
5897
5903
5923
5927
5939
5953
5981
5987
6007
6011
6029
6037
6043
6047
6053
6067
6073
6079
6089
6091
6101
6113
6121
6131
6133
6143
6151
6163
6173
6197
6199
6203
6211
6217
6221
6229
6247
6257
6263
6269
6271
6277
6287
6299
6301
6311
6317
6323
6329
6337
6343
6353
6359
6361
6367
6373
6379
6389
6397
6421
6427
6449
6451
6469
6473
6481
6491
6521
6529
6547
6551
6553
6563
6569
6571
6577
6581
6599
6607
6619
6637
6653
6659
6661
6673
6679
6689
6691
6701
6703
6709
6719
6733
6737
6761
6763
6779
6781
6791
6793
6803
6823
6827
6829
6833
6841
6857
6863
6869
6871
6883
6899
6907
6911
6917
6947
6949
6959
6961
6967
6971
6977
6983
6991
6997
7001
7013
7019
7027
7039
7043
7057
7069
7079
7103
7109
7121
7127
7129
7151
7159
7177
7187
7193
7207
7211
7213
7219
7229
7237
7243
7247
7253
7283
7297
7307
7309
7321
7331
7333
7349
7351
7369
7393
7411
7417
7433
7451
7457
7459
7477
7481
7487
7489
7499
7507
7517
7523
7529
7537
7541
7547
7549
7559
7561
7573
7577
7583
7589
7591
7603
7607
7621
7639
7643
7649
7669
7673
7681
7687
7691
7699
7703
7717
7723
7727
7741
7753
7757
7759
7789
7793
7817
7823
7829
7841
7853
7867
7873
7877
7879
7883
7901
7907

View file

@ -1,39 +0,0 @@
inputFiles = []  # DO NOT CHANGE THIS LINE

import json


def is_prime(n):
    """Return True if n is prime, using deterministic 6k±1 trial division."""
    if n <= 1:
        return False
    if n <= 3:
        return True
    if n % 2 == 0 or n % 3 == 0:
        return False
    i = 5
    while i * i <= n:
        if n % i == 0 or n % (i + 2) == 0:
            return False
        i += 6
    return True


def generate_primes(count):
    """Return a list of the first `count` prime numbers in ascending order."""
    primes = []
    num = 2
    while len(primes) < count:
        if is_prime(num):
            primes.append(num)
        num += 1
    return primes


# Build the requested output file payload and emit it as JSON on stdout.
primes = generate_primes(779)
prime_numbers_content = "\n".join(map(str, primes))

result = {
    "prime_numbers.txt": {
        "content": prime_numbers_content,
        "base64Encoded": False,
        "contentType": "text/plain"
    }
}

print(json.dumps(result))

File diff suppressed because one or more lines are too long

View file

@ -1,779 +0,0 @@
2
3
5
7
11
13
17
19
23
29
31
37
41
43
47
53
59
61
67
71
73
79
83
89
97
101
103
107
109
113
127
131
137
139
149
151
157
163
167
173
179
181
191
193
197
199
211
223
227
229
233
239
241
251
257
263
269
271
277
281
283
293
307
311
313
317
331
337
347
349
353
359
367
373
379
383
389
397
401
409
419
421
431
433
439
443
449
457
461
463
467
479
487
491
499
503
509
521
523
541
547
557
563
569
571
577
587
593
599
601
607
613
617
619
631
641
643
647
653
659
661
673
677
683
691
701
709
719
727
733
739
743
751
757
761
769
773
787
797
809
811
821
823
827
829
839
853
857
859
863
877
881
883
887
907
911
919
929
937
941
947
953
967
971
977
983
991
997
1009
1013
1019
1021
1031
1033
1039
1049
1051
1061
1063
1069
1087
1091
1093
1097
1103
1109
1117
1123
1129
1151
1153
1163
1171
1181
1187
1193
1201
1213
1217
1223
1229
1231
1237
1249
1259
1277
1279
1283
1289
1291
1297
1301
1303
1307
1319
1321
1327
1361
1367
1373
1381
1399
1409
1423
1427
1429
1433
1439
1447
1451
1453
1459
1471
1481
1483
1487
1489
1493
1499
1511
1523
1531
1543
1549
1553
1559
1567
1571
1579
1583
1597
1601
1607
1609
1613
1619
1621
1627
1637
1657
1663
1667
1669
1693
1697
1699
1709
1721
1723
1733
1741
1747
1753
1759
1777
1783
1787
1789
1801
1811
1823
1831
1847
1861
1867
1871
1873
1877
1879
1889
1901
1907
1913
1931
1933
1949
1951
1973
1979
1987
1993
1997
1999
2003
2011
2017
2027
2029
2039
2053
2063
2069
2081
2083
2087
2089
2099
2111
2113
2129
2131
2137
2141
2143
2153
2161
2179
2203
2207
2213
2221
2237
2239
2243
2251
2267
2269
2273
2281
2287
2293
2297
2309
2311
2333
2339
2341
2347
2351
2357
2371
2377
2381
2383
2389
2393
2399
2411
2417
2423
2437
2441
2447
2459
2467
2473
2477
2503
2521
2531
2539
2543
2549
2551
2557
2579
2591
2593
2609
2617
2621
2633
2647
2657
2659
2663
2671
2677
2683
2687
2689
2693
2699
2707
2711
2713
2719
2729
2731
2741
2749
2753
2767
2777
2789
2791
2797
2801
2803
2819
2833
2837
2843
2851
2857
2861
2879
2887
2897
2903
2909
2917
2927
2939
2953
2957
2963
2969
2971
2999
3001
3011
3019
3023
3037
3041
3049
3061
3067
3079
3083
3089
3109
3119
3121
3137
3163
3167
3169
3181
3187
3191
3203
3209
3217
3221
3229
3251
3253
3257
3259
3271
3299
3301
3307
3313
3319
3323
3329
3331
3343
3347
3359
3361
3371
3373
3389
3391
3407
3413
3433
3449
3457
3461
3463
3467
3469
3491
3499
3511
3517
3527
3529
3533
3539
3541
3547
3557
3559
3571
3581
3583
3593
3607
3613
3617
3623
3631
3637
3643
3659
3671
3673
3677
3691
3697
3701
3709
3719
3727
3733
3739
3761
3767
3769
3779
3793
3797
3803
3821
3823
3833
3847
3851
3853
3863
3877
3881
3889
3907
3911
3917
3919
3923
3929
3931
3943
3947
3967
3989
4001
4003
4007
4013
4019
4021
4027
4049
4051
4057
4073
4079
4091
4093
4099
4111
4127
4129
4133
4139
4153
4157
4159
4177
4201
4211
4217
4219
4229
4231
4241
4243
4253
4259
4261
4271
4273
4283
4289
4297
4327
4337
4339
4349
4357
4363
4373
4391
4397
4409
4421
4423
4441
4447
4451
4457
4463
4481
4483
4493
4507
4513
4517
4519
4523
4547
4549
4561
4567
4583
4591
4597
4603
4621
4637
4639
4643
4649
4651
4657
4663
4673
4679
4691
4703
4721
4723
4729
4733
4751
4759
4783
4787
4789
4793
4799
4801
4813
4817
4831
4861
4871
4877
4889
4903
4909
4919
4931
4933
4937
4943
4951
4957
4967
4969
4973
4987
4993
4999
5003
5009
5011
5021
5023
5039
5051
5059
5077
5081
5087
5099
5101
5107
5113
5119
5147
5153
5167
5171
5179
5189
5197
5209
5227
5231
5233
5237
5261
5273
5279
5281
5297
5303
5309
5323
5333
5347
5351
5381
5387
5393
5399
5407
5413
5417
5419
5431
5437
5441
5443
5449
5471
5477
5479
5483
5501
5503
5507
5519
5521
5527
5531
5557
5563
5569
5573
5581
5591
5623
5639
5641
5647
5651
5653
5657
5659
5669
5683
5689
5693
5701
5711
5717
5737
5741
5743
5749
5779
5783
5791
5801
5807
5813
5821
5827
5839
5843
5849
5851
5857
5861
5867
5869
5879
5881
5897
5903
5923
5927

View file

@ -1,38 +0,0 @@
inputFiles = []  # DO NOT CHANGE THIS LINE

import json


def is_prime(n):
    """Return True if n is prime, using deterministic 6k±1 trial division."""
    if n <= 1:
        return False
    if n <= 3:
        return True
    if n % 2 == 0 or n % 3 == 0:
        return False
    i = 5
    while i * i <= n:
        if n % i == 0 or n % (i + 2) == 0:
            return False
        i += 6
    return True


def generate_primes(limit):
    """Return a list of the first `limit` prime numbers in ascending order."""
    primes = []
    num = 2
    while len(primes) < limit:
        if is_prime(num):
            primes.append(num)
        num += 1
    return primes


# Build the requested output file payload and emit it as JSON on stdout.
primes = generate_primes(1000)
primes_content = "\n".join(map(str, primes))

result = {
    "first_1000_primes.txt": {
        "content": primes_content,
        "base64Encoded": False,
        "contentType": "text/plain"
    }
}

print(json.dumps(result))

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large Load diff

View file

@ -1,270 +0,0 @@
"""
Agent Registry Module.
Provides a central registry system for all available agents.
Optimized for the standardized task processing pattern.
"""
import os
import logging
import importlib
import uuid
from datetime import datetime
from typing import Dict, Any, List, Optional
from modules.mimeUtils import isTextMimeType, determineContentEncoding
logger = logging.getLogger(__name__)
"""
Updates to the AgentBase class in workflowAgentsRegistry.py to include base64Encoded flag handling.
"""
class AgentBase:
    """
    Base class for all chat agents.
    Defines the standardized interface for task processing.

    Concrete agents override processTask(); the helpers below give every
    agent the same document-output formatting behaviour.
    """

    def __init__(self):
        """Initialize the base agent with a default identity and no dependencies."""
        self.name = "base-agent"
        self.description = "Basic agent functionality"
        self.capabilities = []
        # External AI-service dependency; injected later via setDependencies().
        self.mydom = None

    def setDependencies(self, mydom=None):
        """Set external dependencies for the agent."""
        self.mydom = mydom

    def getAgentInfo(self) -> Dict[str, Any]:
        """
        Return standardized information about the agent's capabilities.

        Returns:
            Dictionary with name, description, and capabilities
        """
        return {
            "name": self.name,
            "description": self.description,
            "capabilities": self.capabilities
        }

    async def processTask(self, task: Dict[str, Any]) -> Dict[str, Any]:
        """
        Process a standardized task structure and return results.
        This method must be implemented by all concrete agent classes.

        Args:
            task: A dictionary containing:
                - taskId: Unique ID for this task
                - workflowId: ID of the parent workflow (optional)
                - prompt: The main instruction for the agent
                - inputDocuments: List of document objects to process
                - outputSpecifications: List of required output documents
                - context: Additional contextual information

        Returns:
            A dictionary containing:
                - feedback: Text response explaining what the agent did
                - documents: List of document objects created by the agent,
                  each containing a "base64Encoded" flag in addition to
                  "label" and "content"
        """
        # Base implementation - should be overridden by specialized agents.
        logger.warning(f"Agent {self.name} is using the default implementation of processTask")
        return {
            "feedback": f"The processTask method was not implemented by agent '{self.name}'.",
            "documents": []
        }

    def determineBase64EncodingFlag(self, filename: str, content: Any, mimeType: str = None) -> bool:
        """Wrapper for the determineContentEncoding utility function."""
        return determineContentEncoding(filename, content, mimeType)

    def isTextMimeType(self, mimeType: str) -> bool:
        """Wrapper for the isTextMimeType utility function."""
        return isTextMimeType(mimeType)

    def formatAgentDocumentOutput(self, label: str, content: Any, contentType: str = None) -> Dict[str, Any]:
        """
        Helper method to properly format a document output with base64Encoded flag and metadata.

        Args:
            label: Name of the document
            content: Content of the document (str or bytes)
            contentType: Optional content type for the document

        Returns:
            Properly formatted document dictionary
        """
        import base64
        import binascii

        # Determine if content should be base64 encoded
        should_base64_encode = self.determineBase64EncodingFlag(label, content)
        # Process content based on type and encoding flag
        formatted_content = content
        if should_base64_encode:
            if isinstance(content, bytes):
                # Convert binary to base64
                formatted_content = base64.b64encode(content).decode('utf-8')
            elif isinstance(content, str):
                try:
                    # Bugfix: validate=True is required — without it b64decode
                    # ignores non-alphabet characters and accepts almost any
                    # string, so the "already encoded" check never worked.
                    # Also narrowed from a bare except, which swallowed
                    # KeyboardInterrupt/SystemExit.
                    base64.b64decode(content, validate=True)
                    # Appears to be valid base64 already; keep as-is.
                    formatted_content = content
                except (binascii.Error, ValueError):
                    # Not valid base64, so encode it.
                    formatted_content = base64.b64encode(content.encode('utf-8')).decode('utf-8')
        # Create document with metadata
        doc = {
            "label": label,
            "content": formatted_content,
            "base64Encoded": should_base64_encode,
            "metadata": {}
        }
        # Add content type if provided
        if contentType:
            doc["metadata"]["contentType"] = contentType
        return doc
class AgentRegistry:
    """Central registry for all available agents in the system.

    Singleton: obtain the shared instance via getInstance(). On construction
    it scans its own package directory for ``agent*.py`` modules and
    registers one agent per module.
    """

    # Shared singleton instance; populated lazily by getInstance().
    _instance = None

    @classmethod
    def getInstance(cls):
        """Return a singleton instance of the agent registry."""
        if cls._instance is None:
            cls._instance = cls()
        return cls._instance

    def __init__(self):
        """Initialize the agent registry and eagerly load all agent modules.

        Raises:
            RuntimeError: if called directly after the singleton already exists.
        """
        if AgentRegistry._instance is not None:
            raise RuntimeError("Singleton instance already exists - use getInstance()")
        self.agents = {}
        # AI service dependency shared with every registered agent.
        self.mydom = None
        self._loadAgents()

    def _loadAgents(self):
        """Load all available agents from modules.

        Discovers files named ``agent*.py`` in this package directory and,
        for each, prefers a ``getAgent<Name>()`` factory over directly
        instantiating an ``Agent<Name>`` class.
        """
        logger.info("Loading agent modules...")
        # List of agent modules to load
        agentModules = []
        agentDir = os.path.dirname(__file__)
        # Search the directory for agent modules
        for filename in os.listdir(agentDir):
            if filename.startswith("agent") and filename.endswith(".py"):
                agentModules.append(filename[0:-3])  # Remove .py extension
        if not agentModules:
            logger.warning("No agent modules found")
            return
        logger.info(f"{len(agentModules)} agent modules found")
        # Load each agent module
        for moduleName in agentModules:
            try:
                # Import the module
                module = importlib.import_module(f"modules.{moduleName}")
                # Derive the expected class/getter names from the module name,
                # e.g. "agentCoder" -> "AgentCoder" / "getAgentCoder".
                agentName = moduleName.split("agent")[-1]
                className = f"Agent{agentName}"
                getterName = f"getAgent{agentName}"
                agent = None
                # Try to get the agent via the get*Agent function
                if hasattr(module, getterName):
                    getterFunc = getattr(module, getterName)
                    agent = getterFunc()
                    logger.info(f"Agent '{agent.name}' loaded via {getterName}()")
                # Alternatively, try to instantiate the agent directly
                elif hasattr(module, className):
                    agentClass = getattr(module, className)
                    agent = agentClass()
                    logger.info(f"Agent '{agent.name}' directly instantiated")
                if agent:
                    # Register the agent
                    self.registerAgent(agent)
                else:
                    logger.warning(f"No agent class or getter function found in module {moduleName}")
            except ImportError as e:
                logger.error(f"Module {moduleName} could not be imported: {e}")
            except Exception as e:
                # Keep loading remaining modules even if one agent is broken.
                logger.error(f"Error loading agent from module {moduleName}: {e}")

    def setMydom(self, mydom):
        """Set the AI service for all agents (stores it and propagates it)."""
        self.mydom = mydom
        self.updateAgentDependencies()

    def updateAgentDependencies(self):
        """Update dependencies for all registered agents."""
        for agentId, agent in self.agents.items():
            if hasattr(agent, 'setDependencies'):
                agent.setDependencies(mydom=self.mydom)

    def registerAgent(self, agent):
        """
        Register an agent in the registry.

        The agent's ``name`` attribute is used as its registry key; a later
        registration with the same name replaces the earlier one.

        Args:
            agent: The agent to register
        """
        agentId = getattr(agent, 'name', "unknown_agent")
        # Initialize agent with dependencies
        if hasattr(agent, 'setDependencies'):
            agent.setDependencies(mydom=self.mydom)
        self.agents[agentId] = agent
        logger.debug(f"Agent '{agent.name}' registered")

    def getAgent(self, agentIdentifier: str):
        """
        Return an agent instance

        Args:
            agentIdentifier: ID or type of the desired agent

        Returns:
            Agent instance or None if not found
        """
        if agentIdentifier in self.agents:
            agent = self.agents[agentIdentifier]
            # Ensure the agent has the AI service (it may have been
            # registered before setMydom() was called).
            if hasattr(agent, 'setDependencies') and self.mydom:
                agent.setDependencies(mydom=self.mydom)
            return agent
        logger.error(f"Agent with identifier '{agentIdentifier}' not found")
        return None

    def getAllAgents(self) -> Dict[str, Any]:
        """Return all registered agents keyed by agent name."""
        return self.agents

    def getAgentInfos(self) -> List[Dict[str, Any]]:
        """Return information about all registered agents (deduplicated)."""
        agentInfos = []
        # Track agent objects already reported, since one agent instance can
        # in principle appear under multiple registry keys.
        seenAgents = set()
        for agent in self.agents.values():
            if agent not in seenAgents:
                agentInfos.append(agent.getAgentInfo())
                seenAgents.add(agent)
        return agentInfos
# Singleton factory for the agent registry
def getAgentRegistry():
    """Singleton factory: return the shared AgentRegistry instance."""
    registry = AgentRegistry.getInstance()
    return registry

View file

@ -1,670 +0,0 @@
"""
Data analyst agent for analysis and interpretation of data.
Focuses on output-first design with AI-powered analysis.
"""
import logging
import json
import io
import base64
from typing import Dict, Any, List
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from modules.workflowAgentsRegistry import AgentBase
logger = logging.getLogger(__name__)
class AgentAnalyst(AgentBase):
"""AI-driven agent for data analysis and visualization"""
def __init__(self):
    """Set up the analyst agent's identity, capability list and plot defaults."""
    super().__init__()
    self.name = "analyst"
    self.description = (
        "Analyzes data using AI-powered insights and visualizations, "
        "produce diagrams and visualizations"
    )
    self.capabilities = [
        "dataAnalysis",
        "statistics",
        "visualization",
        "dataInterpretation",
        "reportGeneration",
    ]
    # Apply the default seaborn whitegrid theme for all generated figures.
    plt.style.use('seaborn-v0_8-whitegrid')
def setDependencies(self, mydom=None):
    """Attach the external AI-service dependency to this agent instance."""
    self.mydom = mydom
async def processTask(self, task: Dict[str, Any]) -> Dict[str, Any]:
    """
    Process a task by focusing on required outputs and using AI to generate them.

    Flow: extract datasets from the input documents, ask the AI for an
    analysis plan, then generate one document per output specification,
    choosing the generator by the spec's file extension.

    Args:
        task: Task dictionary with prompt, inputDocuments, outputSpecifications

    Returns:
        Dictionary with feedback and documents; on any exception, a
        feedback-only error result with an empty documents list.
    """
    try:
        # Extract task information
        prompt = task.get("prompt", "")
        inputDocuments = task.get("inputDocuments", [])
        outputSpecs = task.get("outputSpecifications", [])
        # Check AI service — this agent cannot work without it.
        if not self.mydom:
            return {
                "feedback": "The Analyst agent requires an AI service to function.",
                "documents": []
            }
        # Extract data from documents - focusing only on dataExtracted
        datasets, documentContext = self._extractData(inputDocuments)
        # Generate task analysis to understand what's needed
        analysisPlan = await self._analyzeTask(prompt, documentContext, datasets, outputSpecs)
        # Generate all required output documents
        documents = []
        # If no output specs provided, create default analysis outputs
        # (currently the default is an empty list, i.e. no documents).
        if not outputSpecs:
            outputSpecs = []
        # Process each output specification
        for spec in outputSpecs:
            outputLabel = spec.get("label", "")
            outputDescription = spec.get("description", "")
            # Determine type based on file extension; default to plain text.
            outputType = outputLabel.split('.')[-1].lower() if '.' in outputLabel else "txt"
            # Generate appropriate content based on output type
            if outputType in ['png', 'jpg', 'jpeg', 'svg']:
                # Create visualization
                document = await self._createVisualization(
                    datasets, prompt, outputLabel, analysisPlan, outputDescription
                )
                documents.append(document)
            elif outputType in ['csv', 'json', 'xlsx']:
                # Create data document
                document = await self._createDataDocument(
                    datasets, prompt, outputLabel, analysisPlan, outputDescription
                )
                documents.append(document)
            else:
                # Create text document (report, analysis, etc.)
                document = await self._createTextDocument(
                    datasets, documentContext, prompt, outputLabel,
                    outputType, analysisPlan, outputDescription
                )
                documents.append(document)
        # Generate feedback from the plan's approach plus any key insights.
        feedback = f"{analysisPlan.get('analysisApproach')}"
        if analysisPlan.get("keyInsights"):
            feedback += f"\n\n{analysisPlan.get('keyInsights')}"
        return {
            "feedback": feedback,
            "documents": documents
        }
    except Exception as e:
        # Fail soft: report the error as feedback rather than propagating.
        logger.error(f"Error in analysis: {str(e)}", exc_info=True)
        return {
            "feedback": f"Error during analysis: {str(e)}",
            "documents": []
        }
def _extractData(self, documents: List[Dict[str, Any]]) -> tuple:
"""
Extract data from documents, focusing on dataExtracted fields.
Args:
documents: List of input documents
Returns:
Tuple of (datasets dictionary, document context text)
"""
datasets = {}
documentContext = ""
# Process each document
for doc in documents:
docName = doc.get("name", "unnamed")
if doc.get("ext"):
docName = f"{docName}.{doc.get('ext')}"
documentContext += f"\n\n--- {docName} ---\n"
# Process contents
for content in doc.get("contents", []):
# Focus only on dataExtracted
if content.get("dataExtracted"):
extractedText = content.get("dataExtracted", "")
documentContext += extractedText
# Try to parse as structured data if appropriate
if docName.lower().endswith(('.csv', '.tsv')):
try:
df = pd.read_csv(io.StringIO(extractedText))
datasets[docName] = df
except:
pass
elif docName.lower().endswith('.json'):
try:
jsonData = json.loads(extractedText)
if isinstance(jsonData, list):
df = pd.DataFrame(jsonData)
datasets[docName] = df
elif isinstance(jsonData, dict):
# Handle nested JSON structures
if any(isinstance(v, list) for v in jsonData.values()):
for key, value in jsonData.items():
if isinstance(value, list) and len(value) > 0:
df = pd.DataFrame(value)
datasets[f"{docName}:{key}"] = df
else:
df = pd.DataFrame([jsonData])
datasets[docName] = df
except:
pass
# Try to detect tabular data in text content
if docName not in datasets and len(extractedText.splitlines()) > 2:
lines = extractedText.splitlines()
if any(',' in line for line in lines[:5]):
try:
df = pd.read_csv(io.StringIO(extractedText))
if len(df.columns) > 1:
datasets[docName] = df
except:
pass
elif any('\t' in line for line in lines[:5]):
try:
df = pd.read_csv(io.StringIO(extractedText), sep='\t')
if len(df.columns) > 1:
datasets[docName] = df
except:
pass
return datasets, documentContext
async def _analyzeTask(self, prompt: str, context: str, datasets: Dict, outputSpecs: List) -> Dict:
    """
    Use AI to analyze the task and create a plan for analysis.

    Summarizes each dataset (shape, columns, dtypes, sample rows), sends a
    structured prompt to the AI service, and parses the JSON plan out of the
    response. On any failure a generic fallback plan is returned, so this
    method never raises.

    Args:
        prompt: The task prompt
        context: Document context text
        datasets: Dictionary of extracted datasets
        outputSpecs: Output specifications

    Returns:
        Analysis plan dictionary with keys analysisType, keyQuestions,
        recommendedVisualizations, keyInsights, analysisApproach
    """
    # Prepare dataset information
    datasetInfo = {}
    for name, df in datasets.items():
        try:
            datasetInfo[name] = {
                "shape": df.shape,
                "columns": df.columns.tolist(),
                "dtypes": {col: str(df[col].dtype) for col in df.columns},
                "sample": df.head(3).to_dict(orient='records')
            }
        except:
            # NOTE(review): bare except is intentional best-effort here but
            # also catches KeyboardInterrupt — consider narrowing to Exception.
            datasetInfo[name] = {"error": "Could not process dataset"}
    # Build the AI prompt; context is truncated to keep the prompt bounded.
    analysisPrompt = f"""
Analyze this data analysis task and create a plan.
TASK: {prompt}
AVAILABLE DATA:
{json.dumps(datasetInfo, indent=2)}
DOCUMENT CONTEXT:
{context[:1000]}... (truncated)
OUTPUT REQUIREMENTS:
{json.dumps(outputSpecs, indent=2)}
Create a detailed analysis plan in JSON format with the following structure:
{{
"analysisType": "statistical|trend|comparative|predictive|cluster|general",
"keyQuestions": ["question1", "question2"],
"recommendedVisualizations": [{{
"type": "chart_type",
"dataSource": "dataset_name",
"variables": ["col1", "col2"],
"purpose": "explanation"
}}],
"keyInsights": "brief summary of initial insights",
"analysisApproach": "brief description of recommended approach"
}}
Only return valid JSON. No preamble or explanations.
"""
    try:
        response = await self.mydom.callAi([
            {"role": "system", "content": "You are a data analysis expert. Respond with valid JSON only."},
            {"role": "user", "content": analysisPrompt}
        ], produceUserAnswer = True)
        # Extract JSON from response: take the span from the first '{' to
        # the last '}' to tolerate surrounding chatter from the model.
        jsonStart = response.find('{')
        jsonEnd = response.rfind('}') + 1
        if jsonStart >= 0 and jsonEnd > jsonStart:
            plan = json.loads(response[jsonStart:jsonEnd])
            return plan
        else:
            # Fallback if JSON not found
            return {
                "analysisType": "general",
                "keyQuestions": ["What insights can be extracted from this data?"],
                "recommendedVisualizations": [],
                "keyInsights": "Analysis plan could not be created",
                "analysisApproach": "General exploratory analysis"
            }
    except Exception as e:
        # Same fallback on AI-call or JSON-parse failure.
        logger.warning(f"Error creating analysis plan: {str(e)}")
        return {
            "analysisType": "general",
            "keyQuestions": ["What insights can be extracted from this data?"],
            "recommendedVisualizations": [],
            "keyInsights": "Analysis plan could not be created",
            "analysisApproach": "General exploratory analysis"
        }
async def _createVisualization(self, datasets: Dict, prompt: str, outputLabel: str,
                               analysisPlan: Dict, description: str) -> Dict:
    """
    Create a visualization document using AI guidance.

    Asks the AI service for matplotlib/seaborn plotting code tailored to the
    task, executes that code against the supplied datasets, and captures the
    resulting figure as a base64-encoded image.

    Args:
        datasets: Dictionary of datasets (name -> pandas DataFrame)
        prompt: Original task prompt
        outputLabel: Output filename; its extension selects the image format
        analysisPlan: Analysis plan from AI (may recommend visualizations)
        description: Output description

    Returns:
        Visualization document dict (label, base64 image content, metadata).

    NOTE(review): the AI-generated code is run via exec() in this process
    with full globals(); if model output can ever be attacker-influenced
    this is a code-injection risk — consider sandboxing.
    """
    # Determine format from filename; fall back to PNG for unknown extensions.
    formatType = outputLabel.split('.')[-1].lower()
    if formatType not in ['png', 'jpg', 'jpeg', 'svg']:
        formatType = 'png'
    # If no datasets available, create error message image
    if not datasets:
        plt.figure(figsize=(10, 6))
        plt.text(0.5, 0.5, "No data available for visualization",
                 ha='center', va='center', fontsize=14)
        plt.tight_layout()
        imgData = self._getImageBase64(formatType)
        plt.close()
        return {
            "label": outputLabel,
            "content": imgData,
            "metadata": {
                "contentType": f"image/{formatType}"
            }
        }
    # Get recommended visualization from plan
    recommendedViz = analysisPlan.get("recommendedVisualizations", [])
    # Prepare dataset info for the first dataset if none specified
    if not recommendedViz and datasets:
        name, df = next(iter(datasets.items()))
        recommendedViz = [{
            "type": "auto",
            "dataSource": name,
            "variables": df.columns.tolist()[:5],  # limit to first 5 columns
            "purpose": "general analysis"
        }]
    # Create visualization code prompt
    vizPrompt = f"""
    Generate Python matplotlib/seaborn code to create a visualization for:
    TASK: {prompt}
    VISUALIZATION REQUIREMENTS:
    - Output format: {formatType}
    - Filename: {outputLabel}
    - Description: {description}
    RECOMMENDED VISUALIZATION:
    {json.dumps(recommendedViz, indent=2)}
    AVAILABLE DATASETS:
    """
    # Add dataset info for recommended sources
    for viz in recommendedViz:
        dataSource = viz.get("dataSource")
        if dataSource in datasets:
            df = datasets[dataSource]
            vizPrompt += f"\nDataset '{dataSource}':\n"
            vizPrompt += f"- Shape: {df.shape}\n"
            vizPrompt += f"- Columns: {df.columns.tolist()}\n"
            vizPrompt += f"- Sample data: {df.head(3).to_dict(orient='records')}\n"
    vizPrompt += """
    Generate ONLY Python code that:
    1. Uses matplotlib and/or seaborn to create a clear visualization
    2. Sets figure size to (10, 6)
    3. Includes appropriate titles, labels, and legend
    4. Uses professional color schemes
    5. Handles any missing data gracefully
    Return ONLY executable Python code, no explanations or markdown.
    """
    try:
        # Get visualization code from AI
        vizCode = await self.mydom.callAi([
            {"role": "system", "content": "You are a data visualization expert. Provide only executable Python code."},
            {"role": "user", "content": vizPrompt}
        ], produceUserAnswer = True)
        # Clean code: strip markdown code fences the model may emit
        vizCode = vizCode.replace("```python", "").replace("```", "").strip()
        # Execute visualization code
        plt.figure(figsize=(10, 6))
        # Make local variables available to the code
        localVars = {
            "plt": plt,
            "sns": sns,
            "pd": pd,
            "np": __import__('numpy')
        }
        # Add datasets to local variables
        for name, df in datasets.items():
            # Create a sanitized variable name
            varName = ''.join(c if c.isalnum() else '_' for c in name)
            localVars[varName] = df
            # Also add with standard names for simpler code
            if "df" not in localVars:
                localVars["df"] = df
            elif "df2" not in localVars:
                localVars["df2"] = df
        # Execute the visualization code
        exec(vizCode, globals(), localVars)
        # Capture the image
        imgData = self._getImageBase64(formatType)
        plt.close()
        return self.formatAgentDocumentOutput(outputLabel, imgData, f"image/{formatType}")
    except Exception as e:
        logger.error(f"Error creating visualization: {str(e)}", exc_info=True)
        # Create error message image
        plt.figure(figsize=(10, 6))
        plt.text(0.5, 0.5, f"Visualization error: {str(e)}",
                 ha='center', va='center', fontsize=12)
        plt.tight_layout()
        imgData = self._getImageBase64(formatType)
        plt.close()
        return self.formatAgentDocumentOutput(outputLabel, imgData, f"image/{formatType}")
async def _createDataDocument(self, datasets: Dict, prompt: str, outputLabel: str,
                              analysisPlan: Dict, description: str) -> Dict:
    """
    Create a data document (e.g., CSV, JSON) based on analysis.

    Asks the AI service for Python processing code, executes it against the
    datasets, and uses the string left in its 'result' variable as the
    document content.

    Args:
        datasets: Dictionary of datasets (name -> pandas DataFrame)
        prompt: Original task prompt
        outputLabel: Output filename; its extension selects the format
        analysisPlan: Analysis plan from AI
        description: Output description

    Returns:
        Data document dict (label, content, metadata).

    NOTE(review): AI-generated code is exec()'d in-process — a code
    injection risk if the model output can be attacker-influenced.
    """
    # Determine format from filename
    formatType = outputLabel.split('.')[-1].lower()
    # If no datasets available, return error message
    if not datasets:
        return {
            "label": outputLabel,
            "content": f"No data available for processing into {formatType} format.",
            "metadata": {
                "contentType": "text/plain"
            }
        }
    # Generate data processing instructions
    dataPrompt = f"""
    Create Python code to process datasets and generate a {formatType} file for:
    TASK: {prompt}
    OUTPUT REQUIREMENTS:
    - Format: {formatType}
    - Filename: {outputLabel}
    - Description: {description}
    ANALYSIS CONTEXT:
    {json.dumps(analysisPlan, indent=2)}
    AVAILABLE DATASETS:
    """
    # Add dataset info
    for name, df in datasets.items():
        dataPrompt += f"\nDataset '{name}':\n"
        dataPrompt += f"- Shape: {df.shape}\n"
        dataPrompt += f"- Columns: {df.columns.tolist()}\n"
        dataPrompt += f"- Sample data: {df.head(3).to_dict(orient='records')}\n"
    dataPrompt += """
    Generate Python code that:
    1. Processes the available dataset(s)
    2. Performs necessary transformations, aggregations, or calculations
    3. Outputs the result in the requested format
    4. Returns the content as a string variable named 'result'
    Return ONLY executable Python code, no explanations or markdown.
    """
    try:
        # Get data processing code from AI
        dataCode = await self.mydom.callAi([
            {"role": "system", "content": "You are a data processing expert. Provide only executable Python code."},
            {"role": "user", "content": dataPrompt}
        ], produceUserAnswer = True)
        # Clean code: strip markdown code fences the model may emit
        dataCode = dataCode.replace("```python", "").replace("```", "").strip()
        # Setup execution environment
        localVars = {"pd": pd, "np": __import__('numpy'), "io": io}
        # Add datasets to local variables
        for name, df in datasets.items():
            # Create a sanitized variable name
            varName = ''.join(c if c.isalnum() else '_' for c in name)
            localVars[varName] = df
            # Also add with standard names for simpler code
            if "df" not in localVars:
                localVars["df"] = df
            elif "df2" not in localVars:
                localVars["df2"] = df
        # Execute the code
        exec(dataCode, globals(), localVars)
        # Get the result (the generated code is expected to set 'result')
        result = localVars.get("result", "No output was generated.")
        # Determine content type
        contentType = "text/csv" if formatType == "csv" else \
                      "application/json" if formatType == "json" else \
                      "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet" if formatType == "xlsx" else \
                      "text/plain"
        return self.formatAgentDocumentOutput(outputLabel, result, contentType)
    except Exception as e:
        logger.error(f"Error creating data document: {str(e)}", exc_info=True)
        return {
            "label": outputLabel,
            "content": f"Error generating {formatType} document: {str(e)}",
            "metadata": {
                "contentType": "text/plain"
            }
        }
async def _createTextDocument(self, datasets: Dict, context: str, prompt: str,
                              outputLabel: str, formatType: str,
                              analysisPlan: Dict, description: str) -> Dict:
    """
    Create a text document (report, analysis, etc.) based on analysis.

    Builds a textual summary of every dataset, asks the AI service to author
    the complete document in the requested format, then applies minor
    fix-ups (markdown gets a top-level heading, HTML gets an <html> wrapper
    when missing).

    Args:
        datasets: Dictionary of datasets (name -> pandas DataFrame)
        context: Document context text (truncated to 2000 chars in the prompt)
        prompt: Original task prompt
        outputLabel: Output filename
        formatType: Output format type ('md'/'markdown', 'html', else plain text)
        analysisPlan: Analysis plan from AI
        description: Output description

    Returns:
        Text document dict (label, content, metadata); on failure, a minimal
        error document in the requested format.
    """
    # Create dataset summaries
    datasetSummaries = []
    for name, df in datasets.items():
        summary = f"Dataset: {name}\n"
        summary += f"- Shape: {df.shape[0]} rows, {df.shape[1]} columns\n"
        summary += f"- Columns: {', '.join(df.columns.tolist())}\n"
        # Basic statistics for numeric columns
        numericCols = df.select_dtypes(include=['number']).columns
        if len(numericCols) > 0:
            summary += "- Numeric Columns Stats:\n"
            for col in numericCols[:3]:  # Limit to first 3
                stats = df[col].describe()
                summary += f" - {col}: min={stats['min']:.2f}, max={stats['max']:.2f}, mean={stats['mean']:.2f}\n"
        datasetSummaries.append(summary)
    # Determine content type based on format
    contentType = "text/markdown" if formatType in ["md", "markdown"] else \
                  "text/html" if formatType == "html" else \
                  "text/plain"
    # Generate analysis prompt
    analysisPrompt = f"""
    Create a detailed {formatType} document for:
    TASK: {prompt}
    OUTPUT REQUIREMENTS:
    - Format: {formatType}
    - Filename: {outputLabel}
    - Description: {description}
    ANALYSIS CONTEXT:
    {json.dumps(analysisPlan, indent=2)}
    DATASET SUMMARIES:
    {"".join(datasetSummaries)}
    DOCUMENT CONTEXT:
    {context[:2000]}... (truncated)
    Create a comprehensive, professional analysis document that addresses the task requirements.
    The document should:
    1. Have a clear structure with headings and sections
    2. Include relevant data findings and insights
    3. Provide appropriate interpretations and recommendations
    4. Format the content according to the required output format
    Your response should be the complete document content in the specified format.
    """
    try:
        # Get document content from AI
        documentContent = await self.mydom.callAi([
            {"role": "system", "content": f"You are a data analysis expert creating a {formatType} document."},
            {"role": "user", "content": analysisPrompt}
        ], produceUserAnswer = True)
        # Clean HTML or Markdown if needed
        if formatType in ["md", "markdown"] and not documentContent.strip().startswith("#"):
            documentContent = f"# Analysis Report\n\n{documentContent}"
        elif formatType == "html" and "<html" not in documentContent.lower():
            documentContent = f"<html><body>{documentContent}</body></html>"
        return self.formatAgentDocumentOutput(outputLabel, documentContent, contentType)
    except Exception as e:
        logger.error(f"Error creating text document: {str(e)}", exc_info=True)
        # Create a simple error document in the requested format
        if formatType in ["md", "markdown"]:
            content = f"# Error in Analysis\n\nThere was an error generating the analysis: {str(e)}"
        elif formatType == "html":
            content = f"<html><body><h1>Error in Analysis</h1><p>There was an error generating the analysis: {str(e)}</p></body></html>"
        else:
            content = f"Error in Analysis\n\nThere was an error generating the analysis: {str(e)}"
        return {
            "label": outputLabel,
            "content": content,
            "metadata": {
                "contentType": contentType
            }
        }
def _getImageBase64(self, formatType: str = 'png') -> str:
    """
    Render the active matplotlib figure into memory and return it encoded
    as a base64 string.

    Args:
        formatType: Image format handed to matplotlib (e.g. 'png', 'svg')

    Returns:
        Base64-encoded string of the rendered image bytes
    """
    with io.BytesIO() as imageBuffer:
        plt.savefig(imageBuffer, format=formatType, dpi=100)
        rawBytes = imageBuffer.getvalue()
    return base64.b64encode(rawBytes).decode('utf-8')
# Factory function for the Analyst agent
def getAgentAnalyst():
    """Construct and return a fresh Analyst agent instance."""
    agent = AgentAnalyst()
    return agent

View file

@ -1,764 +0,0 @@
"""
Simple Coder Agent for execution of Python code.
Modified to pass expected output document names to the generated code.
"""
import logging
import json
import os
import subprocess
import tempfile
import shutil
import sys
from typing import Dict, Any, List, Tuple
from modules.workflowAgentsRegistry import AgentBase
from modules.configuration import APP_CONFIG
logger = logging.getLogger(__name__)
class AgentCoder(AgentBase):
"""Simplified Agent for developing and executing Python code with integrated executor"""
def __init__(self):
    """Initialize the coder agent's identity, capabilities and executor limits."""
    super().__init__()
    # Agent identity / registry metadata.
    self.name = "coder"
    self.description = "Develops and executes Python code for data processing and automation"
    self.capabilities = ["code_development", "data_processing", "file_processing", "automation", "code_execution"]
    # Executor settings sourced from application configuration.
    self.executorTimeout = int(APP_CONFIG.get("Agent_Coder_EXECUTION_TIMEOUT"))  # seconds
    self.executionRetryLimit = int(APP_CONFIG.get("Agent_Coder_EXECUTION_RETRY"))  # max retries
    self.tempDir = None  # working directory, created later by _executeCode
def setDependencies(self, mydom=None):
    """
    Set external dependencies for the agent.

    Args:
        mydom: Service object exposing callAi(); used for all AI requests
            made by this agent. May be None, in which case processTask
            reports the agent as not configured.
    """
    self.mydom = mydom
async def processTask(self, task: Dict[str, Any]) -> Dict[str, Any]:
    """
    Process a task and perform code development/execution.

    Pipeline:
      1. Extract prompt, input documents and output specifications.
      2. Split document payloads into raw data (for code execution) and
         AI-extracted content (for a possible quick completion).
      3. Ask the AI whether the task can be answered without running code.
      4. Otherwise generate code, inject the input file data, and execute
         it with retry/improvement loops.
      5. Convert the execution result dictionary into output documents.

    Args:
        task: Task dictionary with prompt, inputDocuments, outputSpecifications

    Returns:
        Dictionary with 'feedback' (status text) and 'documents' (always
        includes the generated code and execution history on the code path).
    """
    # 1. Extract task information
    prompt = task.get("prompt", "")
    inputDocuments = task.get("inputDocuments", [])
    outputSpecs = task.get("outputSpecifications", [])
    # Check if AI service is available
    if not self.mydom:
        logger.error("No AI service configured for the Coder agent")
        return {
            "feedback": "The Coder agent is not properly configured.",
            "documents": []
        }
    # 2. Extract data from documents in separate categories
    documentData = []  # For raw file data (for code execution)
    contentData = []  # For content data (later use)
    contentExtraction = []  # For AI-extracted data (for quick completion)
    for doc in inputDocuments:
        # Create proper filename from name and ext
        filename = f"{doc.get('name')}.{doc.get('ext')}" if doc.get('ext') else doc.get('name')
        # Add main document data to documentData if it exists
        docData = doc.get('data', '')
        if docData:
            isBase64 = True  # Assume base64 encoded for document data
            documentData.append([filename, docData, isBase64])
        # Process contents for different uses
        if doc.get('contents'):
            for content in doc.get('contents', []):
                contentName = content.get('name', 'unnamed')
                # For AI-extracted data (quick completion)
                if content.get('dataExtracted'):
                    contentExtraction.append({
                        "filename": filename,
                        "contentName": contentName,
                        "contentData": content.get('dataExtracted', ''),
                        "contentType": content.get('contentType', ''),
                        "summary": content.get('summary', '')
                    })
                # For raw content data
                if content.get('data'):
                    rawData = content.get('data', '')
                    isBase64 = content.get('metadata', {}).get('base64Encoded', False)
                    contentData.append({
                        "filename": filename,
                        "contentName": contentName,
                        "data": rawData,
                        "isBase64": isBase64,
                        "contentType": content.get('contentType', '')
                    })
                    # Also add to documentData for code execution if not already added
                    if not docData or docData != rawData:
                        documentData.append([filename, rawData, isBase64])
    # 3. Check if task can be completed without code execution
    quickCompletion = await self._checkQuickCompletion(prompt, contentExtraction, outputSpecs)
    if quickCompletion and quickCompletion.get("complete") == 1:
        logger.info("Task completed without code execution")
        return {
            "feedback": quickCompletion.get("prompt", "Task completed successfully."),
            "documents": quickCompletion.get("documents", [])
        }
    else:
        logger.debug(f"Code to generate, no quick check")
    # If quick completion not possible, continue with code generation and execution
    logger.info("Generating code to solve the task")
    # 4. Generate code using AI
    code, requirements = await self._generateCode(prompt, outputSpecs)
    if not code:
        return {
            "feedback": "Failed to generate code for the task.",
            "documents": []
        }
    # 5. Replace the placeholder with actual inputFiles data
    documentDataJson = repr(documentData)
    codeWithData = code.replace("inputFiles = \"=== JSONLOAD ===\"", f"inputFiles = {documentDataJson}")
    # 6. Execute code with retry logic
    retryCount = 0
    maxRetries = self.executionRetryLimit
    executionHistory = []
    while retryCount <= maxRetries:
        executionResult = self._executeCode(codeWithData, requirements)
        executionHistory.append({
            "attempt": retryCount + 1,
            "code": codeWithData,
            "result": executionResult
        })
        # Check if execution was successful
        if executionResult.get("success", False):
            logger.info(f"Code execution succeeded on attempt {retryCount + 1}")
            break
        # If we've reached max retries, exit the loop
        if retryCount >= maxRetries:
            logger.info(f"Reached maximum retry limit ({maxRetries}). Giving up.")
            break
        # Log the error and attempt to improve the code
        error = executionResult.get("error", "Unknown error")
        logger.info(f"Execution attempt {retryCount + 1} failed: {error}. Attempting to improve code.")
        # Generate improved code based on error
        improvedCode, improvedRequirements = await self._improveCode(
            originalCode=codeWithData,
            error=error,
            executionResult=executionResult,
            attempt=retryCount + 1,
            outputSpecs=outputSpecs
        )
        if improvedCode:
            codeWithData = improvedCode
            requirements = improvedRequirements
            logger.info(f"Code improved for retry {retryCount + 2}")
        else:
            logger.warning("Failed to improve code, using original code for retry")
        retryCount += 1
    # 7. Process results and create output documents
    documents = []
    # Always add the final code document
    documents.append(self.formatAgentDocumentOutput("generated_code.py", codeWithData, "text/plain"))
    # Add execution history document
    executionHistoryStr = json.dumps(executionHistory, indent=2)
    documents.append(self.formatAgentDocumentOutput("execution_history.json", executionHistoryStr, "application/json"))
    # Enhanced result handling: Create documents based on execution results - fixed for proper content extraction
    if executionResult.get("success", False):
        resultData = executionResult.get("result")
        # Process results from the result dictionary if available
        if isinstance(resultData, dict):
            # First, create a mapping of expected output labels to their specs
            expectedOutputs = {spec.get("label"): spec for spec in outputSpecs}
            createdOutputs = set()
            for label, result_item in resultData.items():
                # Check if result follows the expected structure with nested content
                if isinstance(result_item, dict) and "content" in result_item:
                    # Extract values from the properly structured result
                    content = result_item.get("content", "")  # Extract the inner content
                    base64Encoded = result_item.get("base64Encoded", False)
                    contentType = result_item.get("contentType", "text/plain")
                    # Check if this label matches one of our expected output documents
                    # If not, but we haven't created all expected outputs yet, try to map it
                    # NOTE(review): this remaps to the FIRST unused expected label,
                    # which can mis-pair outputs when several labels differ — verify.
                    finalLabel = label
                    if label not in expectedOutputs and len(expectedOutputs) > 0:
                        # Find an unused expected output label
                        for expectedLabel in expectedOutputs:
                            if expectedLabel not in createdOutputs:
                                logger.warning(f"Remapping output '{label}' to expected '{expectedLabel}'")
                                finalLabel = expectedLabel
                                break
                    # Create document by passing only the content to formatAgentDocumentOutput
                    doc = self.formatAgentDocumentOutput(finalLabel, content, contentType)
                    # Override the base64Encoded flag with the value from the result
                    # This is needed since formatAgentDocumentOutput might determine a different value
                    if isinstance(base64Encoded, bool):
                        doc["base64Encoded"] = base64Encoded
                    documents.append(doc)
                    createdOutputs.add(finalLabel)
                    logger.info(f"Created document from result: {finalLabel} ({contentType}, base64={base64Encoded})")
                else:
                    # Not properly structured - log warning
                    logger.warning(f"Skipping improperly formatted result for '{label}'. Results must include 'content' field.")
        else:
            # No result dictionary found
            logger.warning("No valid result dictionary found or it's not properly formatted")
        # If no valid documents were created from the result dictionary but we have output specifications
        if len(documents) <= 2 and outputSpecs:  # Only code.py and history.json exist
            logger.warning("No valid documents created from result dictionary, using execution output for specifications")
            # Default to execution output
            output = executionResult.get("output", "")
            for spec in outputSpecs:
                label = spec.get("label", "output.txt")
                # Create basic document from output
                doc = self.formatAgentDocumentOutput(label, output, "text/plain")
                documents.append(doc)
                logger.info(f"Created document from output specification: {label}")
        if retryCount > 0:
            feedback = f"Code executed successfully after {retryCount + 1} attempts. Generated {len(documents) - 2} output files."
        else:
            feedback = f"Code executed successfully. Generated {len(documents) - 2} output files."
    else:
        # Execution failed
        error = executionResult.get("error", "Unknown error")
        documents.append(self.formatAgentDocumentOutput("execution_error.txt", f"Error executing code:\n\n{error}", "text/plain"))
        if retryCount > 0:
            feedback = f"Error during code execution after {retryCount + 1} attempts: {error}"
        else:
            feedback = f"Error during code execution: {error}"
    return {
        "feedback": feedback,
        "documents": documents
    }
async def _improveCode(self, originalCode: str, error: str, executionResult: Dict[str, Any], attempt: int, outputSpecs: List[Dict[str, Any]] = None) -> Tuple[str, List[str]]:
    """
    Improve code based on execution error.

    Sends the failing code, the error message and stdout back to the AI and
    asks for a fixed version that keeps the required result-dictionary
    output contract.

    Args:
        originalCode: The code that failed to execute
        error: The error message
        executionResult: Complete execution result dictionary
        attempt: Current attempt number (1-based, for the prompt)
        outputSpecs: List of expected output specifications

    Returns:
        Tuple of (improvedCode, requirements); (None, []) when the AI call
        itself fails.

    NOTE(review): if the improved code lacks a "# REQUIREMENTS:" comment,
    the returned requirements list is empty and the caller overwrites the
    previously known requirements — verify this is intended.
    """
    # Create a string with output specifications to be included in the prompt
    outputSpecsStr = ""
    if outputSpecs:
        outputSpecsStr = "\nEXPECTED OUTPUT DOCUMENTS:\n"
        for i, spec in enumerate(outputSpecs, 1):
            label = spec.get("label", f"output{i}.txt")
            description = spec.get("description", "")
            outputSpecsStr += f"{i}. {label} - {description}\n"
    # Create prompt for code improvement
    improvementPrompt = f"""
    Fix the following Python code that failed during execution. This is attempt {attempt} to fix the code.
    ORIGINAL CODE:
    {originalCode}
    ERROR MESSAGE:
    {error}
    STDOUT:
    {executionResult.get('output', '')}
    {outputSpecsStr}
    INSTRUCTIONS:
    1. Fix all errors identified in the error message
    2. Diagnose and fix any logical issues
    3. Pay special attention to:
    - Type conversions and data handling
    - Error handling and edge cases
    - Resource management (file handles, etc.)
    - Syntax errors and typos
    4. Keep the inputFiles handling logic intact
    5. Maintain the same overall structure and purpose
    OUTPUT REQUIREMENTS (VERY IMPORTANT):
    - Your code MUST define a 'result' variable as a dictionary to store ALL outputs
    - The key for each entry MUST be the full filename with extension (e.g., "output.txt")
    - The value for each entry MUST be a dictionary with the following structure:
    {{
    "content": string, # The actual content (text or base64-encoded string)
    "base64Encoded": boolean, # Set to true for binary data, false for text data
    "contentType": string # MIME type of the content (e.g., "text/plain", "application/json")
    }}
    - Example result dictionary:
    result = {{
    "output.txt": {{
    "content": "This is text content",
    "base64Encoded": False,
    "contentType": "text/plain"
    }},
    "chart.png": {{
    "content": "base64encodedstring...",
    "base64Encoded": True,
    "contentType": "image/png"
    }}
    }}
    - NEVER write files to disk using open() or similar methods - use the result dictionary instead
    JSON OUTPUT (CRITICAL):
    - After creating the result dictionary, you MUST print it as JSON to stdout
    - Make sure your code includes: print(json.dumps(result)) as the final line
    - This printed JSON is how the system captures your result
    REQUIREMENTS:
    Required packages should be specified as:
    # REQUIREMENTS: library==version,library2>=version
    - You may add/remove requirements as needed to fix the code
    Return ONLY Python code without explanations or markdown.
    """
    # Call AI service
    messages = [
        {"role": "system", "content": "You are an expert Python code debugger. Provide only fixed Python code without explanations or formatting. Ensure all generated files are included in the 'result' dictionary and that result is printed as JSON with print(json.dumps(result))."},
        {"role": "user", "content": improvementPrompt}
    ]
    try:
        improvedContent = await self.mydom.callAi(messages, temperature=0.2)
        # Extract code and requirements
        improvedCode = self._cleanCode(improvedContent)
        # Extract requirements from the "# REQUIREMENTS:" marker comment, if present
        requirements = []
        for line in improvedCode.split('\n'):
            if line.strip().startswith("# REQUIREMENTS:"):
                reqStr = line.replace("# REQUIREMENTS:", "").strip()
                requirements = [r.strip() for r in reqStr.split(',') if r.strip()]
                break
        return improvedCode, requirements
    except Exception as e:
        logger.error(f"Error improving code: {str(e)}")
        return None, []
async def _checkQuickCompletion(self, prompt: str, contentExtraction: List[Dict], outputSpecs: List[Dict]) -> Dict:
    """
    Check if the task can be completed without writing and executing code.

    Sends the already-extracted document content plus the output
    specifications to the AI, which either solves the task directly
    (complete=1 with documents) or declares that code is required.

    Args:
        prompt: The task prompt
        contentExtraction: List of extracted content data with contentName and dataExtracted
        outputSpecs: List of output specifications

    Returns:
        Parsed completion dict ({"complete": 0|1, ...}) or None when quick
        completion is not possible or the AI response is unusable.
    """
    # If no data or no output specs, can't do a quick completion
    if not contentExtraction or not outputSpecs:
        return None
    # Create a prompt for the AI to check if this can be completed directly
    specsJson = json.dumps(outputSpecs)
    dataJson = json.dumps(contentExtraction)
    checkPrompt = f"""
    Analyze this task and determine if it can be completed directly without writing code.
    TASK:
    {prompt}
    EXTRACTED DATA AVAILABLE:
    {dataJson}
    Each entry in the extracted data contains:
    - filename: The source file name
    - contentName: The specific content section name
    - contentData: The AI-extracted text from the content
    - contentType: The type of content (text, csv, etc.)
    - summary: A brief summary of the content
    REQUIRED OUTPUT:
    {specsJson}
    If the task can be completed directly with the available extracted data, respond with:
    {{"complete": 1, "prompt": "Brief explanation of the solution", "documents": [
    {{"label": "filename.ext", "content": "content here"}}
    ]}}
    If code would be needed to properly complete this task, respond with:
    {{"complete": 0, "prompt": "Explanation why code is needed"}}
    Only return valid JSON. Your entire response must be parseable as JSON.
    """
    # Call AI service
    logger.debug(f"Checking if task can be completed without code execution: {checkPrompt}")
    messages = [
        {"role": "system", "content": "You are an AI assistant that determines if tasks require code execution. Reply with JSON only."},
        {"role": "user", "content": checkPrompt}
    ]
    try:
        # Use a lower temperature for more deterministic response
        response = await self.mydom.callAi(messages, produceUserAnswer = True, temperature=0.1)
        # Parse response as JSON
        if response:
            try:
                # Find JSON in response if there's any text around it
                jsonStart = response.find('{')
                jsonEnd = response.rfind('}') + 1
                if jsonStart >= 0 and jsonEnd > jsonStart:
                    jsonStr = response[jsonStart:jsonEnd]
                    result = json.loads(jsonStr)
                    # Check if this is a proper response
                    if "complete" in result:
                        return result
            except json.JSONDecodeError:
                # Fall through to the default below (code execution required).
                logger.debug("Failed to parse quick completion response as JSON")
    except Exception as e:
        logger.debug(f"Error during quick completion check: {str(e)}")
    # Default to requiring code execution
    return None
async def _generateCode(self, prompt: str, outputSpecs: List[Dict[str, Any]] = None) -> Tuple[str, List[str]]:
    """
    Generate Python code from a prompt with the inputFiles placeholder.

    The generated code must start with the literal placeholder line
    `inputFiles = "=== JSONLOAD ==="`, which the caller later substitutes
    with the real input file data, and must emit its outputs as a JSON
    'result' dictionary printed to stdout.

    Args:
        prompt: The task prompt
        outputSpecs: List of expected output specifications

    Returns:
        Tuple of (code, requirements) where requirements is parsed from an
        optional "# REQUIREMENTS:" comment in the generated code.
    """
    # Create a string with output specifications to be included in the prompt
    outputSpecsStr = ""
    if outputSpecs:
        outputSpecsStr = "\nEXPECTED OUTPUT DOCUMENTS:\n"
        for i, spec in enumerate(outputSpecs, 1):
            label = spec.get("label", f"output{i}.txt")
            description = spec.get("description", "")
            outputSpecsStr += f"{i}. {label} - {description}\n"
    # Create improved prompt for code generation
    aiPrompt = f"""
    Generate Python code to solve the following task:
    TASK:
    {prompt}
    {outputSpecsStr}
    INPUT FILES:
    - 'inputFiles' variable is provided as [[filename, data, isBase64], ...]
    - For text files (isBase64=False): use data directly as string
    - For binary files (isBase64=True): use base64.b64decode(data)
    OUTPUT REQUIREMENTS (VERY IMPORTANT):
    - Your code MUST define a 'result' variable as a dictionary to store ALL outputs
    - The key for each entry MUST be the full filename with extension (e.g., "output.txt")
    - The value for each entry MUST be a dictionary with the following structure:
    {{
    "content": string, # The actual content (text or base64-encoded string)
    "base64Encoded": boolean, # Set to true for binary data, false for text data
    "contentType": string # MIME type of the content (e.g., "text/plain", "application/json")
    }}
    - Example result dictionary:
    result = {{
    "output.txt": {{
    "content": "This is text content",
    "base64Encoded": False,
    "contentType": "text/plain"
    }},
    "chart.png": {{
    "content": "base64encodedstring...",
    "base64Encoded": True,
    "contentType": "image/png"
    }}
    }}
    - NEVER write files to disk using open() or similar methods - use the result dictionary instead
    - If you generate any charts, reports, or visualizations, ensure they are properly encoded and included
    IMPORTANT - USE EXACT OUTPUT FILENAMES:
    - You MUST use the EXACT filenames specified in EXPECTED OUTPUT DOCUMENTS section
    - The key in the result dictionary must match these filenames precisely
    - If no output documents are specified, use appropriate descriptive filenames
    JSON OUTPUT (CRITICAL):
    - After creating the result dictionary, you MUST print it as JSON to stdout using json.dumps()
    - Add these lines at the end of your code:
    import json # if not already imported
    print(json.dumps(result))
    - This printed JSON is how the system captures your result
    - Make sure this is the last thing your code prints
    BINARY DATA HANDLING:
    - For binary content (images, PDFs, etc.), convert to base64 string and set base64Encoded=True
    - For text content (text, JSON, HTML, etc.), use plain string and set base64Encoded=False
    - Use appropriate MIME types for different content types
    CODE QUALITY:
    - Use explicit type conversions where needed (int/float/str)
    - Implement feature detection, not version checks
    - Handle errors gracefully with appropriate fallbacks
    - Follow latest API conventions for libraries
    - Validate inputs before processing
    Your code must start with:
    inputFiles = "=== JSONLOAD ===" # DO NOT CHANGE THIS LINE
    REQUIREMENTS:
    Required packages should be specified as:
    # REQUIREMENTS: library==version,library2>=version
    - Specify exact versions for critical libraries
    - Use constraint operators (==,>=,<=) as needed
    Return ONLY Python code without explanations or markdown.
    """
    # Call AI service
    messages = [
        {"role": "system", "content": "You are a Python code generator. Provide only valid Python code without explanations or formatting. Always output the result dictionary as JSON using print(json.dumps(result)) at the end of your code."},
        {"role": "user", "content": aiPrompt}
    ]
    generatedContent = await self.mydom.callAi(messages, temperature=0.1)
    # Extract code and requirements
    code = self._cleanCode(generatedContent)
    # Extract requirements from the "# REQUIREMENTS:" marker comment, if present
    requirements = []
    for line in code.split('\n'):
        if line.strip().startswith("# REQUIREMENTS:"):
            reqStr = line.replace("# REQUIREMENTS:", "").strip()
            requirements = [r.strip() for r in reqStr.split(',') if r.strip()]
            break
    return code, requirements
def _executeCode(self, code: str, requirements: List[str] = None) -> Dict[str, Any]:
    """
    Execute Python code in a freshly created virtual environment.

    Args:
        code: Python code to execute.
        requirements: Optional list of pip requirement specifiers
            (e.g. "requests==2.31.0").

    Returns:
        Execution result dictionary with keys:
        success (bool), output (stdout), error (stderr or message),
        result (last JSON dict the code printed, or None), exitCode (int).
    """
    try:
        # 1. Create temp directory and virtual environment
        self.tempDir = tempfile.mkdtemp(prefix="code_exec_")
        venvPath = os.path.join(self.tempDir, "venv")
        logger.debug(f"Creating virtual environment at {venvPath}")
        subprocess.run([sys.executable, "-m", "venv", venvPath],
                       check=True, capture_output=True)
        # venv layout differs between Windows and POSIX.
        pythonExe = os.path.join(venvPath, "Scripts", "python.exe") if os.name == 'nt' else os.path.join(venvPath, "bin", "python")
        # 2. Install requirements if provided
        if requirements:
            logger.info(f"Installing requirements: {requirements}")
            # Create requirements.txt
            reqFile = os.path.join(self.tempDir, "requirements.txt")
            with open(reqFile, "w") as f:
                f.write("\n".join(requirements))
            try:
                pipResult = subprocess.run(
                    [pythonExe, "-m", "pip", "install", "-r", reqFile],
                    capture_output=True,
                    text=True,
                    timeout=int(APP_CONFIG.get("Agent_Coder_INSTALL_TIMEOUT"))
                )
                if pipResult.returncode != 0:
                    # Log at warning (was debug): install failures usually
                    # explain later import errors in the executed code.
                    logger.warning(f"Error installing requirements: {pipResult.stderr}")
                else:
                    logger.debug("Requirements installed successfully")
                    # Log installed packages if in debug mode
                    if logger.isEnabledFor(logging.DEBUG):
                        pipList = subprocess.run(
                            [pythonExe, "-m", "pip", "list"],
                            capture_output=True,
                            text=True
                        )
                        logger.debug(f"Installed packages:\n{pipList.stdout}")
            except Exception as e:
                # Best-effort install: execution is still attempted, but the
                # failure must be visible (was hidden at debug level).
                logger.warning(f"Exception during requirements installation: {str(e)}")
        # 3. Write code to file
        codeFile = os.path.join(self.tempDir, "code.py")
        with open(codeFile, "w", encoding="utf-8") as f:
            f.write(code)
        # 4. Execute code
        logger.debug(f"Executing code with timeout of {self.executorTimeout} seconds. Code: {code}")
        process = subprocess.run(
            [pythonExe, codeFile],
            timeout=self.executorTimeout,
            capture_output=True,
            text=True
        )
        # 5. Process results
        stdout = process.stdout
        stderr = process.stderr
        # Only look for a result dict when the run succeeded.
        resultData = self._extractResultData(stdout) if process.returncode == 0 else None
        if resultData:
            logger.info(f"Found result dictionary with {len(resultData)} entries: {list(resultData.keys())}")
        else:
            logger.warning("No result dictionary found in output")
        return {
            "success": process.returncode == 0,
            "output": stdout,
            "error": stderr if process.returncode != 0 else "",
            "result": resultData,
            "exitCode": process.returncode
        }
    except subprocess.TimeoutExpired:
        logger.error(f"Execution timed out after {self.executorTimeout} seconds")
        return {
            "success": False,
            "output": "",
            "error": f"Execution timed out after {self.executorTimeout} seconds",
            "result": None,
            "exitCode": -1
        }
    except Exception as e:
        logger.error(f"Execution error: {str(e)}")
        return {
            "success": False,
            "output": "",
            "error": f"Execution error: {str(e)}",
            "result": None,
            "exitCode": -1
        }
    finally:
        # Clean up resources regardless of outcome.
        self._cleanupExecution()

def _extractResultData(self, stdout: str):
    """Return the last JSON dictionary printed on stdout, or None.

    Scans stdout line by line for JSON-looking lines; the generated code
    is instructed to print its result dict via json.dumps() last.
    """
    try:
        parsedCandidates = []
        for line in stdout.strip().split('\n'):
            line = line.strip()
            if line and line[0] in '{[' and line[-1] in '}]':
                try:
                    parsedCandidates.append(json.loads(line))
                except json.JSONDecodeError:
                    continue
        # Use the last valid JSON value that is a dictionary.
        for parsed in reversed(parsedCandidates):
            if isinstance(parsed, dict):
                logger.debug(f"Extracted result data from stdout: {type(parsed)}")
                return parsed
    except Exception as e:
        logger.debug(f"Error extracting result from stdout: {str(e)}")
    return None
def _cleanupExecution(self):
"""Clean up temporary resources from code execution."""
if self.tempDir and os.path.exists(self.tempDir):
try:
logger.debug(f"Cleaning up temporary directory: {self.tempDir}")
shutil.rmtree(self.tempDir)
self.tempDir = None
except Exception as e:
logger.warning(f"Error cleaning up temp directory: {str(e)}")
def _cleanCode(self, code: str) -> str:
"""Remove any markdown formatting or explanations."""
# Remove code block markers
code = code.replace("```python", "").replace("```", "")
# Remove explanations before or after code
lines = code.strip().split('\n')
startIndex = 0
endIndex = len(lines)
# Find start of actual code
for i, line in enumerate(lines):
if line.strip().startswith("inputFiles =") or line.strip().startswith("# REQUIREMENTS:"):
startIndex = i
break
# Clean code
cleanedCode = '\n'.join(lines[startIndex:endIndex])
return cleanedCode.strip()
# Factory function for the Coder agent
def getAgentCoder():
    """Returns an instance of the Coder agent.

    Factory used by the agent registry; a new AgentCoder is created on
    every call (no caching or singleton behavior).
    """
    return AgentCoder()

View file

@ -1,559 +0,0 @@
"""
Documentation agent for creating documentation, reports, and structured content.
Reimagined with an output-first, AI-driven approach with multi-step document generation.
"""
import logging
import json
from typing import Dict, Any, List
from modules.workflowAgentsRegistry import AgentBase
logger = logging.getLogger(__name__)
class AgentDocumentation(AgentBase):
"""AI-driven agent for creating documentation and structured content using multi-step generation"""
def __init__(self):
    """Initialize the documentation agent with its registry metadata."""
    super().__init__()
    # Identifier used by the workflow agents registry.
    self.name = "documentation"
    self.description = "Creates structured documentation, reports, and content using AI with multi-step generation"
    # Capability tags advertised to the registry/planner.
    self.capabilities = [
        "report_generation",
        "documentation",
        "content_structuring",
        "technical_writing",
        "knowledge_organization"
    ]
def setDependencies(self, mydom=None):
    """Set external dependencies for the agent.

    Args:
        mydom: Service object exposing callAi(); None leaves the agent
            without AI access (processTask then returns an error message).
    """
    self.mydom = mydom
async def processTask(self, task: Dict[str, Any]) -> Dict[str, Any]:
    """
    Produce the requested output documents for a documentation task.

    Args:
        task: Task dictionary with "prompt", "inputDocuments" and
            "outputSpecifications" entries.

    Returns:
        Dictionary with "feedback" (str) and "documents" (list) keys.
    """
    try:
        taskPrompt = task.get("prompt", "")
        sourceDocuments = task.get("inputDocuments", [])
        requestedOutputs = task.get("outputSpecifications", [])
        # Without the AI service this agent cannot do anything useful.
        if not self.mydom:
            return {
                "feedback": "The Documentation agent requires an AI service to function.",
                "documents": []
            }
        # Only the dataExtracted portions of the inputs are used as context.
        researchContext = self._extractDocumentContext(sourceDocuments)
        # Ask the AI for a documentation plan up front.
        plan = await self._analyzeTask(taskPrompt, researchContext, requestedOutputs)
        # Fall back to a single default document when no outputs were requested.
        if not requestedOutputs:
            fileExt = plan.get("recommendedFormat", "markdown")
            baseName = self._sanitizeFilename(plan.get("title", "Documentation"))
            requestedOutputs = [
                {"label": f"{baseName}.{fileExt}", "description": "Comprehensive documentation"}
            ]
        # Generate one document per output specification.
        generatedDocuments = []
        for outputSpec in requestedOutputs:
            generatedDocuments.append(
                await self._createDocumentMultiStep(
                    taskPrompt,
                    researchContext,
                    outputSpec.get("label", ""),
                    outputSpec.get("description", ""),
                    plan
                )
            )
        return {
            "feedback": plan.get("feedback", f"Created {len(generatedDocuments)} documents based on your requirements."),
            "documents": generatedDocuments
        }
    except Exception as e:
        logger.error(f"Error in documentation generation: {str(e)}", exc_info=True)
        return {
            "feedback": f"Error during documentation generation: {str(e)}",
            "documents": []
        }
def _extractDocumentContext(self, documents: List[Dict[str, Any]]) -> str:
"""
Extract context from input documents, focusing on dataExtracted.
Args:
documents: List of document objects
Returns:
Extracted context as text
"""
contextParts = []
for doc in documents:
docName = doc.get("name", "unnamed")
if doc.get("ext"):
docName = f"{docName}.{doc.get('ext')}"
contextParts.append(f"\n\n--- {docName} ---\n")
# Process contents for dataExtracted
for content in doc.get("contents", []):
if content.get("dataExtracted"):
contextParts.append(content.get("dataExtracted", ""))
return "\n".join(contextParts)
def _sanitizeFilename(self, filename: str) -> str:
"""
Sanitize a filename by removing invalid characters.
Args:
filename: Filename to sanitize
Returns:
Sanitized filename
"""
# Replace invalid characters with underscores
invalidChars = r'<>:"/\|?*'
for char in invalidChars:
filename = filename.replace(char, '_')
# Trim filename if too long
if len(filename) > 100:
filename = filename[:97] + "..."
return filename
async def _analyzeTask(self, prompt: str, context: str, outputSpecs: List) -> Dict:
"""
Use AI to analyze the task and create a documentation plan.
Args:
prompt: The task prompt
context: Document context
outputSpecs: Output specifications
Returns:
Documentation plan dictionary
"""
analysisPrompt = f"""
Analyze this documentation task and create a detailed plan.
TASK: {prompt}
DOCUMENT CONTEXT SAMPLE:
{context[:1000]}... (truncated)
OUTPUT REQUIREMENTS:
{json.dumps(outputSpecs, indent=2)}
Create a detailed documentation plan in JSON format with the following structure:
{{
"title": "Document Title",
"documentType": "report|manual|guide|whitepaper|etc",
"audience": "technical|general|executive|etc",
"detailedStructure": [
{{
"title": "Chapter/Section Title",
"keyPoints": ["point1", "point2", ...],
"subsections": ["subsection1", "subsection2", ...],
"importance": "high|medium|low",
"estimatedLength": "short|medium|long"
}},
... more sections ...
],
"keyTopics": ["topic1", "topic2", ...],
"tone": "formal|conversational|instructional|etc",
"recommendedFormat": "markdown|html|text|etc",
"formattingRequirements": ["requirement1", "requirement2", ...],
"executiveSummary": "Brief description of what the document will cover",
"feedback": "Brief message explaining the documentation approach"
}}
Only return valid JSON. No preamble or explanations.
"""
try:
response = await self.mydom.callAi([
{"role": "system", "content": "You are a documentation expert. Respond with valid JSON only."},
{"role": "user", "content": analysisPrompt}
])
# Extract JSON from response
jsonStart = response.find('{')
jsonEnd = response.rfind('}') + 1
if jsonStart >= 0 and jsonEnd > jsonStart:
plan = json.loads(response[jsonStart:jsonEnd])
return plan
else:
# Fallback if JSON not found
return {
"title": "Documentation",
"documentType": "report",
"audience": "general",
"detailedStructure": [
{
"title": "Introduction",
"keyPoints": ["Purpose", "Scope"],
"subsections": [],
"importance": "high",
"estimatedLength": "short"
},
{
"title": "Main Content",
"keyPoints": ["Core Information"],
"subsections": ["Key Findings", "Analysis"],
"importance": "high",
"estimatedLength": "long"
},
{
"title": "Conclusion",
"keyPoints": ["Summary", "Next Steps"],
"subsections": [],
"importance": "medium",
"estimatedLength": "short"
}
],
"keyTopics": ["General Information"],
"tone": "formal",
"recommendedFormat": "markdown",
"formattingRequirements": ["Clear headings", "Professional formatting"],
"executiveSummary": "A comprehensive documentation covering the requested topics.",
"feedback": "Created documentation based on your requirements."
}
except Exception as e:
logger.warning(f"Error creating documentation plan: {str(e)}")
return {
"title": "Documentation",
"documentType": "report",
"audience": "general",
"detailedStructure": [
{
"title": "Introduction",
"keyPoints": ["Purpose", "Scope"],
"subsections": [],
"importance": "high",
"estimatedLength": "short"
},
{
"title": "Main Content",
"keyPoints": ["Core Information"],
"subsections": ["Key Findings", "Analysis"],
"importance": "high",
"estimatedLength": "long"
},
{
"title": "Conclusion",
"keyPoints": ["Summary", "Next Steps"],
"subsections": [],
"importance": "medium",
"estimatedLength": "short"
}
],
"keyTopics": ["General Information"],
"tone": "formal",
"recommendedFormat": "markdown",
"formattingRequirements": ["Clear headings", "Professional formatting"],
"executiveSummary": "A comprehensive documentation covering the requested topics.",
"feedback": "Created documentation based on your requirements."
}
async def _createDocumentMultiStep(self, prompt: str, context: str, outputLabel: str,
                                   outputDescription: str, documentationPlan: Dict) -> Dict:
    """
    Create a document using a multi-step approach with separate AI calls for each section.

    Steps: introduction -> optional executive summary -> one AI call per
    planned section -> conclusion -> final assembly in the requested format.

    Args:
        prompt: Original task prompt
        context: Document context (only the first 500 chars reach section prompts)
        outputLabel: Output filename; its extension selects the format
        outputDescription: Description of desired output
            NOTE(review): currently unused in this method — confirm whether
            it should influence the prompts.
        documentationPlan: Documentation plan from AI (see _analyzeTask)

    Returns:
        Document object. NOTE(review): the success path returns
        self.formatAgentDocumentOutput(...) while the error path returns a
        plain dict with label/content/metadata — confirm callers accept both shapes.
    """
    # Determine format from filename (default to markdown without extension)
    formatType = outputLabel.split('.')[-1].lower() if '.' in outputLabel else "md"
    # Map format to contentType
    contentTypeMap = {
        "md": "text/markdown",
        "markdown": "text/markdown",
        "html": "text/html",
        "txt": "text/plain",
        "text": "text/plain",
        "json": "application/json",
        "csv": "text/csv"
    }
    contentType = contentTypeMap.get(formatType, "text/plain")
    # Get document information from the plan, with safe defaults
    title = documentationPlan.get("title", "Documentation")
    documentType = documentationPlan.get("documentType", "document")
    audience = documentationPlan.get("audience", "general")
    tone = documentationPlan.get("tone", "formal")
    keyTopics = documentationPlan.get("keyTopics", [])
    # NOTE(review): formattingRequirements is read but never used below.
    formattingRequirements = documentationPlan.get("formattingRequirements", [])
    # Get the detailed structure
    detailedStructure = documentationPlan.get("detailedStructure", [])
    if not detailedStructure:
        # Fallback structure if none provided
        detailedStructure = [
            {
                "title": "Introduction",
                "keyPoints": ["Purpose", "Scope"],
                "importance": "high"
            },
            {
                "title": "Main Content",
                "keyPoints": ["Core Information"],
                "importance": "high"
            },
            {
                "title": "Conclusion",
                "keyPoints": ["Summary", "Next Steps"],
                "importance": "medium"
            }
        ]
    try:
        # Step 1: Generate document introduction
        introPrompt = f"""
        Create the introduction for a {documentType} titled "{title}".
        DOCUMENT OVERVIEW:
        - Type: {documentType}
        - Audience: {audience}
        - Tone: {tone}
        - Key Topics: {', '.join(keyTopics)}
        - Format: {formatType}
        TASK CONTEXT: {prompt}
        This introduction should:
        1. Clearly state the purpose and scope of the document
        2. Provide context and background information
        3. Outline what the reader will find in the document
        4. Set the appropriate tone for the {audience} audience
        The introduction should be professional and engaging, formatted according to {formatType} standards.
        """
        introduction = await self.mydom.callAi([
            {"role": "system", "content": f"You are a documentation expert creating an introduction in {formatType} format."},
            {"role": "user", "content": introPrompt}
        ], produceUserAnswer = True)
        # Step 2: Generate executive summary (if applicable)
        # Only document types typically read by executives get a summary.
        if documentType in ["report", "whitepaper", "case study"]:
            summaryPrompt = f"""
            Create an executive summary for a {documentType} titled "{title}".
            DOCUMENT OVERVIEW:
            - Type: {documentType}
            - Audience: {audience}
            - Key Topics: {', '.join(keyTopics)}
            TASK CONTEXT: {prompt}
            This executive summary should:
            1. Provide a concise overview of the entire document
            2. Highlight key findings, recommendations, or conclusions
            3. Be suitable for executives or busy readers who may only read this section
            4. Be professionally formatted according to {formatType} standards
            Keep the summary focused and impactful, approximately 200-300 words.
            """
            executiveSummary = await self.mydom.callAi([
                {"role": "system", "content": f"You are a documentation expert creating an executive summary in {formatType} format."},
                {"role": "user", "content": summaryPrompt}
            ], produceUserAnswer = True)
        else:
            executiveSummary = ""
        # Step 3: Generate each section (one AI call per planned section)
        sections = []
        for section in detailedStructure:
            sectionTitle = section.get("title", "Section")
            keyPoints = section.get("keyPoints", [])
            subsections = section.get("subsections", [])
            importance = section.get("importance", "medium")
            # Adjust depth based on importance
            detailLevel = "high" if importance == "high" else "medium"
            sectionPrompt = f"""
            Create the "{sectionTitle}" section for a {documentType} titled "{title}".
            SECTION DETAILS:
            - Title: {sectionTitle}
            - Key Points to Cover: {', '.join(keyPoints)}
            - Subsections: {', '.join(subsections)}
            - Detail Level: {detailLevel}
            DOCUMENT CONTEXT:
            - Type: {documentType}
            - Audience: {audience}
            - Tone: {tone}
            - Format: {formatType}
            TASK CONTEXT: {prompt}
            AVAILABLE INFORMATION:
            {context[:500]}... (truncated)
            This section should:
            1. Be comprehensive and well-structured
            2. Cover all the key points listed
            3. Include the specified subsections with appropriate headings
            4. Maintain a {tone} tone suitable for the {audience} audience
            5. Be properly formatted according to {formatType} standards
            6. Include specific examples, data, or evidence where appropriate
            Be thorough in your coverage of this section, providing substantive content.
            """
            sectionContent = await self.mydom.callAi([
                {"role": "system", "content": f"You are a documentation expert creating detailed content for the {sectionTitle} section."},
                {"role": "user", "content": sectionPrompt}
            ], produceUserAnswer = True)
            sections.append(sectionContent)
        # Step 4: Generate conclusion
        conclusionPrompt = f"""
        Create the conclusion for a {documentType} titled "{title}".
        DOCUMENT OVERVIEW:
        - Type: {documentType}
        - Audience: {audience}
        - Key Topics: {', '.join(keyTopics)}
        TASK CONTEXT: {prompt}
        This conclusion should:
        1. Summarize the key points covered in the document
        2. Provide closure to the topics discussed
        3. Include any relevant recommendations or next steps
        4. Leave the reader with a clear understanding of the document's significance
        The conclusion should be professional and impactful, formatted according to {formatType} standards.
        """
        conclusion = await self.mydom.callAi([
            {"role": "system", "content": f"You are a documentation expert creating a conclusion in {formatType} format."},
            {"role": "user", "content": conclusionPrompt}
        ], produceUserAnswer = True)
        # Step 5: Assemble the complete document
        if formatType in ["md", "markdown"]:
            # Markdown format
            documentContent = f"# {title}\n\n"
            if executiveSummary:
                documentContent += f"## Executive Summary\n\n{executiveSummary}\n\n"
            documentContent += f"{introduction}\n\n"
            for i, sectionContent in enumerate(sections):
                # Ensure section starts with heading if not already
                sectionTitle = detailedStructure[i].get("title", f"Section {i+1}")
                if not sectionContent.strip().startswith("#"):
                    documentContent += f"## {sectionTitle}\n\n"
                documentContent += f"{sectionContent}\n\n"
            documentContent += f"## Conclusion\n\n{conclusion}\n"
        elif formatType == "html":
            # HTML format
            # NOTE(review): AI-generated fragments are interpolated unescaped.
            documentContent = f"<html>\n<head>\n<title>{title}</title>\n</head>\n<body>\n"
            documentContent += f"<h1>{title}</h1>\n\n"
            if executiveSummary:
                documentContent += f"<h2>Executive Summary</h2>\n<div>{executiveSummary}</div>\n\n"
            documentContent += f"<div>{introduction}</div>\n\n"
            for i, sectionContent in enumerate(sections):
                sectionTitle = detailedStructure[i].get("title", f"Section {i+1}")
                documentContent += f"<h2>{sectionTitle}</h2>\n<div>{sectionContent}</div>\n\n"
            documentContent += f"<h2>Conclusion</h2>\n<div>{conclusion}</div>\n"
            documentContent += "</body>\n</html>"
        else:
            # Plain text format with underlined headings
            documentContent = f"{title}\n{'=' * len(title)}\n\n"
            if executiveSummary:
                documentContent += f"EXECUTIVE SUMMARY\n{'-' * 17}\n\n{executiveSummary}\n\n"
            documentContent += f"{introduction}\n\n"
            for i, sectionContent in enumerate(sections):
                sectionTitle = detailedStructure[i].get("title", f"Section {i+1}")
                documentContent += f"{sectionTitle}\n{'-' * len(sectionTitle)}\n\n{sectionContent}\n\n"
            documentContent += f"CONCLUSION\n{'-' * 10}\n\n{conclusion}\n"
        # Create document object
        return self.formatAgentDocumentOutput(outputLabel, documentContent, contentType)
    except Exception as e:
        logger.error(f"Error creating document: {str(e)}", exc_info=True)
        # Create a simple error document in the requested format
        if formatType in ["md", "markdown"]:
            content = f"# Error in Documentation\n\nThere was an error generating the documentation: {str(e)}"
        elif formatType == "html":
            content = f"<html><body><h1>Error in Documentation</h1><p>There was an error generating the documentation: {str(e)}</p></body></html>"
        else:
            content = f"Error in Documentation\n\nThere was an error generating the documentation: {str(e)}"
        return {
            "label": outputLabel,
            "content": content,
            "metadata": {
                "contentType": contentType
            }
        }
# Factory function for the Documentation agent
def getAgentDocumentation():
    """Returns an instance of the Documentation agent.

    Factory used by the agent registry; a new AgentDocumentation is
    created on every call (no caching or singleton behavior).
    """
    return AgentDocumentation()

View file

@ -1,158 +0,0 @@
"""
Authentication module for backend API.
Handles JWT-based authentication, token generation, and user context.
"""
from datetime import datetime, timedelta, timezone
from typing import Optional, Dict, Any, Tuple
from fastapi import Depends, HTTPException, status
from fastapi.security import OAuth2PasswordBearer
from jose import JWTError, jwt
import logging
from modules.gatewayInterface import getGatewayInterface
from modules.configuration import APP_CONFIG
# Get Config Data
SECRET_KEY = APP_CONFIG.get("APP_JWT_SECRET_SECRET")
ALGORITHM = APP_CONFIG.get("Auth_ALGORITHM")
ACCESS_TOKEN_EXPIRE_MINUTES = int(APP_CONFIG.get("APP_TOKEN_EXPIRY"))
# OAuth2 Setup
oauth2Scheme = OAuth2PasswordBearer(tokenUrl="token")
# Logger
logger = logging.getLogger(__name__)
def createAccessToken(data: dict, expiresDelta: Optional[timedelta] = None) -> str:
    """
    Creates a signed JWT access token.

    Args:
        data: Claims to encode (usually user ID or username)
        expiresDelta: Optional custom token lifetime; when omitted the
            configured ACCESS_TOKEN_EXPIRE_MINUTES is used.

    Returns:
        Encoded JWT token as a string
    """
    lifetime = expiresDelta if expiresDelta else timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES)
    # Copy the claims so the caller's dict is not mutated.
    claims = dict(data)
    claims["exp"] = datetime.now(timezone.utc) + lifetime
    return jwt.encode(claims, SECRET_KEY, algorithm=ALGORITHM)
async def getCurrentUser(token: str = Depends(oauth2Scheme)) -> Dict[str, Any]:
    """
    Extracts and validates the current user from the JWT token.

    Args:
        token: JWT Token from the Authorization header

    Returns:
        User data as retrieved from the database

    Raises:
        HTTPException: 401 for an invalid token or unknown user,
            403 when the user account is disabled
    """
    # Shared 401 response for every credential failure below.
    credentialsException = HTTPException(
        status_code=status.HTTP_401_UNAUTHORIZED,
        detail="Invalid authentication credentials",
        headers={"WWW-Authenticate": "Bearer"},
    )
    try:
        # Decode token; raises JWTError on bad signature or expiry.
        payload = jwt.decode(token, SECRET_KEY, algorithms=[ALGORITHM])
        # Extract username from token ("sub" claim)
        username: str = payload.get("sub")
        if username is None:
            raise credentialsException
        # Extract mandate ID from token (if present)
        # NOTE(review): mandateId is decoded here but never used or attached
        # to the returned user — confirm whether that is intended.
        mandateId: int = payload.get("mandateId", 1) # Default: Root mandate
    except JWTError:
        logger.warning("Invalid JWT Token")
        raise credentialsException
    # Initialize Gateway Interface without context
    gateway = getGatewayInterface()
    # Retrieve user from database
    user = gateway.getUserByUsername(username)
    if user is None:
        logger.warning(f"User {username} not found")
        raise credentialsException
    if user.get("disabled", False):
        logger.warning(f"User {username} is disabled")
        raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="User is disabled")
    return user
async def getCurrentActiveUser(currentUser: Dict[str, Any] = Depends(getCurrentUser)) -> Dict[str, Any]:
    """
    FastAPI dependency that ensures the authenticated user is active.

    Args:
        currentUser: User data resolved by the getCurrentUser dependency

    Returns:
        The unchanged user data when the account is active

    Raises:
        HTTPException: 403 when the user is disabled
    """
    # Missing "disabled" flag is treated as active.
    if not currentUser.get("disabled", False):
        return currentUser
    raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="User is disabled")
def _coerceIdInt(rawValue, fieldName: str, default: int) -> int:
    """Coerce a raw id value to int, logging and falling back to *default*.

    Shared by getUserContext for mandateId and userId; the original
    duplicated this extraction logic for both fields.
    """
    if rawValue is None:
        logger.warning(f"No {fieldName} found in currentUser, using default: {default}")
        return default
    try:
        return int(rawValue)
    except (ValueError, TypeError):
        logger.error(f"Invalid {fieldName} value: {rawValue}, using default: {default}")
        return default

async def getUserContext(currentUser: Dict[str, Any]) -> Tuple[int, int]:
    """
    Extracts the mandate ID and user ID from the current user.
    Enhanced with better logging.

    Args:
        currentUser: The current user record

    Returns:
        Tuple of (mandateId, userId); each falls back to 0 when the value
        is missing or not convertible to int (the fallback is logged).
    """
    mandateId = _coerceIdInt(currentUser.get("mandateId"), "mandateId", 0)
    # Note: the dict key is "id" but it is logged/reported as "userId".
    userId = _coerceIdInt(currentUser.get("id"), "userId", 0)
    return mandateId, userId

View file

@ -1,183 +0,0 @@
"""
Utility module for configuration management.
This module provides a global APP_CONFIG object for accessing configuration from both
config.ini files and environment variables stored in .env files, using a flat structure.
"""
import os
import logging
from typing import Any, Dict, Optional
from pathlib import Path
# Set up logging
logger = logging.getLogger(__name__)
class Configuration:
    """
    Configuration class with attribute-style access to flattened configuration.

    Values come from a flat ``config.ini`` file, a ``.env`` file and the
    process environment; files are re-read automatically when their
    modification time changes.
    """
    def __init__(self):
        """Initialize the configuration object"""
        self._data = {}              # flat key -> value store
        self._configFilePath = None  # resolved config.ini path (once found)
        self._envFilePath = None     # resolved .env path (once found)
        self._configMtime = 0        # last seen mtime of config.ini
        self._envMtime = 0           # last seen mtime of .env
        self.refresh()

    def refresh(self):
        """Reload configuration from files"""
        self._loadConfig()
        self._loadEnv()
        logger.info("Configuration refreshed")

    def _parseKeyValueFile(self, filePath):
        """Parse flat ``KEY = VALUE`` lines from *filePath* into self._data.

        Blank lines and ``#`` comments are skipped; a value keeps everything
        after the first ``=``. Shared by _loadConfig and _loadEnv, which
        previously duplicated this loop verbatim.
        """
        with open(filePath, 'r') as f:
            for line in f:
                line = line.strip()
                # Skip empty lines and comments
                if not line or line.startswith('#'):
                    continue
                # Parse key-value pairs
                if '=' in line:
                    key, value = line.split('=', 1)
                    self._data[key.strip()] = value.strip()

    def _loadConfig(self):
        """Load configuration from config.ini file in flattened format"""
        # Look in the current directory first, then the parent directory.
        configPath = Path('config.ini')
        if not configPath.exists():
            configPath = Path('../config.ini')
            if not configPath.exists():
                logger.warning(f"Configuration file not found at {configPath.absolute()}")
                return
        self._configFilePath = configPath
        currentMtime = os.path.getmtime(configPath)
        # Skip if file hasn't changed since the last load.
        if currentMtime <= self._configMtime:
            return
        self._configMtime = currentMtime
        try:
            self._parseKeyValueFile(configPath)
        except Exception as e:
            logger.error(f"Error loading configuration: {e}")

    def _loadEnv(self):
        """Load environment variables from .env file"""
        # Look in the current directory first, then the parent directory.
        envPath = Path('.env')
        if not envPath.exists():
            envPath = Path('../.env')
            if not envPath.exists():
                logger.warning(f"Environment file not found at {envPath.absolute()}")
                return
        self._envFilePath = envPath
        currentMtime = os.path.getmtime(envPath)
        # Skip if file hasn't changed since the last load.
        if currentMtime <= self._envMtime:
            return
        self._envMtime = currentMtime
        try:
            self._parseKeyValueFile(envPath)
            logger.info(f"Loaded environment variables from {envPath.absolute()}")
            # Also load system environment variables (don't override existing)
            for key, value in os.environ.items():
                self._data.setdefault(key, value)
        except Exception as e:
            logger.error(f"Error loading environment variables: {e}")

    def checkForUpdates(self):
        """Check if configuration files have changed and reload if necessary"""
        if self._configFilePath and os.path.exists(self._configFilePath):
            if os.path.getmtime(self._configFilePath) > self._configMtime:
                logger.info("Config file has changed, reloading...")
                self._loadConfig()
        if self._envFilePath and os.path.exists(self._envFilePath):
            if os.path.getmtime(self._envFilePath) > self._envMtime:
                logger.info("Environment file has changed, reloading...")
                self._loadEnv()

    def get(self, key: str, default: Any = None) -> Any:
        """Get configuration value with optional default.

        Keys ending in ``_SECRET`` are routed through handleSecret().
        """
        self.checkForUpdates()  # Check for file changes
        if key in self._data:
            value = self._data[key]
            # Handle secrets (keys ending with _SECRET)
            if key.endswith("_SECRET"):
                return handleSecret(value)
            return value
        return default

    def __getattr__(self, name: str) -> Any:
        """Enable attribute-style access to configuration.

        get() already performs the file-change check, so it is not repeated
        here (the original ran checkForUpdates twice per attribute access).
        """
        value = self.get(name)
        if value is None:
            raise AttributeError(f"Configuration key '{name}' not found")
        return value

    def __dir__(self) -> list:
        """Support auto-completion of attributes"""
        self.checkForUpdates()  # Check for file changes
        return list(self._data.keys()) + super().__dir__()

    def set(self, key: str, value: Any) -> None:
        """Set a configuration value (for testing/overrides)"""
        self._data[key] = value
def handleSecret(value: str) -> str:
    """
    Process a secret configuration value.

    Currently a plain pass-through, kept as a hook so real decryption can
    be added later without touching call sites.

    Args:
        value: The secret value to handle

    Returns:
        str: Processed secret value (currently unmodified)
    """
    return value
# Create the global APP_CONFIG instance
# Import this shared singleton instead of constructing Configuration()
# directly, so all modules see the same cached data and mtime tracking.
APP_CONFIG = Configuration()

View file

@ -1,796 +0,0 @@
"""
Webcrawler agent for research and retrieval of information from the web.
Reimagined with an output-first, AI-driven approach.
"""
import logging
import json
import re
import time
from typing import Dict, Any, List
from urllib.parse import quote_plus, unquote
from bs4 import BeautifulSoup
import requests
import markdown
from modules.workflowAgentsRegistry import AgentBase
from modules.configuration import APP_CONFIG
logger = logging.getLogger(__name__)
class AgentWebcrawler(AgentBase):
"""AI-driven agent for web research and information retrieval"""
def __init__(self):
    """Initialize the webcrawler agent and load its crawl limits from APP_CONFIG."""
    super().__init__()
    # Identifier used by the workflow agents registry.
    self.name = "webcrawler"
    self.description = "Conducts web research and collects information from online sources"
    # Capability tags advertised to the registry/planner.
    self.capabilities = [
        "webSearch",
        "informationRetrieval",
        "dataCollection",
        "searchResultsAnalysis",
        "webpageContentExtraction"
    ]
    # Web crawling configuration
    # NOTE(review): the current config.ini defines Agent_Webcrawler_SERPAPI_*
    # keys instead of these names, so each lookup below would fall back to
    # its inline default — confirm which key set is authoritative.
    self.maxUrl = int(APP_CONFIG.get("Agent_Webcrawler_MAX_URLS", "5"))
    self.maxSearchTerms = int(APP_CONFIG.get("Agent_Webcrawler_MAX_SEARCH_KEYWORDS", "3"))
    self.maxResults = int(APP_CONFIG.get("Agent_Webcrawler_MAX_SEARCH_RESULTS", "5"))
    # Per-request timeout in seconds for search/page fetches.
    self.timeout = int(APP_CONFIG.get("Agent_Webcrawler_TIMEOUT", "30"))
    # Base URL the search query is appended to.
    self.searchEngine = APP_CONFIG.get("Agent_Webcrawler_SEARCH_ENGINE", "https://html.duckduckgo.com/html/?q=")
    self.userAgent = APP_CONFIG.get("Agent_Webcrawler_USER_AGENT", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36")
def setDependencies(self, mydom=None):
    """Set external dependencies for the agent.

    Args:
        mydom: Service object exposing callAi(); None leaves the agent
            without AI access (processTask then returns an error message).
    """
    self.mydom = mydom
async def processTask(self, task: Dict[str, Any]) -> Dict[str, Any]:
    """
    Run an AI-guided web research task and package the findings.

    Args:
        task: Task dictionary with "prompt" and "outputSpecifications" entries.

    Returns:
        Dictionary with "feedback" (str) and "documents" (list) keys.
    """
    try:
        query = task.get("prompt", "")
        requestedOutputs = task.get("outputSpecifications", [])
        # Planning and summarisation both require the AI service.
        if not self.mydom:
            return {
                "feedback": "The Webcrawler agent requires an AI service to function effectively.",
                "documents": []
            }
        # Ask the AI for a research plan first.
        plan = await self._createResearchPlan(query)
        # Bail out early when the planner decides the web is not needed.
        if not plan.get("requiresWebResearch", True):
            return {
                "feedback": "This task doesn't appear to require web research. Please try a different agent.",
                "documents": []
            }
        # Gather raw material through web research, then format it.
        sourceMaterial = await self._gatherResearchMaterial(plan)
        outputDocuments = await self._createOutputDocuments(
            query,
            sourceMaterial,
            requestedOutputs,
            plan
        )
        defaultFeedback = f"I conducted web research on '{query[:50]}...' and gathered information from {len(sourceMaterial)} relevant sources."
        return {
            "feedback": plan.get("feedback", defaultFeedback),
            "documents": outputDocuments
        }
    except Exception as e:
        logger.error(f"Error during web research: {str(e)}", exc_info=True)
        return {
            "feedback": f"Error during web research: {str(e)}",
            "documents": []
        }
async def _createResearchPlan(self, prompt: str) -> Dict[str, Any]:
"""
Use AI to create a detailed research plan.
Args:
prompt: The research query
Returns:
Research plan dictionary
"""
researchPrompt = f"""
Create a detailed web research plan for this task: "{prompt}"
Analyze the request carefully and create a structured plan in JSON format with the following elements:
{{
"requiresWebResearch": true/false, # Whether this genuinely requires web research
"researchQuestions": ["question1", "question2", ...], # 2-4 specific questions to answer
"searchTerms": ["term1", "term2", ...], # Up to {self.maxSearchTerms} effective search terms
"directUrls": ["url1", "url2", ...], # Any URLs directly mentioned in the request (up to {self.maxUrl})
"expectedSources": ["type1", "type2", ...], # Types of sources that would be most valuable
"contentFocus": "what specific content to extract or focus on",
"feedback": "explanation of how the research will be conducted"
}}
Respond with ONLY the JSON object, no additional text or explanations.
"""
try:
# Get research plan from AI
response = await self.mydom.callAi([
{"role": "system", "content": "You are a web research planning expert. Create precise research plans in JSON format only."},
{"role": "user", "content": researchPrompt}
])
# Extract JSON
jsonStart = response.find('{')
jsonEnd = response.rfind('}') + 1
if jsonStart >= 0 and jsonEnd > jsonStart:
plan = json.loads(response[jsonStart:jsonEnd])
# Ensure we have the expected fields with defaults if missing
if "searchTerms" not in plan:
plan["searchTerms"] = [prompt]
if "directUrls" not in plan:
plan["directUrls"] = []
if "researchQuestions" not in plan:
plan["researchQuestions"] = ["What information can be found about this topic?"]
return plan
else:
# Fallback plan
logger.warning(f"Not able creating research plan, generating fallback plan")
return {
"requiresWebResearch": True,
"researchQuestions": ["What information can be found about this topic?"],
"searchTerms": [prompt],
"directUrls": [],
"expectedSources": ["Web pages", "Articles"],
"contentFocus": "Relevant information about the topic",
"feedback": f"I'll conduct web research on '{prompt}' and gather relevant information."
}
except Exception as e:
logger.warning(f"Error creating research plan: {str(e)}")
# Simple fallback plan
return {
"requiresWebResearch": True,
"researchQuestions": ["What information can be found about this topic?"],
"searchTerms": [prompt],
"directUrls": [],
"expectedSources": ["Web pages", "Articles"],
"contentFocus": "Relevant information about the topic",
"feedback": f"I'll conduct web research on '{prompt}' and gather relevant information."
}
async def _gatherResearchMaterial(self, researchPlan: Dict[str, Any]) -> List[Dict[str, Any]]:
    """
    Gather research material based on the research plan.

    Fetches any directly-mentioned URLs first (capped at self.maxUrl),
    then works through the plan's search terms (capped at
    self.maxSearchTerms) until self.maxResults sources are collected,
    de-duplicating by URL. Each collected source is then summarized.

    Args:
        researchPlan: Research plan dictionary (uses directUrls, searchTerms)

    Returns:
        List of research results (dicts with title/url/sourceType/content/summary;
        search hits additionally carry a snippet)
    """
    allResults = []
    # Process direct URLs
    directUrls = researchPlan.get("directUrls", [])[:self.maxUrl]
    for url in directUrls:
        logger.info(f"Processing direct URL: {url}")
        try:
            # Fetch and extract content (_readUrl returns None on failure)
            soup = self._readUrl(url)
            if soup:
                # Extract title and content
                title = self._extractTitle(soup, url)
                content = self._extractMainContent(soup)
                # Add to results
                allResults.append({
                    "title": title,
                    "url": url,
                    "sourceType": "directUrl",
                    "content": content,
                    "summary": ""  # Will be filled later
                })
        except Exception as e:
            logger.warning(f"Error processing URL {url}: {str(e)}")
    # Process search terms
    searchTerms = researchPlan.get("searchTerms", [])[:self.maxSearchTerms]
    for term in searchTerms:
        logger.info(f"Searching for: {term}")
        try:
            # Perform search
            searchResults = self._searchWeb(term)
            # Process each search result
            for result in searchResults:
                # Skip URLs already collected via directUrls or earlier terms
                if not any(r["url"] == result["url"] for r in allResults):
                    allResults.append({
                        "title": result["title"],
                        "url": result["url"],
                        "sourceType": "searchResult",
                        "content": result["data"],
                        "snippet": result["snippet"],
                        "summary": ""  # Will be filled later
                    })
                # Stop if we've reached the maximum results (inner loop)
                if len(allResults) >= self.maxResults:
                    break
        except Exception as e:
            logger.warning(f"Error searching for {term}: {str(e)}")
        # Stop if we've reached the maximum results (stops further terms)
        if len(allResults) >= self.maxResults:
            break
    # Create summaries for all collected results (done sequentially,
    # one AI call per source, inside _summarizeAllResults)
    allResults = await self._summarizeAllResults(allResults, researchPlan)
    return allResults
async def _summarizeAllResults(self, results: List[Dict[str, Any]], researchPlan: Dict[str, Any]) -> List[Dict[str, Any]]:
    """
    Create summaries for all research results (one AI call per result).

    Args:
        results: List of research results
        researchPlan: Research plan with questions and focus

    Returns:
        The same result dicts, each with its "summary" field filled in.
    """
    total = len(results)
    questions = researchPlan.get("researchQuestions", ["What relevant information does this page contain?"])
    focus = researchPlan.get("contentFocus", "Relevant information")
    for index, result in enumerate(results):
        logger.info(f"Summarizing result {index+1}/{total}: {result['title'][:30]}...")
        try:
            # Cap the content so the summarization prompt stays within token limits
            content = self._limitText(result.get("content", ""), maxChars=8000)
            summaryPrompt = f"""
Summarize this web page content based on these research questions:
{', '.join(questions)}
Focus on: {focus}
Web page: {result['url']}
Title: {result['title']}
Content:
{content}
Create a concise summary that:
1. Directly answers the research questions if possible
2. Extracts the most relevant information from the page
3. Includes specific facts, figures, or quotes if available
4. Is around 2000 characters long
Only include information actually found in the content. No fabrications or assumptions.
"""
            if self.mydom:
                result["summary"] = await self.mydom.callAi([
                    {"role": "system", "content": "You summarize web content accurately and concisely, focusing only on what is actually in the content."},
                    {"role": "user", "content": summaryPrompt}
                ])
            else:
                # No AI service wired up: record a placeholder instead.
                logger.warning(f"Not able to summarize result, using fallback plan.")
                result["summary"] = f"Content from {result['url']} ({len(content)} characters)"
        except Exception as e:
            logger.warning(f"Error summarizing result {index+1}: {str(e)}")
            result["summary"] = f"Error creating summary: {str(e)}"
    return results
async def _createOutputDocuments(self, prompt: str, results: List[Dict[str, Any]],
                                 outputSpecs: List[Dict[str, Any]], researchPlan: Dict[str, Any]) -> List[Dict[str, Any]]:
    """
    Create output documents based on research results and specifications.

    One document is produced per output specification; when no
    specifications are given, a single markdown report is generated.

    Args:
        prompt: Original research prompt
        results: List of research results
        outputSpecs: Output specifications (label/description dicts)
        researchPlan: Research plan

    Returns:
        List of output documents
    """
    specs = outputSpecs or [{
        "label": "webResearchResults.md",
        "description": "Comprehensive web research results"
    }]
    documents = []
    for spec in specs:
        label = spec.get("label", "")
        description = spec.get("description", "")
        # File extension decides which generator handles the spec.
        fmt = self._determineFormatType(label)
        if fmt == "json":
            doc = await self._createJsonDocument(prompt, results, researchPlan, label)
        elif fmt == "csv":
            doc = await self._createCsvDocument(results, label)
        else:
            # markdown / html / text all go through the narrative generator
            doc = await self._createNarrativeDocument(prompt, results, researchPlan, fmt, label, description)
        documents.append(doc)
    return documents
async def _createNarrativeDocument(self, prompt: str, results: List[Dict[str, Any]],
                                   researchPlan: Dict[str, Any], formatType: str,
                                   outputLabel: str, outputDescription: str) -> Dict[str, Any]:
    """
    Create a narrative document (markdown, html, text) from research results.

    Args:
        prompt: Original research prompt
        results: Research results
        researchPlan: Research plan
        formatType: Output format (markdown, html, text)
        outputLabel: Output filename
        outputDescription: Output description

    Returns:
        Document object; on AI failure, a document describing the error
        in the requested format.
    """
    # Map the requested format to a MIME type and a template hint.
    if formatType == "markdown":
        contentType = "text/markdown"
        templateFormat = "markdown"
    elif formatType == "html":
        contentType = "text/html"
        templateFormat = "html"
    else:
        contentType = "text/plain"
        templateFormat = "text"
    researchQuestions = researchPlan.get("researchQuestions", [])
    searchTerms = researchPlan.get("searchTerms", [])
    # Condense each source into the fields the report prompt needs.
    sourcesSummary = [{
        "title": result.get("title", "Untitled"),
        "url": result.get("url", ""),
        "summary": result.get("summary", ""),
        "snippet": result.get("snippet", "")
    } for result in results]
    # Keep the serialized sources under ~10k chars so the prompt fits.
    sourcesJson = json.dumps(sourcesSummary, indent=2)
    if len(sourcesJson) > 10000:
        for i in range(len(sourcesSummary)):
            if len(sourcesJson) <= 10000:
                break
            # Bug fix: only truncate summaries actually longer than 500
            # chars. Previously "..." was appended unconditionally, which
            # GREW short summaries and could never shrink the payload.
            if len(sourcesSummary[i]["summary"]) > 500:
                sourcesSummary[i]["summary"] = sourcesSummary[i]["summary"][:500] + "..."
                sourcesJson = json.dumps(sourcesSummary, indent=2)
    reportPrompt = f"""
Create a comprehensive {formatType} research report based on the following web research:
TASK: {prompt}
RESEARCH QUESTIONS:
{', '.join(researchQuestions)}
SEARCH TERMS USED:
{', '.join(searchTerms)}
SOURCES AND FINDINGS:
{sourcesJson}
REPORT DETAILS:
- Format: {templateFormat}
- Filename: {outputLabel}
- Description: {outputDescription}
Create a well-structured report that:
1. Includes an executive summary of key findings
2. Addresses each research question directly
3. Integrates information from all relevant sources
4. Cites sources appropriately for each piece of information
5. Provides a comprehensive synthesis of the research
6. Is formatted professionally and appropriately for {templateFormat}
The report should be scholarly, accurate, and focused on the original research task.
"""
    try:
        # Generate report with AI
        reportContent = await self.mydom.callAi([
            {"role": "system", "content": f"You create professional research reports in {templateFormat} format."},
            {"role": "user", "content": reportPrompt}
        ])
        # Convert to HTML if needed
        if formatType == "html" and not reportContent.lower().startswith("<html"):
            # Heuristic: a leading '#' suggests the model answered in markdown.
            if reportContent.startswith("#"):
                reportContent = markdown.markdown(reportContent)
            # Wrap in basic HTML structure if still not a full document.
            if not reportContent.lower().startswith("<html"):
                reportContent = f"<html><head><title>Web Research Results</title></head><body>{reportContent}</body></html>"
        return self.formatAgentDocumentOutput(outputLabel, reportContent, contentType)
    except Exception as e:
        logger.error(f"Error creating narrative document: {str(e)}")
        # Produce an error document in the requested format.
        if formatType == "markdown":
            content = f"# Web Research Error\n\nAn error occurred: {str(e)}"
        elif formatType == "html":
            content = f"<html><body><h1>Web Research Error</h1><p>An error occurred: {str(e)}</p></body></html>"
        else:
            content = f"WEB RESEARCH ERROR\n\nAn error occurred: {str(e)}"
        return self.formatAgentDocumentOutput(outputLabel, content, contentType)
async def _createJsonDocument(self, prompt: str, results: List[Dict[str, Any]],
researchPlan: Dict[str, Any], outputLabel: str) -> Dict[str, Any]:
"""
Create a JSON document from research results.
Args:
prompt: Original research prompt
results: Research results
researchPlan: Research plan
outputLabel: Output filename
Returns:
Document object
"""
try:
# Create structured data
sourcesData = []
for result in results:
sourcesData.append({
"title": result.get("title", "Untitled"),
"url": result.get("url", ""),
"summary": result.get("summary", ""),
"snippet": result.get("snippet", ""),
"sourceType": result.get("sourceType", "")
})
# Create metadata
metadata = {
"query": prompt,
"timestamp": time.strftime("%Y-%m-%d %H:%M:%S"),
"researchQuestions": researchPlan.get("researchQuestions", []),
"searchTerms": researchPlan.get("searchTerms", [])
}
# Compile complete report object
jsonContent = {
"metadata": metadata,
"summary": researchPlan.get("feedback", "Web research results"),
"sources": sourcesData
}
# Convert to JSON string
content = json.dumps(jsonContent, indent=2)
return self.formatAgentDocumentOutput(outputLabel, content, "application/json")
except Exception as e:
logger.error(f"Error creating JSON document: {str(e)}")
return self.formatAgentDocumentOutput(outputLabel, json.dumps({"error": str(e)}), "application/json")
async def _createCsvDocument(self, results: List[Dict[str, Any]], outputLabel: str) -> Dict[str, Any]:
"""
Create a CSV document from research results.
Args:
results: Research results
outputLabel: Output filename
Returns:
Document object
"""
try:
# Create CSV header
csvLines = ["Title,URL,Source Type,Snippet"]
# Add results
for result in results:
# Escape CSV fields
title = result.get("title", "").replace('"', '""')
url = result.get("url", "").replace('"', '""')
sourceType = result.get("sourceType", "").replace('"', '""')
snippet = result.get("snippet", "").replace('"', '""')
csvLines.append(f'"{title}","{url}","{sourceType}","{snippet}"')
# Combine into CSV content
content = "\n".join(csvLines)
return self.formatAgentDocumentOutput(outputLabel, content, "text/csv")
except Exception as e:
logger.error(f"Error creating CSV document: {str(e)}")
return self.formatAgentDocumentOutput(outputLabel, "Error,Error\nFailed to create CSV,{0}".format(str(e)), "text/csv")
def _determineFormatType(self, outputLabel: str) -> str:
"""
Determine the format type based on the filename.
Args:
outputLabel: Output filename
Returns:
Format type (markdown, html, text, json, csv)
"""
outputLabelLower = outputLabel.lower()
if outputLabelLower.endswith(".md"):
return "markdown"
elif outputLabelLower.endswith(".html"):
return "html"
elif outputLabelLower.endswith(".txt"):
return "text"
elif outputLabelLower.endswith(".json"):
return "json"
elif outputLabelLower.endswith(".csv"):
return "csv"
else:
# Default to markdown
return "markdown"
def _searchWeb(self, query: str) -> List[Dict[str, str]]:
    """
    Conduct a web search and return the results.

    Fetches the configured search-engine results page, parses each
    '.result' container (class names from DuckDuckGo's HTML interface),
    and downloads the content of each hit, up to self.maxResults.

    Args:
        query: The search query

    Returns:
        List of dicts with keys 'title', 'url', 'snippet', 'data';
        empty list when the results page has no '.result' elements.
    """
    formattedQuery = quote_plus(query)
    url = f"{self.searchEngine}{formattedQuery}"
    searchResultsSoup = self._readUrl(url)
    # NOTE(review): '.result'/'.result__a'/'.result__snippet' are
    # DuckDuckGo HTML-UI classes; a differently configured SEARCH_ENGINE
    # will silently yield no results here.
    if not searchResultsSoup or not searchResultsSoup.select('.result'):
        logger.warning(f"No search results found for: {query}")
        return []
    # Extract search results
    results = []
    # Find all result containers
    resultElements = searchResultsSoup.select('.result')
    for result in resultElements:
        # Extract title
        titleElement = result.select_one('.result__a')
        title = titleElement.text.strip() if titleElement else 'No title'
        # Extract URL (DuckDuckGo uses redirects)
        urlElement = titleElement.get('href') if titleElement else ''
        extractedUrl = 'No URL'
        if urlElement:
            # NOTE(review): assumes redirect links start with '/d.js?q='
            # carrying the target in the 'q' parameter — confirm; the HTML
            # endpoint has also used '//duckduckgo.com/l/?uddg=' redirects.
            if urlElement.startswith('/d.js?q='):
                start = urlElement.find('?q=') + 3
                end = urlElement.find('&', start) if '&' in urlElement[start:] else None
                extractedUrl = unquote(urlElement[start:end])
                # Ensure URL has correct protocol prefix
                if not extractedUrl.startswith(('http://', 'https://')):
                    if not extractedUrl.startswith('//'):
                        extractedUrl = 'https://' + extractedUrl
                    else:
                        extractedUrl = 'https:' + extractedUrl
            else:
                extractedUrl = urlElement
        # Extract snippet directly from search results page
        snippetElement = result.select_one('.result__snippet')
        snippet = snippetElement.text.strip() if snippetElement else 'No description'
        # Get actual page content (one extra HTTP fetch per result)
        try:
            targetPageSoup = self._readUrl(extractedUrl)
            content = self._extractMainContent(targetPageSoup)
        except Exception as e:
            logger.warning(f"Error extracting content from {extractedUrl}: {str(e)}")
            content = f"Error extracting content: {str(e)}"
        results.append({
            'title': title,
            'url': extractedUrl,
            'snippet': snippet,
            'data': content
        })
        # Limit number of results
        if len(results) >= self.maxResults:
            break
    return results
def _readUrl(self, url: str) -> BeautifulSoup:
    """
    Read a URL and return a BeautifulSoup parser for the content.

    Performs a GET with the configured User-Agent and self.timeout.
    An HTTP 202 response is retried up to four times with increasing
    sleeps (0.5s, 1s, 2s, 5s).

    Args:
        url: The URL to read (must start with http:// or https://)

    Returns:
        BeautifulSoup object with the content, or None on errors
        (invalid scheme, network failure, or non-2xx final status).
    """
    if not url or not url.startswith(('http://', 'https://')):
        return None
    headers = {
        'User-Agent': self.userAgent,
        'Accept': 'text/html,application/xhtml+xml,application/xml',
        'Accept-Language': 'en-US,en;q=0.9',
    }
    try:
        # Initial request
        response = requests.get(url, headers=headers, timeout=self.timeout)
        # Handling for status 202 — NOTE(review): presumably targets
        # deferred/rate-limited responses (e.g. from search endpoints);
        # confirm which sites actually return 202 here.
        if response.status_code == 202:
            # Retry with backoff (blocking sleeps, worst case ~8.5s total)
            backoffTimes = [0.5, 1.0, 2.0, 5.0]
            for waitTime in backoffTimes:
                time.sleep(waitTime)
                response = requests.get(url, headers=headers, timeout=self.timeout)
                if response.status_code != 202:
                    break
        # Raise for error status codes (converted to None by the handler below)
        response.raise_for_status()
        # Parse HTML
        return BeautifulSoup(response.text, 'html.parser')
    except Exception as e:
        logger.error(f"Error reading URL {url}: {str(e)}")
        return None
def _extractTitle(self, soup: BeautifulSoup, url: str) -> str:
"""
Extract the title from a webpage.
Args:
soup: BeautifulSoup object of the webpage
url: URL of the webpage
Returns:
Extracted title
"""
if not soup:
return f"Error with {url}"
# Extract title from title tag
titleTag = soup.find('title')
title = titleTag.text.strip() if titleTag else "No title"
# Alternative: Also look for h1 tags if title tag is missing
if title == "No title":
h1Tag = soup.find('h1')
if h1Tag:
title = h1Tag.text.strip()
return title
def _extractMainContent(self, soup: BeautifulSoup, maxChars: int = 10000) -> str:
    """
    Extract the main content from an HTML page.

    Looks for a main-content container in priority order, falls back to
    <body> (or the whole document), strips boilerplate elements, and
    returns the remaining text capped at maxChars.

    Args:
        soup: BeautifulSoup object of the webpage (None yields "")
        maxChars: Maximum number of characters to return

    Returns:
        Extracted main content as a string
    """
    if not soup:
        return ""
    container = None
    for candidate in ('main', 'article', '#content', '.content', '#main', '.main'):
        node = soup.select_one(candidate)
        if node:
            container = node
            break
    # Truthiness check (not `is None`): bs4 tags are falsy when empty,
    # matching the original fallback-to-body behavior.
    if not container:
        container = soup.find('body') or soup
    # Drop elements that never contribute to the main text.
    boilerplate = 'script, style, nav, footer, header, aside, .sidebar, #sidebar, .comments, #comments, .advertisement, .ads, iframe'
    for junk in container.select(boilerplate):
        junk.extract()
    text = container.get_text(separator=' ', strip=True)
    return text[:maxChars]
def _limitText(self, text: str, maxChars: int = 10000) -> str:
"""
Limit text to a maximum number of characters.
Args:
text: Input text
maxChars: Maximum number of characters
Returns:
Limited text
"""
if not text:
return ""
# If text is already under the limit, return unchanged
if len(text) <= maxChars:
return text
# Otherwise limit text to maxChars
return text[:maxChars] + "... [Content truncated due to length]"
# Factory function for the Webcrawler agent
def getAgentWebcrawler():
    """Returns a new AgentWebcrawler instance (module factory entry point)."""
    return AgentWebcrawler()

View file

@ -1,123 +0,0 @@
from pydantic import BaseModel, Field
from typing import List, Dict, Any, Optional
# Define the model for attribute definitions
class AttributeDefinition(BaseModel):
    """Pydantic model describing one editable form attribute (UI metadata)."""
    name: str                  # internal field name
    label: str                 # human-readable label shown in the UI
    type: str                  # widget/value type, e.g. "string", "number", "select"
    required: bool = False
    placeholder: Optional[str] = None
    defaultValue: Optional[Any] = None
    options: Optional[List[Dict[str, Any]]] = None   # choices for select-type fields
    editable: bool = True
    visible: bool = True
    order: int = 0             # display position within the form
    validation: Optional[Dict[str, Any]] = None      # opaque validation hints
    helpText: Optional[str] = None
# Helper classes for type mapping
# Maps stringified Python/typing annotations to generic UI field types;
# unmapped annotations fall back to "string" in getModelAttributes.
typeMappings = {
    "int": "number",
    "str": "string",
    "float": "number",
    "bool": "boolean",
    "List[int]": "array",
    "List[str]": "array",
    "Dict[str, Any]": "object",
    "Optional[str]": "string",
    "Optional[int]": "number",
    "Optional[Dict[str, Any]]": "object"
}
# Special field types based on naming conventions
# Field names that always get a dedicated widget, overriding the
# annotation-based mapping in typeMappings.
specialFieldTypes = {
    "content": "textarea",
    "description": "textarea",
    "instructions": "textarea",
    "password": "password",
    "email": "email",
    "workspaceId": "select",
    "agentId": "select",
    "type": "select"
}
# Function to convert a Pydantic model into attribute definitions
def getModelAttributes(modelClass, userLanguage="de"):
    """
    Converts a Pydantic model into a list of AttributeDefinition objects.

    NOTE(review): relies on pydantic v1 internals (``__fields__``,
    ``field.type_``, ``field.required``, ``field.validators``); these were
    renamed or removed in pydantic v2 — confirm the pinned version.

    Args:
        modelClass: The Pydantic model class to introspect.
        userLanguage: Language code used to resolve labels from the
            model's optional ``fieldLabels`` mapping (default "de").

    Returns:
        List of AttributeDefinition objects, one per public field.
    """
    attributes = []
    # Go through all fields in the model
    for i, (fieldName, field) in enumerate(modelClass.__fields__.items()):
        # Skip internal fields
        if fieldName.startswith('_') or fieldName in ["label", "fieldLabels"]:
            continue
        # Determine the field type (fallback "string" for unmapped annotations)
        fieldType = typeMappings.get(str(field.type_), "string")
        # Check for special field types (naming-convention overrides)
        if fieldName in specialFieldTypes:
            fieldType = specialFieldTypes[fieldName]
        # Get the label (if available)
        fieldLabel = fieldName.replace('_', ' ').capitalize()
        if hasattr(modelClass, 'fieldLabels') and fieldName in modelClass.fieldLabels:
            labelObj = modelClass.fieldLabels[fieldName]
            fieldLabel = labelObj.getLabel(userLanguage)
        # Determine default values and required status
        required = field.required
        defaultValue = field.default if not field.required else None
        # Check for validation rules (only presence is reported, not details)
        validation = None
        if field.validators:
            validation = {"hasValidators": True}
        # Placeholder text
        placeholder = f"Please enter {fieldLabel}"
        # Special options for Select fields
        options = None
        if fieldType == "select":
            # Only the Agent.type field currently gets a fixed option list
            if fieldName == "type" and modelClass.__name__ == "Agent":
                options = [
                    {"value": "Analysis", "label": "Analysis"},
                    {"value": "Transformation", "label": "Transformation"},
                    {"value": "Generation", "label": "Generation"},
                    {"value": "Classification", "label": "Classification"},
                    {"value": "Custom", "label": "Custom"}
                ]
        # Extract description from Field object
        description = None
        # Try to get description from various possible sources — pydantic
        # stores it differently depending on version/definition style
        if hasattr(field, 'field_info') and hasattr(field.field_info, 'description'):
            description = field.field_info.description
        elif hasattr(field, 'description'):
            description = field.description
        elif hasattr(field, 'schema') and hasattr(field.schema, 'description'):
            description = field.schema.description
        # Create attribute definition
        attrDef = AttributeDefinition(
            name=fieldName,
            label=fieldLabel,
            type=fieldType,
            required=required,
            placeholder=placeholder,
            defaultValue=defaultValue,
            options=options,
            editable=fieldName not in ["id", "mandateId", "userId", "createdAt", "uploadDate"],
            visible=fieldName not in ["hashedPassword", "mandateId", "userId"],
            order=i,  # note: i counts skipped fields too, so order may have gaps
            validation=validation,
            helpText=description or ""  # Set empty string as default value if no description found
        )
        attributes.append(attrDef)
    return attributes

View file

@ -1,471 +0,0 @@
"""
Interface to the Gateway system.
Manages users and mandates for authentication.
"""
import os
import logging
from typing import Dict, Any, List, Optional, Union
import importlib
from passlib.context import CryptContext
from connectors.connectorDbJson import DatabaseConnector
from modules.configuration import APP_CONFIG
logger = logging.getLogger(__name__)
# Password-Hashing: argon2-based context shared by all hash/verify helpers
pwdContext = CryptContext(schemes=["argon2"], deprecated="auto")
class GatewayInterface:
"""
Interface to the Gateway system.
Manages users and mandates.
"""
def __init__(self, mandateId: int = None, userId: int = None):
    """
    Initializes the Gateway Interface with optional mandate and user context.

    Side effects: imports modules.gatewayModel and initializes the system
    database, seeding the Root mandate and Admin user on first run
    (see _initializeDatabase).

    Args:
        mandateId: ID of the current mandate (optional)
        userId: ID of the current user (optional)

    Raises:
        ImportError: If modules.gatewayModel cannot be imported.
    """
    # Context can be empty during initialization
    self.mandateId = mandateId
    self.userId = userId
    # Import data model module
    try:
        self.modelModule = importlib.import_module("modules.gatewayModel")
        logger.info("gatewayModel successfully imported")
    except ImportError as e:
        logger.error(f"Error importing gatewayModel: {e}")
        raise
    # Initialize database
    self._initializeDatabase()
def _initializeDatabase(self):
    """
    Initializes the system database and seeds the minimal objects
    (Root mandate and Admin user) when they do not exist yet.

    The connector is rebuilt after each seeding step so subsequent
    operations run under the newly created mandate/user context.
    """
    self.db = self._buildConnector()
    # Create Root mandate if needed
    existingMandateId = self.getInitialId("mandates")
    mandates = self.db.getRecordset("mandates")
    if existingMandateId is None or not mandates:
        logger.info("Creating Root mandate")
        rootMandate = {
            "name": "Root",
            "language": "de"
        }
        createdMandate = self.db.recordCreate("mandates", rootMandate)
        logger.info(f"Root mandate created with ID {createdMandate['id']}")
        # Update mandate context and rebuild the connector with it
        self.mandateId = createdMandate['id']
        self.userId = createdMandate['userId']
        self.db = self._buildConnector()
    # Create Admin user if needed
    existingUserId = self.getInitialId("users")
    users = self.db.getRecordset("users")
    if existingUserId is None or not users:
        logger.info("Creating Admin user")
        adminUser = {
            "mandateId": self.mandateId,
            "username": "admin",
            "email": "admin@example.com",
            "fullName": "Administrator",
            "disabled": False,
            "language": "de",
            "privilege": "sysadmin",  # SysAdmin privilege
            "hashedPassword": self._getPasswordHash("admin")  # Use a secure password in production!
        }
        createdUser = self.db.recordCreate("users", adminUser)
        logger.info(f"Admin user created with ID {createdUser['id']}")
        # Update user context and rebuild the connector with it
        self.userId = createdUser['id']
        self.db = self._buildConnector()

def _buildConnector(self):
    """Create a DatabaseConnector bound to the current mandate/user context
    (0 when no context is set yet). Factored out of three verbatim copies
    that previously lived inline in _initializeDatabase."""
    return DatabaseConnector(
        dbHost=APP_CONFIG.get("DB_SYSTEM_HOST"),
        dbDatabase=APP_CONFIG.get("DB_SYSTEM_DATABASE"),
        dbUser=APP_CONFIG.get("DB_SYSTEM_USER"),
        dbPassword=APP_CONFIG.get("DB_SYSTEM_PASSWORD_SECRET"),
        mandateId=self.mandateId if self.mandateId else 0,
        userId=self.userId if self.userId else 0
    )
def getInitialId(self, table: str) -> Optional[int]:
    """Returns the initial record ID for *table* (as reported by the
    connector), or None. Used e.g. to protect the Root mandate from
    deletion."""
    return self.db.getInitialId(table)
def _getPasswordHash(self, password: str) -> str:
    """Creates an argon2 hash for a password via the module-level pwdContext."""
    return pwdContext.hash(password)
def _verifyPassword(self, plainPassword: str, hashedPassword: str) -> bool:
    """Checks if the plaintext password matches the stored hash
    (delegates to the module-level pwdContext)."""
    return pwdContext.verify(plainPassword, hashedPassword)
def _getCurrentTimestamp(self) -> str:
"""Returns the current timestamp in ISO format"""
from datetime import datetime
return datetime.now().isoformat()
# Mandate methods
def getAllMandates(self) -> List[Dict[str, Any]]:
    """Returns all mandates (raw connector records, no filtering)."""
    return self.db.getRecordset("mandates")
def getMandate(self, mandateId: int) -> Optional[Dict[str, Any]]:
    """Fetch a single mandate by its ID, or None when it does not exist."""
    matches = self.db.getRecordset("mandates", recordFilter={"id": mandateId})
    return matches[0] if matches else None
def createMandate(self, name: str, language: str = "de") -> Dict[str, Any]:
    """Create and persist a new mandate record; returns the created record."""
    return self.db.recordCreate("mandates", {"name": name, "language": language})
def updateMandate(self, mandateId: int, mandateData: Dict[str, Any]) -> Dict[str, Any]:
    """
    Updates an existing mandate.

    Args:
        mandateId: The ID of the mandate to update
        mandateData: The mandate data to update

    Returns:
        Dict[str, Any]: The updated mandate data

    Raises:
        ValueError: If the mandate is not found
    """
    # Existence check before touching the record.
    if not self.getMandate(mandateId):
        raise ValueError(f"Mandate with ID {mandateId} not found")
    return self.db.recordModify("mandates", mandateId, mandateData)
def deleteMandate(self, mandateId: int) -> bool:
    """
    Deletes a mandate and all associated users and data.

    The initial ("Root") mandate can never be deleted. Every user of the
    mandate is deleted first (via deleteUser, which also removes the
    user's referenced data), then the mandate record itself.

    Args:
        mandateId: The ID of the mandate to delete

    Returns:
        bool: True if the mandate was successfully deleted, otherwise False
    """
    # Check if the mandate exists
    mandate = self.getMandate(mandateId)
    if not mandate:
        return False
    # Check if it's the initial mandate (protect the Root mandate)
    initialMandateId = self.getInitialId("mandates")
    if initialMandateId is not None and mandateId == initialMandateId:
        logger.warning(f"Attempt to delete the Root mandate was prevented")
        return False
    # Find all users of the mandate
    users = self.getUsersByMandate(mandateId)
    # Delete all users of the mandate and their associated data
    for user in users:
        self.deleteUser(user["id"])
    # Delete the mandate
    success = self.db.recordDelete("mandates", mandateId)
    if success:
        logger.info(f"Mandate with ID {mandateId} was successfully deleted")
    else:
        logger.error(f"Error deleting mandate with ID {mandateId}")
    return success
# User methods
def getAllUsers(self) -> List[Dict[str, Any]]:
    """Returns all users, with password hashes stripped from the result.

    Records containing a hash are copied before stripping so the
    connector's own objects are never mutated. (Previously the hash was
    deleted in place, which corrupted the underlying records; getUser
    already copied correctly.)
    """
    sanitized = []
    for user in self.db.getRecordset("users"):
        if "hashedPassword" in user:
            user = user.copy()
            del user["hashedPassword"]
        sanitized.append(user)
    return sanitized
def getUsersByMandate(self, mandateId: int) -> List[Dict[str, Any]]:
    """
    Returns all users of a specific mandate, without password hashes.

    Records containing a hash are copied before stripping so the
    connector's own objects are never mutated (previously the hash was
    deleted in place).

    Args:
        mandateId: The ID of the mandate

    Returns:
        List[Dict[str, Any]]: List of users in the mandate
    """
    sanitized = []
    for user in self.db.getRecordset("users", recordFilter={"mandateId": mandateId}):
        if "hashedPassword" in user:
            user = user.copy()
            del user["hashedPassword"]
        sanitized.append(user)
    return sanitized
def getUserByUsername(self, username: str) -> Optional[Dict[str, Any]]:
    """Look up a user by username, or None when no such user exists.

    Note: unlike getUser/getAllUsers this returns the RAW record
    including hashedPassword, because authenticateUser needs the hash.
    """
    return next(
        (user for user in self.db.getRecordset("users") if user.get("username") == username),
        None
    )
def getUser(self, userId: int) -> Optional[Dict[str, Any]]:
    """Fetch a user by ID with the password hash stripped; None if absent."""
    matches = self.db.getRecordset("users", recordFilter={"id": userId})
    if not matches:
        return None
    record = matches[0]
    if "hashedPassword" not in record:
        return record
    # Copy first so the connector's record itself is not mutated.
    cleaned = record.copy()
    del cleaned["hashedPassword"]
    return cleaned
def createUser(self, username: str, password: str, email: str = None,
               fullName: str = None, language: str = "de", mandateId: int = None,
               disabled: bool = False, privilege: str = "user") -> Dict[str, Any]:
    """
    Creates a new user.

    Args:
        username: The username
        password: The password (stored only as a hash)
        email: The email address (optional)
        fullName: The full name (optional)
        language: The preferred language (default: "de")
        mandateId: The ID of the mandate (optional; defaults to the
            current context's mandate)
        disabled: Whether the user is disabled (default: False)
        privilege: The privilege level (default: "user")

    Returns:
        Dict[str, Any]: The created user data, without the password hash

    Raises:
        ValueError: If the username already exists
    """
    # Reject duplicates up front.
    if self.getUserByUsername(username):
        raise ValueError(f"User '{username}' already exists")
    record = {
        "mandateId": self.mandateId if mandateId is None else mandateId,
        "username": username,
        "email": email,
        "fullName": fullName,
        "disabled": disabled,
        "language": language,
        "privilege": privilege,
        "hashedPassword": self._getPasswordHash(password)
    }
    createdUser = self.db.recordCreate("users", record)
    # Never expose the hash to callers.
    createdUser.pop("hashedPassword", None)
    return createdUser
def authenticateUser(self, username: str, password: str) -> Optional[Dict[str, Any]]:
    """
    Authenticates a user by username and password.

    Args:
        username: The username
        password: The password

    Returns:
        Optional[Dict[str, Any]]: The user data without the password
        hash, or None when the user is unknown, the password is wrong,
        or the account is disabled.
    """
    user = self.getUserByUsername(username)
    if not user:
        return None
    if not self._verifyPassword(password, user.get("hashedPassword", "")):
        return None
    # Disabled accounts must not authenticate even with a valid password.
    if user.get("disabled", False):
        return None
    sanitized = {**user}
    sanitized.pop("hashedPassword", None)
    return sanitized
def updateUser(self, userId: int, userData: Dict[str, Any]) -> Dict[str, Any]:
    """
    Updates a user.

    Args:
        userId: The ID of the user to update
        userData: The fields to update; a plaintext "password" entry is
            replaced by its hash before persisting

    Returns:
        Dict[str, Any]: The updated user data without the password hash

    Raises:
        ValueError: If the user is not found
    """
    # Verify the user exists (read directly from the DB so the check is
    # independent of any sanitized views)
    users = self.db.getRecordset("users", recordFilter={"id": userId})
    if not users:
        raise ValueError(f"User with ID {userId} not found")

    # Work on a copy so the caller's dict is not mutated (the previous
    # version deleted the "password" key from the caller's dictionary
    # as a side effect).
    updateFields = dict(userData)
    if "password" in updateFields:
        updateFields["hashedPassword"] = self._getPasswordHash(updateFields.pop("password"))

    # Update the user
    updatedUser = self.db.recordModify("users", userId, updateFields)

    # Remove password hash from the response
    if "hashedPassword" in updatedUser:
        del updatedUser["hashedPassword"]
    return updatedUser
def disableUser(self, userId: int) -> Dict[str, Any]:
    """Disables a user account by setting its 'disabled' flag."""
    return self.updateUser(userId, {"disabled": True})
def enableUser(self, userId: int) -> Dict[str, Any]:
    """Re-enables a user account by clearing its 'disabled' flag."""
    return self.updateUser(userId, {"disabled": False})
def _deleteUserReferencedData(self, userId: int) -> None:
    """
    Deletes all records referencing the given user.

    Args:
        userId: The ID of the user
    """
    # Remove the user's attribute records; failures are logged but do not
    # abort the cleanup (best effort).
    try:
        for attribute in self.db.getRecordset("attributes", recordFilter={"userId": userId}):
            self.db.recordDelete("attributes", attribute["id"])
    except Exception as e:
        logger.error(f"Error deleting attributes for user {userId}: {e}")
    # Further tables referencing users would be cleaned up here,
    # depending on the application's database structure.
    logger.info(f"All referenced data for user {userId} has been deleted")
def deleteUser(self, userId: int) -> bool:
    """
    Deletes a user and all associated data.

    Args:
        userId: The ID of the user to delete

    Returns:
        bool: True if the user was successfully deleted, otherwise False
    """
    # Unknown user -> nothing to delete
    if not self.db.getRecordset("users", recordFilter={"id": userId}):
        return False

    # The initial (root admin) user must never be removed
    initialUserId = self.getInitialId("users")
    if initialUserId is not None and userId == initialUserId:
        logger.warning("Attempt to delete the Root Admin was prevented")
        return False

    # Cascade-delete dependent records first, then the user itself
    self._deleteUserReferencedData(userId)
    deleted = self.db.recordDelete("users", userId)
    if deleted:
        logger.info(f"User with ID {userId} was successfully deleted")
    else:
        logger.error(f"Error deleting user with ID {userId}")
    return deleted
# Singleton factory: one GatewayInterface instance per (mandate, user) context
_gatewayInterfaces = {}

def getGatewayInterface(mandateId: int = None, userId: int = None) -> GatewayInterface:
    """
    Returns a GatewayInterface instance for the specified context,
    reusing an existing instance when one was created before.

    Args:
        mandateId: ID of the mandate
        userId: ID of the user

    Returns:
        GatewayInterface instance
    """
    contextKey = f"{mandateId}_{userId}"
    instance = _gatewayInterfaces.get(contextKey)
    if instance is None:
        instance = GatewayInterface(mandateId, userId)
        _gatewayInterfaces[contextKey] = instance
    return instance

# Eagerly create the default-context interface at import time
getGatewayInterface()

View file

@ -1,103 +0,0 @@
"""
Data models for the gateway system.
"""
from pydantic import BaseModel, Field
from typing import List, Dict, Any, Optional
from datetime import datetime
class Label(BaseModel):
    """Label for an attribute or a class with support for multiple languages."""
    # Fallback text used when no translation exists for a language
    default: str
    # Mapping of language code -> translated label text
    translations: Dict[str, str] = {}

    def getLabel(self, language: str = None):
        """Returns the label in the given language, falling back to the default."""
        if not language:
            return self.default
        return self.translations.get(language, self.default)
class Mandate(BaseModel):
    """Data model for a mandate"""
    id: int = Field(description="Unique ID of the mandate")
    name: str = Field(description="Name of the mandate")
    language: str = Field(description="Default language of the mandate")
    # Multi-language display label for the class itself
    label: Label = Field(
        default=Label(default="Mandate", translations={"en": "Mandate", "fr": "Mandat"}),
        description="Label for the class"
    )
    # Labels for attributes
    # NOTE(review): annotated class attribute with a dict default — pydantic
    # treats this as a regular field; verify the shared default is intended.
    fieldLabels: Dict[str, Label] = {
        "id": Label(default="ID", translations={}),
        "name": Label(default="Name of the mandate", translations={"en": "Mandate name", "fr": "Nom du mandat"}),
        "language": Label(default="Language", translations={"en": "Language", "fr": "Langue"})
    }
class User(BaseModel):
    """Data model for a user"""
    id: int = Field(description="Unique ID of the user")
    # Every user belongs to exactly one mandate
    mandateId: int = Field(description="ID of the associated mandate")
    username: str = Field(description="Username for login")
    email: Optional[str] = Field(None, description="Email address of the user")
    fullName: Optional[str] = Field(None, description="Full name of the user")
    language: str = Field(description="Preferred language of the user")
    # Disabled users cannot authenticate (see authenticateUser in the gateway)
    disabled: Optional[bool] = Field(False, description="Indicates whether the user is disabled")
    privilege: str = Field(description="Permission level") #sysadmin,admin,user
    # Multi-language display label for the class itself
    label: Label = Field(
        default=Label(default="User", translations={"en": "User", "fr": "Utilisateur"}),
        description="Label for the class"
    )
    # Labels for attributes
    fieldLabels: Dict[str, Label] = {
        "id": Label(default="ID", translations={}),
        "mandateId": Label(default="Mandate ID", translations={"en": "Mandate ID", "fr": "ID de mandat"}),
        "username": Label(default="Username", translations={"en": "Username", "fr": "Nom d'utilisateur"}),
        "email": Label(default="Email", translations={"en": "Email", "fr": "E-mail"}),
        "fullName": Label(default="Full name", translations={"en": "Full name", "fr": "Nom complet"}),
        "language": Label(default="Language", translations={"en": "Language", "fr": "Langue"}),
        "disabled": Label(default="Disabled", translations={"en": "Disabled", "fr": "Désactivé"}),
        "privilege": Label(default="Permission level", translations={"en": "Access level", "fr": "Niveau d'accès"}),
    }
class UserInDB(User):
    """Extended user class with password hash (internal/DB representation only)"""
    hashedPassword: str = Field(description="Hash of the user password")
    label: Label = Field(
        default=Label(default="User Access", translations={"en": "User Access", "fr": "Accès de l'utilisateur"}),
        description="Label for the class"
    )
    # Additional label for the password field
    # NOTE(review): this assignment replaces User.fieldLabels entirely (it does
    # not merge with the inherited mapping) — confirm that losing the parent's
    # attribute labels here is intended.
    fieldLabels: Dict[str, Label] = {
        "hashedPassword": Label(default="Password hash", translations={"en": "Password hash", "fr": "Hachage de mot de passe"})
    }
class Token(BaseModel):
    """Data model for an authentication token"""
    accessToken: str = Field(description="The issued access token")
    tokenType: str = Field(description="Type of token (usually 'bearer')")
    # Multi-language display label for the class itself
    label: Label = Field(
        default=Label(default="Token", translations={"en": "Token", "fr": "Jeton"}),
        description="Label for the class"
    )
    # Labels for attributes
    fieldLabels: Dict[str, Label] = {
        "accessToken": Label(default="Access token", translations={"en": "Access token", "fr": "Jeton d'accès"}),
        "tokenType": Label(default="Token type", translations={"en": "Token type", "fr": "Type de jeton"})
    }
class TokenData(BaseModel):
    """Data for token decoding and validation"""
    # All fields optional: a partially decoded/invalid token still yields an object
    username: Optional[str] = None
    mandateId: Optional[int] = None
    # Expiry timestamp extracted from the token's "exp" claim
    exp: Optional[datetime] = None

View file

@ -1,933 +0,0 @@
"""
Module for extracting content from various file formats.
Provides specialized functions for processing text, PDF, Office documents, images, etc.
"""
import logging
import os
import io
from typing import Dict, Any, List, Optional, Union, Tuple
import base64
# Configure logger
logger = logging.getLogger(__name__)
# Optional imports - only loaded when needed
# These flags record whether the optional third-party extraction libraries
# (PyPDF2/fitz, python-docx/openpyxl, Pillow) have been imported; the
# _load*() helpers below flip them to True on successful import.
pdfExtractorLoaded = False
officeExtractorLoaded = False
imageProcessorLoaded = False
def getDocumentContents(fileMetadata: Dict[str, Any], fileContent: bytes) -> List[Dict[str, Any]]:
    """
    Main function for extracting content from a file based on its MIME type.
    Delegates to specialized extraction functions.

    Args:
        fileMetadata: File metadata (Name, MIME type, etc.)
        fileContent: Binary data of the file

    Returns:
        List of Document-Content objects with metadata and base64Encoded flag.
        Never raises: on any error the raw file is returned base64 encoded.
    """
    try:
        mimeType = fileMetadata.get("mimeType", "application/octet-stream")
        fileName = fileMetadata.get("name", "unknown")
        logger.info(f"Extracting content from file '{fileName}' (MIME type: {mimeType})")
        # Extract content based on MIME type
        contents = []
        # Text-based formats (excluding CSV which has its own handler)
        # CSV must be checked before the generic text/ prefix match below,
        # since "text/csv" would otherwise be swallowed by extractTextContent.
        if mimeType == "text/csv":
            contents.extend(extractCsvContent(fileName, fileContent))
        # Then handle other text-based formats
        elif mimeType.startswith("text/") or mimeType in [
            "application/json",
            "application/xml",
            "application/javascript",
            "application/x-python"
        ]:
            contents.extend(extractTextContent(fileName, fileContent, mimeType))
        # SVG Files (checked before the generic image/ prefix for the same reason)
        elif mimeType == "image/svg+xml":
            contents.extend(extractSvgContent(fileName, fileContent))
        # Images
        elif mimeType.startswith("image/"):
            contents.extend(extractImageContent(fileName, fileContent, mimeType))
        # PDF Documents
        elif mimeType == "application/pdf":
            contents.extend(extractPdfContent(fileName, fileContent))
        # Word Documents
        elif mimeType in [
            "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
            "application/msword"
        ]:
            contents.extend(extractWordContent(fileName, fileContent, mimeType))
        # Excel Documents
        elif mimeType in [
            "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
            "application/vnd.ms-excel"
        ]:
            contents.extend(extractExcelContent(fileName, fileContent, mimeType))
        # PowerPoint Documents
        elif mimeType in [
            "application/vnd.openxmlformats-officedocument.presentationml.presentation",
            "application/vnd.ms-powerpoint"
        ]:
            contents.extend(extractPowerpointContent(fileName, fileContent, mimeType))
        # Binary data as fallback for unknown formats
        else:
            contents.extend(extractBinaryContent(fileName, fileContent, mimeType))
        # Fallback when no content could be extracted
        if not contents:
            logger.warning(f"No content extracted from file '{fileName}', using binary fallback")
            # Convert binary content to base64
            encoded_data = base64.b64encode(fileContent).decode('utf-8')
            contents.append({
                "sequenceNr": 1,
                "name": '1_undefined',
                "ext": os.path.splitext(fileName)[1][1:] if os.path.splitext(fileName)[1] else "bin",
                "contentType": mimeType,
                "data": encoded_data,
                "base64Encoded": True,
                "metadata": {
                    "isText": False
                }
            })
        # Add generic attributes for all documents
        for content in contents:
            # Make sure all content items have the base64Encoded flag
            if "base64Encoded" not in content:
                if isinstance(content.get("data"), bytes):
                    # Convert bytes to base64
                    content["data"] = base64.b64encode(content["data"]).decode('utf-8')
                    content["base64Encoded"] = True
                else:
                    # Assume text content if not explicitly marked
                    content["base64Encoded"] = False
            # Maintain backward compatibility with old "base64Encoded" flag in metadata
            if "metadata" not in content:
                content["metadata"] = {}
            # Set base64Encoded in metadata for backward compatibility
            content["metadata"]["base64Encoded"] = content["base64Encoded"]
        logger.info(f"Successfully extracted {len(contents)} content items from file '{fileName}'")
        return contents
    except Exception as e:
        # Deliberately broad: extraction must never fail the caller
        logger.error(f"Error during content extraction: {str(e)}")
        # Fallback on error - return original data
        return [{
            "sequenceNr": 1,
            "name": fileMetadata.get("name", "unknown"),
            "ext": os.path.splitext(fileMetadata.get("name", ""))[1][1:] if os.path.splitext(fileMetadata.get("name", ""))[1] else "bin",
            "contentType": fileMetadata.get("mimeType", "application/octet-stream"),
            "data": base64.b64encode(fileContent).decode('utf-8'),
            "base64Encoded": True,
            "metadata": {
                "isText": False,
                "base64Encoded": True  # For backward compatibility
            }
        }]
def _loadPdfExtractor():
    """Imports the PDF libraries on first use and records success in pdfExtractorLoaded."""
    global pdfExtractorLoaded
    if pdfExtractorLoaded:
        return
    try:
        # Bind the modules at module level so other functions can use them
        global PyPDF2, fitz
        import PyPDF2
        import fitz  # PyMuPDF for more extensive PDF processing
    except ImportError as e:
        logger.warning(f"PDF extraction libraries could not be loaded: {e}")
    else:
        pdfExtractorLoaded = True
        logger.info("PDF extraction libraries successfully loaded")
def _loadOfficeExtractor():
    """Imports the Office libraries on first use and records success in officeExtractorLoaded."""
    global officeExtractorLoaded
    if officeExtractorLoaded:
        return
    try:
        # Bind the modules at module level so other functions can use them
        global docx, openpyxl
        import docx  # python-docx for Word documents
        import openpyxl  # for Excel files
    except ImportError as e:
        logger.warning(f"Office extraction libraries could not be loaded: {e}")
    else:
        officeExtractorLoaded = True
        logger.info("Office extraction libraries successfully loaded")
def _loadImageProcessor():
    """Imports Pillow on first use and records success in imageProcessorLoaded."""
    global imageProcessorLoaded
    if imageProcessorLoaded:
        return
    try:
        # Bind the names at module level so other functions can use them
        global PIL, Image
        from PIL import Image
    except ImportError as e:
        logger.warning(f"Image processing libraries could not be loaded: {e}")
    else:
        imageProcessorLoaded = True
        logger.info("Image processing libraries successfully loaded")
def extractTextContent(fileName: str, fileContent: bytes, mimeType: str) -> List[Dict[str, Any]]:
    """
    Extracts text from text files.

    Tries UTF-8 first, then a list of common single-byte fallback
    encodings; if the bytes cannot be decoded at all, the raw content is
    returned base64 encoded.

    Args:
        fileName: Name of the file
        fileContent: Binary data of the file
        mimeType: MIME type of the file

    Returns:
        List with one Text-Content object (base64Encoded = False), or one
        binary fallback object (base64Encoded = True)
    """
    # Keep original file extension (fall back to "txt")
    fileExtension = os.path.splitext(fileName)[1][1:] if os.path.splitext(fileName)[1] else "txt"
    try:
        # One loop replaces three near-identical return blocks in the
        # previous version; outputs and log messages are unchanged.
        for encoding in ['utf-8', 'latin-1', 'cp1252', 'iso-8859-1']:
            try:
                textContent = fileContent.decode(encoding)
            except UnicodeDecodeError:
                if encoding == 'utf-8':
                    logger.warning(f"Could not decode text from file '{fileName}' as UTF-8, trying alternative encodings")
                continue
            metadata = {"isText": True}
            if encoding != 'utf-8':
                logger.info(f"Text successfully decoded with encoding {encoding}")
                # Record which fallback encoding succeeded (as before, only
                # for non-UTF-8 decodes)
                metadata["encoding"] = encoding
            return [{
                "sequenceNr": 1,
                "name": "1_text",  # Simplified naming
                "ext": fileExtension,
                "contentType": "text",
                "data": textContent,
                "base64Encoded": False,
                "metadata": metadata
            }]
        logger.warning(f"Could not decode text, using binary data")
    except Exception as e:
        logger.error(f"Error in alternative text decoding: {str(e)}")
    # Fallback: undecodable content is passed through base64 encoded
    return [{
        "sequenceNr": 1,
        "name": "1_binary",  # Simplified naming
        "ext": fileExtension,
        "contentType": mimeType,
        "data": base64.b64encode(fileContent).decode('utf-8'),
        "base64Encoded": True,
        "metadata": {
            "isText": False
        }
    }]
def extractCsvContent(fileName: str, fileContent: bytes) -> List[Dict[str, Any]]:
    """
    Extracts content from CSV files.

    Tries UTF-8 first, then common single-byte fallback encodings;
    undecodable content is returned base64 encoded.

    Args:
        fileName: Name of the file
        fileContent: Binary data of the file

    Returns:
        List with one CSV-Content object (base64Encoded = False), or one
        binary fallback object (base64Encoded = True)
    """
    try:
        # One loop replaces three near-identical return blocks in the
        # previous version; outputs and log messages are unchanged.
        for encoding in ['utf-8', 'latin-1', 'cp1252', 'iso-8859-1']:
            try:
                csvContent = fileContent.decode(encoding)
            except UnicodeDecodeError:
                if encoding == 'utf-8':
                    logger.warning(f"Could not decode CSV from file '{fileName}' as UTF-8, trying alternative encodings")
                continue
            metadata = {"isText": True, "format": "csv"}
            if encoding != 'utf-8':
                logger.info(f"CSV successfully decoded with encoding {encoding}")
                # Record which fallback encoding succeeded (non-UTF-8 only)
                metadata["encoding"] = encoding
            return [{
                "sequenceNr": 1,
                "name": "1_csv",  # Simplified naming
                "ext": "csv",
                "contentType": "csv",
                "data": csvContent,
                "base64Encoded": False,
                "metadata": metadata
            }]
    except Exception as e:
        logger.error(f"Error in alternative CSV decoding: {str(e)}")
    # Fallback: pass through the raw bytes base64 encoded
    return [{
        "sequenceNr": 1,
        "name": "1_binary",  # Simplified naming
        "ext": "csv",
        "contentType": "text/csv",
        "data": base64.b64encode(fileContent).decode('utf-8'),
        "base64Encoded": True,
        "metadata": {
            "isText": False
        }
    }]
def extractSvgContent(fileName: str, fileContent: bytes) -> List[Dict[str, Any]]:
    """
    Extracts content from SVG files.

    SVG is kept as text (XML) but flagged as an image in the metadata, so
    downstream consumers can treat it either way.

    Args:
        fileName: Name of the file
        fileContent: Binary data of the file

    Returns:
        List of SVG-Content objects with dual text/image metadata
    """
    contents = []
    try:
        # Extract SVG as text content (XML)
        svgText = fileContent.decode('utf-8')
        # Check if it's actually SVG by looking for the SVG tag
        if "<svg" in svgText.lower():
            # SVG is both text (XML) and an image
            contents.append({
                "sequenceNr": 1,
                "name": "1_svg",  # Simplified naming
                "ext": "svg",
                "contentType": "image/svg+xml",
                "data": svgText,
                "base64Encoded": False,
                "metadata": {
                    "isText": True,  # SVG is text-based (XML)
                    "format": "svg",
                    "isImage": True  # But also represents an image
                }
            })
        else:
            # Doesn't appear to be a valid SVG file: keep the decoded text
            # but downgrade the content type to plain text
            logger.warning(f"File '{fileName}' has SVG extension but does not contain SVG markup")
            contents.append({
                "sequenceNr": 1,
                "name": "1_text",
                "ext": "svg",
                "contentType": "text/plain",
                "data": svgText,
                "base64Encoded": False,
                "metadata": {
                    "isText": True,
                    "format": "text"
                }
            })
    except UnicodeDecodeError:
        logger.warning(f"Could not decode SVG from file '{fileName}' as UTF-8, trying alternative encodings")
        try:
            # Try alternative encodings
            # NOTE(review): unlike the UTF-8 branch above, a fallback decode
            # that succeeds but lacks an "<svg" tag appends nothing and the
            # loop keeps trying; such content ends up in the binary fallback
            # below — confirm this asymmetry is intended.
            for encoding in ['latin-1', 'cp1252', 'iso-8859-1']:
                try:
                    svgText = fileContent.decode(encoding)
                    if "<svg" in svgText.lower():
                        logger.info(f"SVG successfully decoded with encoding {encoding}")
                        contents.append({
                            "sequenceNr": 1,
                            "name": "1_svg",  # Simplified naming
                            "ext": "svg",
                            "contentType": "image/svg+xml",
                            "data": svgText,
                            "base64Encoded": False,
                            "metadata": {
                                "isText": True,
                                "format": "svg",
                                "isImage": True,
                                "encoding": encoding
                            }
                        })
                        break
                except UnicodeDecodeError:
                    continue
            # Fallback to binary data if no encoding works
            if not contents:
                logger.warning(f"Could not decode SVG text, using binary data")
                contents.append({
                    "sequenceNr": 1,
                    "name": "1_binary",  # Simplified naming
                    "ext": "svg",
                    "contentType": "image/svg+xml",
                    "data": base64.b64encode(fileContent).decode('utf-8'),
                    "base64Encoded": True,
                    "metadata": {
                        "isText": False,
                        "format": "svg",
                        "isImage": True
                    }
                })
        except Exception as e:
            logger.error(f"Error in alternative SVG decoding: {str(e)}")
            # Return binary data as fallback
            contents.append({
                "sequenceNr": 1,
                "name": "1_binary",  # Simplified naming
                "ext": "svg",
                "contentType": "image/svg+xml",
                "data": base64.b64encode(fileContent).decode('utf-8'),
                "base64Encoded": True,
                "metadata": {
                    "isText": False,
                    "format": "svg",
                    "isImage": True
                }
            })
    return contents
def extractImageContent(fileName: str, fileContent: bytes, mimeType: str) -> List[Dict[str, Any]]:
    """
    Extracts content from image files and optionally generates metadata descriptions.

    The image bytes are always returned base64 encoded; if Pillow is
    available, dimensions/format/EXIF metadata and a short textual
    description are added.

    Args:
        fileName: Name of the file
        fileContent: Binary data of the file
        mimeType: MIME type of the file

    Returns:
        List of Image-Content objects with base64Encoded = True (plus an
        optional second text item describing the image)
    """
    # Extract file extension from MIME type or filename
    fileExtension = mimeType.split('/')[-1]
    if fileExtension == "jpeg":
        fileExtension = "jpg"
    # If possible, analyze image and extract metadata
    imageMetadata = {
        "isText": False,
        "format": "image"
    }
    imageDescription = None
    try:
        _loadImageProcessor()
        if imageProcessorLoaded and fileContent and len(fileContent) > 0:
            with io.BytesIO(fileContent) as imgStream:
                try:
                    img = Image.open(imgStream)
                    # Check if the image was actually loaded
                    img.verify()
                    # verify() consumes the file object, so rewind and
                    # reload to safely continue working with the image
                    imgStream.seek(0)
                    img = Image.open(imgStream)
                    imageMetadata.update({
                        "format": img.format,
                        "mode": img.mode,
                        "width": img.width,
                        "height": img.height
                    })
                    # Extract EXIF data if available
                    if hasattr(img, '_getexif') and callable(img._getexif):
                        exif = img._getexif()
                        if exif:
                            exifData = {}
                            for tagId, value in exif.items():
                                exifData[f"tag_{tagId}"] = str(value)
                            imageMetadata["exif"] = exifData
                    # Generate image description
                    imageDescription = f"Image ({img.width}x{img.height}, {img.format}, {img.mode})"
                except Exception as innerE:
                    # Corrupt/unreadable image: keep going, record the error
                    logger.warning(f"Error processing image: {str(innerE)}")
                    imageMetadata["error"] = str(innerE)
                    imageDescription = f"Image (unable to process: {str(innerE)})"
    except Exception as e:
        logger.warning(f"Could not extract image metadata: {str(e)}")
        imageMetadata["error"] = str(e)
    # Convert binary image to base64
    encoded_data = base64.b64encode(fileContent).decode('utf-8')
    # Return image content
    contents = [{
        "sequenceNr": 1,
        "name": "1_image",  # Simplified naming
        "ext": fileExtension,
        "contentType": "image",
        "data": encoded_data,
        "base64Encoded": True,
        "metadata": imageMetadata
    }]
    # If image description available, add as additional text content
    if imageDescription:
        contents.append({
            "sequenceNr": 2,
            "name": "2_text_image_info",  # Simplified naming with label
            "ext": "txt",
            "contentType": "text",
            "data": imageDescription,
            "base64Encoded": False,
            "metadata": {
                "isText": True,
                "imageDescription": True
            }
        })
    return contents
def extractPdfContent(fileName: str, fileContent: bytes) -> List[Dict[str, Any]]:
    """
    Extracts text and images from PDF files.

    Two passes: PyPDF2 for text and document metadata, then PyMuPDF
    (fitz) for embedded images. If the libraries are unavailable or no
    content is found, the original PDF is returned base64 encoded.

    Args:
        fileName: Name of the file
        fileContent: Binary data of the file

    Returns:
        List of PDF-Content objects (text and images) with appropriate base64Encoded flags
    """
    contents = []
    extractedContentFound = False
    try:
        # Load PDF extraction libraries
        _loadPdfExtractor()
        if not pdfExtractorLoaded:
            logger.warning("PDF extraction not possible: Libraries not available")
            # Add original file as binary content
            contents.append({
                "sequenceNr": 1,
                "name": "1_pdf",  # Simplified naming
                "ext": "pdf",
                "contentType": "application/pdf",
                "data": base64.b64encode(fileContent).decode('utf-8'),
                "base64Encoded": True,
                "metadata": {
                    "isText": False,
                    "format": "pdf"
                }
            })
            return contents
        # Extract text with PyPDF2
        extractedText = ""
        pdfMetadata = {}
        with io.BytesIO(fileContent) as pdfStream:
            pdfReader = PyPDF2.PdfReader(pdfStream)
            # Extract metadata (keys come prefixed with '/', e.g. '/Author')
            pdfInfo = pdfReader.metadata or {}
            for key, value in pdfInfo.items():
                if key.startswith('/'):
                    pdfMetadata[key[1:]] = value
                else:
                    pdfMetadata[key] = value
            # Extract text from all pages
            for pageNum in range(len(pdfReader.pages)):
                page = pdfReader.pages[pageNum]
                pageText = page.extract_text()
                if pageText:
                    extractedText += f"--- Page {pageNum + 1} ---\n{pageText}\n\n"
        # If text was found, add as separate content
        if extractedText.strip():
            extractedContentFound = True
            contents.append({
                "sequenceNr": len(contents) + 1,
                "name": f"{len(contents) + 1}_text",  # Simplified naming
                "ext": "txt",
                "contentType": "text",
                "data": extractedText,
                "base64Encoded": False,
                "metadata": {
                    "isText": True,
                    "source": "pdf",
                    "pages": len(pdfReader.pages),
                    "pdfMetadata": pdfMetadata
                }
            })
        # Extract images with PyMuPDF (fitz)
        try:
            with io.BytesIO(fileContent) as pdfStream:
                doc = fitz.open(stream=pdfStream, filetype="pdf")
                imageCount = 0
                for pageNum in range(len(doc)):
                    page = doc[pageNum]
                    imageList = page.get_images(full=True)
                    for imgIndex, imgInfo in enumerate(imageList):
                        try:
                            imageCount += 1
                            # First tuple element is the image's xref number
                            xref = imgInfo[0]
                            baseImage = doc.extract_image(xref)
                            imageBytes = baseImage["image"]
                            imageExt = baseImage["ext"]
                            # Add image as content - encode as base64
                            extractedContentFound = True
                            contents.append({
                                "sequenceNr": len(contents) + 1,
                                "name": f"{len(contents) + 1}_image_page{pageNum+1}_{imgIndex+1}",  # Simplified naming with label
                                "ext": imageExt,
                                "contentType": f"image/{imageExt}",
                                "data": base64.b64encode(imageBytes).decode('utf-8'),
                                "base64Encoded": True,
                                "metadata": {
                                    "isText": False,
                                    "source": "pdf",
                                    "page": pageNum + 1,
                                    "index": imgIndex
                                }
                            })
                        except Exception as imgE:
                            # One bad image must not abort the whole extraction
                            logger.warning(f"Error extracting image {imgIndex} on page {pageNum + 1}: {str(imgE)}")
                # Close document
                doc.close()
        except Exception as imgExtractE:
            logger.warning(f"Error extracting images from PDF: {str(imgExtractE)}")
    except Exception as e:
        logger.error(f"Error in PDF extraction: {str(e)}")
    # If no content was extracted, add the original PDF
    if not extractedContentFound:
        contents.append({
            "sequenceNr": 1,
            "name": "1_pdf",  # Simplified naming
            "ext": "pdf",
            "contentType": "application/pdf",
            "data": base64.b64encode(fileContent).decode('utf-8'),
            "base64Encoded": True,
            "metadata": {
                "isText": False,
                "format": "pdf"
            }
        })
    return contents
def extractWordContent(fileName: str, fileContent: bytes, mimeType: str) -> List[Dict[str, Any]]:
    """
    Extracts text and images from Word documents.

    Only the modern DOCX format is parsed (via python-docx); legacy DOC
    files and any failure path fall back to the original binary.

    Args:
        fileName: Name of the file
        fileContent: Binary data of the file
        mimeType: MIME type of the file

    Returns:
        List of Word-Content objects (text and possibly images) with appropriate base64Encoded flags
    """
    contents = []
    extractedContentFound = False
    # Determine file extension
    fileExtension = "docx" if mimeType == "application/vnd.openxmlformats-officedocument.wordprocessingml.document" else "doc"
    try:
        # Load Office extraction libraries
        _loadOfficeExtractor()
        if not officeExtractorLoaded:
            logger.warning("Word extraction not possible: Libraries not available")
            # Add original file as binary content
            contents.append({
                "sequenceNr": 1,
                "name": "1_word",  # Simplified naming
                "ext": fileExtension,
                "contentType": mimeType,
                "data": base64.b64encode(fileContent).decode('utf-8'),
                "base64Encoded": True,
                "metadata": {
                    "isText": False,
                    "format": "word"
                }
            })
            return contents
        # Only supports DOCX (newer format)
        if mimeType == "application/vnd.openxmlformats-officedocument.wordprocessingml.document":
            with io.BytesIO(fileContent) as docxStream:
                doc = docx.Document(docxStream)
                # Extract text
                fullText = []
                for para in doc.paragraphs:
                    fullText.append(para.text)
                # Extract tables (cells joined with " | " per row)
                for table in doc.tables:
                    for row in table.rows:
                        rowText = []
                        for cell in row.cells:
                            rowText.append(cell.text)
                        fullText.append(" | ".join(rowText))
                extractedText = "\n\n".join(fullText)
                # Add extracted text as content
                if extractedText.strip():
                    extractedContentFound = True
                    contents.append({
                        "sequenceNr": 1,
                        "name": "1_text",  # Simplified naming
                        "ext": "txt",
                        "contentType": "text",
                        "data": extractedText,
                        "base64Encoded": False,
                        "metadata": {
                            "isText": True,
                            "source": "docx",
                            "paragraphCount": len(doc.paragraphs),
                            "tableCount": len(doc.tables)
                        }
                    })
        else:
            logger.warning(f"Extraction from old Word format (DOC) not supported")
    except Exception as e:
        logger.error(f"Error in Word extraction: {str(e)}")
    # If no content was extracted, add the original document
    if not extractedContentFound:
        contents.append({
            "sequenceNr": 1,
            "name": "1_word",  # Simplified naming
            "ext": fileExtension,
            "contentType": mimeType,
            "data": base64.b64encode(fileContent).decode('utf-8'),
            "base64Encoded": True,
            "metadata": {
                "isText": False,
                "format": "word"
            }
        })
    return contents
def extractExcelContent(fileName: str, fileContent: bytes, mimeType: str) -> List[Dict[str, Any]]:
    """
    Extracts table data from Excel files.

    Each worksheet of a modern XLSX file (via openpyxl, values only) is
    converted to one CSV content item; legacy XLS files and any failure
    path fall back to the original binary.

    Args:
        fileName: Name of the file
        fileContent: Binary data of the file
        mimeType: MIME type of the file

    Returns:
        List of Excel-Content objects with appropriate base64Encoded flags
    """
    contents = []
    extractedContentFound = False
    # Determine file extension
    fileExtension = "xlsx" if mimeType == "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet" else "xls"
    try:
        # Load Office extraction libraries
        _loadOfficeExtractor()
        if not officeExtractorLoaded:
            logger.warning("Excel extraction not possible: Libraries not available")
            # Add original file as binary content
            contents.append({
                "sequenceNr": 1,
                "name": "1_excel",  # Simplified naming
                "ext": fileExtension,
                "contentType": mimeType,
                "data": base64.b64encode(fileContent).decode('utf-8'),
                "base64Encoded": True,
                "metadata": {
                    "isText": False,
                    "format": "excel"
                }
            })
            return contents
        # Only supports XLSX (newer format)
        if mimeType == "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet":
            with io.BytesIO(fileContent) as xlsxStream:
                # data_only=True returns computed values instead of formulas
                workbook = openpyxl.load_workbook(xlsxStream, data_only=True)
                # Extract each worksheet as separate CSV content
                for sheetIndex, sheetName in enumerate(workbook.sheetnames):
                    sheet = workbook[sheetName]
                    # Format data as CSV: every cell is quoted, embedded
                    # quotes are doubled per CSV convention
                    csvRows = []
                    for row in sheet.iter_rows():
                        csvRow = []
                        for cell in row:
                            value = cell.value
                            if value is None:
                                csvRow.append("")
                            else:
                                csvRow.append(str(value).replace('"', '""'))
                        csvRows.append(','.join(f'"{cell}"' for cell in csvRow))
                    csvContent = "\n".join(csvRows)
                    # Add as CSV content
                    if csvContent.strip():
                        extractedContentFound = True
                        # Sanitize the sheet name for use in the item name
                        sheetSafeName = sheetName.replace(" ", "_").replace("/", "_").replace("\\", "_")
                        contents.append({
                            "sequenceNr": len(contents) + 1,
                            "name": f"{len(contents) + 1}_csv_{sheetSafeName}",  # Simplified naming with sheet label
                            "ext": "csv",
                            "contentType": "csv",
                            "data": csvContent,
                            "base64Encoded": False,
                            "metadata": {
                                "isText": True,
                                "source": "xlsx",
                                "sheet": sheetName,
                                "format": "csv"
                            }
                        })
        else:
            logger.warning(f"Extraction from old Excel format (XLS) not supported")
    except Exception as e:
        logger.error(f"Error in Excel extraction: {str(e)}")
    # If no content was extracted, add the original document
    if not extractedContentFound:
        contents.append({
            "sequenceNr": 1,
            "name": "1_excel",  # Simplified naming
            "ext": fileExtension,
            "contentType": mimeType,
            "data": base64.b64encode(fileContent).decode('utf-8'),
            "base64Encoded": True,
            "metadata": {
                "isText": False,
                "format": "excel"
            }
        })
    return contents
def extractPowerpointContent(fileName: str, fileContent: bytes, mimeType: str) -> List[Dict[str, Any]]:
    """
    Extracts content from PowerPoint presentations.

    No slide-level extraction is performed yet — that would require more
    specialized libraries — so the original binary file is returned
    base64 encoded.

    Args:
        fileName: Name of the file
        fileContent: Binary data of the file
        mimeType: MIME type of the file

    Returns:
        List of PowerPoint-Content objects with base64Encoded = True
    """
    if mimeType == "application/vnd.openxmlformats-officedocument.presentationml.presentation":
        fileExtension = "pptx"
    else:
        fileExtension = "ppt"
    encodedData = base64.b64encode(fileContent).decode('utf-8')
    return [{
        "sequenceNr": 1,
        "name": "1_powerpoint",
        "ext": fileExtension,
        "contentType": mimeType,
        "data": encodedData,
        "base64Encoded": True,
        "metadata": {
            "isText": False,
            "format": "powerpoint"
        }
    }]
def extractBinaryContent(fileName: str, fileContent: bytes, mimeType: str) -> List[Dict[str, Any]]:
    """
    Fallback for binary files where no specific extraction is possible.

    Args:
        fileName: Name of the file
        fileContent: Binary data of the file
        mimeType: MIME type of the file

    Returns:
        List with a single binary Content object with base64Encoded = True
    """
    # Derive the extension from the file name, defaulting to "bin"
    _, suffix = os.path.splitext(fileName)
    fileExtension = suffix[1:] if suffix else "bin"
    return [{
        "sequenceNr": 1,
        "name": "1_binary",
        "ext": fileExtension,
        "contentType": mimeType,
        "data": base64.b64encode(fileContent).decode('utf-8'),
        "base64Encoded": True,
        "metadata": {
            "isText": False,
            "format": "binary"
        }
    }]

File diff suppressed because it is too large Load diff

View file

@ -1,67 +0,0 @@
"""
Utility functions for MIME type handling and file format determination.
"""
def isTextMimeType(mimeType: str) -> bool:
    """
    Determines if a MIME type represents a text format that should not be base64 encoded.

    Args:
        mimeType: The MIME type to check

    Returns:
        True if the content is a text format, False otherwise
    """
    if mimeType.startswith("text/"):
        return True
    # Non-"text/" types that are nevertheless textual
    textApplicationTypes = {
        "application/json",
        "application/xml",
        "application/javascript",
        "application/x-python",
        "image/svg+xml",
    }
    return mimeType in textApplicationTypes
def determineContentEncoding(fileName: str, content: object, mimeType: str = None) -> bool:
    """
    Determines if content should be base64 encoded based on file type and MIME type.

    Args:
        fileName: Name of the file including extension
        content: The content of the file (typically str or bytes)
        mimeType: Optional MIME type of the content
    Returns:
        True if content should be base64 encoded, False otherwise
    """
    # If a text MIME type is provided, decide from the Python type of the
    # content: strings can be embedded directly, anything else is encoded.
    # Non-text MIME types deliberately fall through to the extension checks.
    if mimeType and isTextMimeType(mimeType):
        return not isinstance(content, str)
    # Local import keeps this utility module free of top-level imports
    import os
    # Normalized lowercase extension without the leading dot
    _, extension = os.path.splitext(fileName)
    extension = extension.lower().lstrip('.')
    # Known text formats: string content is kept as-is
    text_extensions = {'txt', 'csv', 'json', 'xml', 'html', 'md', 'svg', 'js', 'css', 'py'}
    if extension in text_extensions and isinstance(content, str):
        return False
    # Known binary formats are always base64 encoded
    binary_extensions = {'jpg', 'jpeg', 'png', 'gif', 'pdf', 'doc', 'docx', 'xls', 'xlsx', 'zip', 'rar'}
    if extension in binary_extensions:
        return True
    # Raw bytes are always encoded regardless of extension
    if isinstance(content, bytes):
        return True
    # Default for unknown extensions: encode anything that is not a string
    return not isinstance(content, str)

View file

@ -1,608 +0,0 @@
# Comprehensive System Documentation for Workflow Management
# Introduction
Welcome to the "Comprehensive System Documentation for Workflow Management." This manual serves as an essential resource for technical professionals seeking an in-depth understanding of our workflow management system. It is meticulously crafted to provide clarity and insight into the system's architecture, functionality, and operational intricacies.
## Purpose and Scope
The primary purpose of this document is to offer a detailed exposition of the workflow management system, focusing on its state machine design, system architecture, and error handling mechanisms. This manual aims to equip system architects, developers, and technical analysts with the knowledge required to effectively utilize, maintain, and enhance the system.
## Context and Background
In today's fast-paced digital environment, efficient workflow management is crucial for optimizing business processes and ensuring seamless operations. Our system is designed to address these needs by providing a robust framework that supports dynamic state transitions and error resilience. By leveraging state machine principles, the system ensures that workflows are executed consistently and reliably, adapting to various operational scenarios.
## Document Outline
Readers will find a comprehensive exploration of the following key topics:
- **State Machine**: An in-depth analysis of the state machine model employed within the system, detailing its role in managing workflow states and transitions.
- **Workflow Management**: A thorough examination of the workflow management processes, including task orchestration and process automation.
- **System Architecture**: A detailed overview of the system's architectural design, highlighting the components and their interactions.
- **Error Handling**: Insights into the error handling strategies implemented to ensure system robustness and reliability.
Each section is designed to provide technical insights and practical guidance, supported by code examples and diagrams where applicable.
## Tone and Audience
This manual is written in a formal tone, tailored for a technical audience with a foundational understanding of system design and workflow management principles. It aims to engage readers by presenting complex information in a structured and accessible manner, facilitating a deeper comprehension of the system's capabilities and potential applications.
We trust that this documentation will serve as a valuable tool in your technical endeavors, enhancing your ability to leverage our workflow management system to its fullest potential.
# System Introduction and Documentation Purpose
## Overview of the System
The Workflow Management System is a sophisticated software solution designed to streamline and automate the management of backend chat workflows. At its core, the system employs a state machine-based approach, allowing for efficient handling of complex workflow processes. This system is integral to ensuring that chat interactions are processed in a structured and consistent manner, thereby enhancing operational efficiency and user satisfaction.
The system's architecture is modular, with the **Workflow Manager Module** serving as the central component. This module is responsible for initializing workflows, managing their states, and ensuring seamless transitions between different stages of the workflow. By leveraging unique identifiers such as workflow ID, mandate ID, and user ID, the system ensures precise tracking and management of each workflow instance.
## Purpose of the Documentation
This documentation serves as a comprehensive guide for technical users who are involved in the development, maintenance, and optimization of the Workflow Management System. It aims to provide detailed insights into the system's architecture, functionality, and interconnections between various components. By offering a thorough understanding of the system, this manual facilitates effective troubleshooting, customization, and enhancement of the workflow management processes.
Key objectives of this documentation include:
- Providing a detailed description of the system's components and their interactions.
- Offering guidance on the initialization and management of workflows.
- Explaining the state machine approach and its application within the system.
- Assisting technical users in identifying and resolving potential issues.
## Scope and Limitations
### Scope
This documentation covers all aspects of the Workflow Management System, with a focus on the **workflowManager.py** module. It includes:
- Detailed descriptions of the system's architecture and components.
- Step-by-step instructions for initializing and managing workflows.
- An explanation of the state machine methodology and its implementation.
- Examples and scenarios illustrating typical workflow processes.
### Limitations
While this documentation provides an extensive overview of the Workflow Management System, it is important to note the following limitations:
- It does not cover external systems or integrations that may interact with the workflow management processes.
- The documentation assumes a foundational understanding of state machines and workflow management principles.
- It does not provide exhaustive coverage of every potential use case or customization scenario.
By understanding the scope and limitations outlined above, users can effectively utilize this documentation to enhance their interaction with the Workflow Management System, ensuring optimal performance and reliability.
# System Architecture
The System Architecture section provides a detailed overview of the structural design of the Workflow Management System. This section aims to elucidate the components, their interactions, and the data flow within the system, offering a comprehensive understanding for technical audiences.
## Component Overview
The Workflow Management System is designed around a modular architecture, primarily leveraging a state machine-based approach to manage backend chat workflows. Below is a detailed description of the core components:
### Workflow Manager Module
- **Purpose**: The Workflow Manager is the central component responsible for orchestrating chat workflows. It utilizes a state machine to ensure that workflows progress through predefined states in a controlled manner.
- **Functionality**:
- **Workflow Initialization**: This sub-component is responsible for initializing new workflows or loading existing ones. It assigns unique identifiers such as workflow ID, mandate ID, and user ID, ensuring each workflow instance is distinct and traceable.
- **State Management**: Manages transitions between different states of the workflow, ensuring that each step is executed in the correct sequence.
- **Event Handling**: Listens for and processes events that trigger state transitions, maintaining the integrity and continuity of the workflow.
### Interaction with Other Components
- **Database Interface**: The Workflow Manager interacts with a database to persist workflow states and retrieve necessary data. This ensures that workflows can be paused and resumed without loss of information.
- **User Interface**: Provides feedback and updates to the user interface, allowing users to monitor the progress of workflows and receive notifications about state changes.
- **External Services**: Integrates with external services for tasks such as authentication, notifications, and data retrieval, ensuring that workflows can leverage external capabilities seamlessly.
## Data Flow Diagram
The data flow within the Workflow Management System is structured to ensure efficient and reliable processing of information. Below is a high-level overview of the data flow and dependencies:
### Data Flow Description
1. **Workflow Initialization**:
- Data is retrieved from the database to initialize or resume workflows.
- Unique identifiers are generated and stored for tracking purposes.
2. **State Transition**:
- Upon receiving an event, the Workflow Manager processes the event and determines the next state.
- State changes are logged and updated in the database to maintain a history of the workflow.
3. **User Interaction**:
- The system sends updates to the user interface, providing real-time feedback on workflow progress.
- User inputs are captured and processed to influence workflow decisions.
4. **External Service Integration**:
- Data is exchanged with external services to perform specific tasks, such as sending notifications or fetching additional data.
- Responses from external services are processed and used to update the workflow state.
### Dependencies
- **Database**: The system relies heavily on a robust database to store workflow states, user information, and historical data. This dependency ensures data persistence and consistency across sessions.
- **External APIs**: Integration with external APIs is crucial for extending the functionality of workflows, allowing the system to perform complex operations beyond its core capabilities.
In summary, the Workflow Management System's architecture is designed to be modular, scalable, and efficient, with clearly defined components and data flows that ensure reliable operation and ease of maintenance. This architecture supports the dynamic nature of chat workflows, providing a robust framework for managing complex interactions.
# Module Descriptions
This section provides a detailed overview of the modules within the "Comprehensive System Documentation for Workflow Management." Each module is described in terms of its purpose, functionality, key functions, and methods. This documentation is intended for a technical audience and aims to offer a thorough understanding of the system's components and their interconnections.
## Workflow Manager Module
The Workflow Manager Module is a critical component of the system, responsible for managing chat workflows through a state machine-based approach. This module ensures that workflows are executed efficiently and consistently, providing a robust framework for handling complex interactions.
### Purpose and Functionality
The primary purpose of the Workflow Manager Module is to manage the lifecycle of chat workflows. It leverages a state machine to handle transitions between different states, ensuring that workflows progress smoothly and adhere to predefined rules. This module is essential for maintaining the integrity and efficiency of the workflow management system.
### Key Functions and Methods
1. **Workflow Initialization**
- **Functionality**: Initializes a new workflow or loads an existing one.
- **Key Methods**:
- `initialize_workflow(workflow_id, mandate_id, user_id)`: Sets up a new workflow with a unique identifier and associates it with a specific mandate and user.
- `load_existing_workflow(workflow_id)`: Retrieves and loads an existing workflow from the database.
2. **State Management**
- **Functionality**: Manages the transitions between different states within a workflow.
- **Key Methods**:
- `transition_to_state(new_state)`: Transitions the workflow to a new state, ensuring all conditions for the transition are met.
- `get_current_state()`: Returns the current state of the workflow, allowing for monitoring and debugging.
3. **Event Handling**
- **Functionality**: Processes events that trigger state transitions.
- **Key Methods**:
- `handle_event(event_type, event_data)`: Processes incoming events, determining the appropriate state transition based on the event type and data.
- `register_event_listener(listener)`: Allows external components to register listeners for specific events, facilitating integration with other system components.
4. **Error Handling and Recovery**
- **Functionality**: Ensures robust error handling and recovery mechanisms are in place.
- **Key Methods**:
- `handle_error(error_code, error_message)`: Manages errors by logging them and initiating recovery procedures.
- `recover_from_failure()`: Attempts to recover the workflow to a stable state after a failure.
## State Machine Implementation
The State Machine Implementation is a foundational aspect of the Workflow Manager Module, providing the logic and structure necessary for managing state transitions within workflows.
### Purpose and Functionality
The state machine is designed to model the dynamic behavior of workflows, allowing for precise control over state transitions. It ensures that workflows adhere to defined rules and constraints, preventing invalid transitions and maintaining system stability.
### Key Functions and Methods
1. **State Definition**
- **Functionality**: Defines the possible states within a workflow.
- **Key Methods**:
- `define_state(state_name, entry_action, exit_action)`: Establishes a new state with specific entry and exit actions, facilitating controlled transitions.
2. **Transition Logic**
- **Functionality**: Governs the logic for transitioning between states.
- **Key Methods**:
- `add_transition(from_state, to_state, condition)`: Adds a transition rule between states, specifying the condition under which the transition is valid.
- `evaluate_transition(current_state, event)`: Evaluates whether a transition should occur based on the current state and incoming event.
3. **State Persistence**
- **Functionality**: Ensures that state information is persistently stored and retrievable.
- **Key Methods**:
- `save_state(workflow_id, state)`: Persists the current state of a workflow to the database.
- `load_state(workflow_id)`: Retrieves the last known state of a workflow, enabling continuity after system restarts.
4. **Debugging and Monitoring**
- **Functionality**: Provides tools for monitoring and debugging state transitions.
- **Key Methods**:
- `log_state_transition(from_state, to_state)`: Logs each state transition for auditing and debugging purposes.
- `get_transition_history(workflow_id)`: Retrieves the history of state transitions for a specific workflow, aiding in analysis and troubleshooting.
This comprehensive description of the Workflow Manager Module and State Machine Implementation provides a detailed understanding of their roles and functionalities within the workflow management system. The structured approach ensures that workflows are managed efficiently, with robust mechanisms for handling state transitions and errors.
# Workflow Management
This section provides a detailed overview of the workflow management functionality within the system, focusing on the initialization and setup of workflows, state transitions, and error handling mechanisms. This documentation is intended for technical audiences who require a comprehensive understanding of the workflow management processes.
## Workflow Initialization
The workflow initialization process is a critical step in setting up and managing chat workflows within the system. This subsection details the procedures and components involved in initializing workflows.
### Key Components
- **Workflow Manager Module**: The core component responsible for implementing a state machine to manage chat workflows effectively.
- **Unique Identifiers**: Each workflow is initialized with a unique ID, mandate ID, and user ID to ensure distinct tracking and management.
- **Initialization Parameters**: The workflow setup includes parameters such as the initial state, user roles, and permissions, which are essential for defining the workflow's operational context.
### Initialization Process
1. **New Workflow Creation**:
- The system allows for the creation of new workflows by assigning a unique workflow ID.
- Initial parameters are set, including user roles and initial state configuration.
2. **Loading Existing Workflows**:
- Existing workflows can be loaded into the system using their unique identifiers.
- The system retrieves and restores the workflow's state and context from persistent storage.
3. **Setup Confirmation**:
- Upon initialization, the system confirms the setup by logging the workflow details and ensuring all parameters are correctly configured.
## Workflow States
This subsection describes the state transitions within the workflow management system, focusing on how workflows progress through various states.
### State Machine Overview
- **State Definitions**: Each workflow consists of predefined states, such as "Initialized," "In Progress," "Completed," and "Error."
- **Transition Rules**: The system enforces rules that dictate permissible transitions between states, ensuring logical progression and preventing invalid state changes.
### State Transition Process
1. **State Change Triggers**:
- Transitions are triggered by specific events or conditions, such as user actions or system notifications.
- Each trigger is associated with a corresponding state change, which is logged for audit purposes.
2. **State Validation**:
- Before a state transition occurs, the system validates the transition against predefined rules to ensure it is permissible.
- Invalid transitions are rejected, and appropriate error messages are generated.
3. **State Update**:
- Upon successful validation, the workflow's state is updated, and the system notifies relevant stakeholders of the change.
- The updated state is persisted in the system to maintain consistency and reliability.
## Error Handling Mechanisms
Effective error handling is crucial for maintaining the integrity and reliability of the workflow management system. This subsection outlines the mechanisms in place to manage errors.
### Error Detection
- **Monitoring and Alerts**: The system continuously monitors workflows for anomalies and generates alerts when errors are detected.
- **Error Logging**: All errors are logged with detailed information, including timestamps, error codes, and descriptions, to facilitate troubleshooting.
### Error Resolution
1. **Automated Recovery**:
- The system attempts to automatically resolve common errors through predefined recovery procedures.
- Successful recoveries are logged, and workflows are returned to a stable state.
2. **Manual Intervention**:
- For errors that cannot be resolved automatically, the system provides detailed error reports to administrators.
- Administrators can manually intervene to correct the issue and resume normal workflow operations.
3. **Error Escalation**:
- Critical errors that impact system stability are escalated to higher-level support teams for immediate attention.
- Escalation procedures include detailed documentation of the error and its impact on the system.
By understanding these components and processes, technical users can effectively manage and troubleshoot workflows within the system, ensuring smooth and reliable operations.
# Integration and Dependencies
This section provides a detailed overview of the integration points and dependencies within the Workflow Management system. It is crucial for understanding how the system interacts with external systems and manages its dependencies to ensure seamless operation. The section is divided into two main subsections: External Systems and APIs and Endpoints.
## External Systems
The Workflow Management system is designed to interact with various external systems to enhance its functionality and provide comprehensive workflow solutions. These integrations are essential for data exchange, process automation, and extending the capabilities of the system. Below are the key external systems integrated with the Workflow Management system:
1. **Customer Relationship Management (CRM) Systems**:
- The system integrates with popular CRM platforms to fetch and update customer data, ensuring that workflows are informed by the latest customer interactions and information.
2. **Enterprise Resource Planning (ERP) Systems**:
- Integration with ERP systems allows the Workflow Management system to access and utilize enterprise-wide data, facilitating more informed decision-making within workflows.
3. **Communication Platforms**:
- The system connects with various communication platforms (e.g., email, messaging apps) to send notifications and updates, ensuring that all stakeholders are informed of workflow progress and changes.
4. **Data Analytics Tools**:
- By integrating with data analytics tools, the system can provide insights and reports based on workflow data, aiding in performance tracking and optimization.
## APIs and Endpoints
The Workflow Management system exposes several APIs and endpoints that allow for seamless integration with external systems and facilitate communication between different components of the system. These APIs are designed to be robust, secure, and easy to use, enabling developers to extend and customize the system as needed.
### Key APIs
1. **Workflow Initialization API**:
- **Endpoint**: `/api/workflow/init`
- **Method**: POST
- **Description**: Initializes a new workflow or loads an existing one. Requires parameters such as unique ID, mandate ID, and user ID.
- **Example Request**:
```json
{
"unique_id": "12345",
"mandate_id": "67890",
"user_id": "user_001"
}
```
2. **State Transition API**:
- **Endpoint**: `/api/workflow/transition`
- **Method**: POST
- **Description**: Manages state transitions within a workflow. This API ensures that workflows progress through predefined states based on specific triggers or conditions.
- **Example Request**:
```json
{
"workflow_id": "12345",
"current_state": "pending",
"next_state": "approved"
}
```
3. **Notification API**:
- **Endpoint**: `/api/notifications/send`
- **Method**: POST
- **Description**: Sends notifications to users or systems based on workflow events. Supports multiple communication channels.
- **Example Request**:
```json
{
"recipient": "user_001",
"message": "Your workflow has been approved.",
"channel": "email"
}
```
### Dependency Management
Effective dependency management is critical for the stability and performance of the Workflow Management system. The system relies on several libraries and frameworks, which are managed through a package manager to ensure compatibility and ease of updates.
- **Python Libraries**: The system utilizes various Python libraries for state management, API handling, and data processing. These libraries are specified in a `requirements.txt` file, which can be used to install all necessary dependencies using pip.
- **Version Control**: Dependencies are version-controlled to prevent compatibility issues. The system is regularly updated to incorporate the latest stable versions of libraries, ensuring security and performance enhancements.
- **Testing and Validation**: Before integrating new dependencies or updating existing ones, thorough testing is conducted to validate their compatibility and performance within the system.
In conclusion, the integration and dependencies of the Workflow Management system are meticulously managed to ensure robust performance and seamless interaction with external systems. This section provides a comprehensive understanding of how these integrations and dependencies are structured and maintained.
# Usage and Examples
This section provides detailed guidance on how to effectively use the Workflow Management System, illustrating both basic and advanced scenarios. It includes examples of workflows and common use cases to help users understand the system's capabilities and applications.
## Basic Usage
The Workflow Management System is designed to streamline the management of chat workflows using a state machine-based approach. Below are the fundamental steps to get started with the system:
### Workflow Initialization
To begin using the system, you must initialize a workflow. This process involves setting up a new workflow or loading an existing one. Each workflow is identified by a unique ID, along with a mandate ID and user ID. The initialization process ensures that the workflow is correctly configured to handle subsequent operations.
**Example:**
```python
from workflowManager import WorkflowManager
# Initialize a new workflow
workflow = WorkflowManager.initialize_workflow(
unique_id="workflow123",
mandate_id="mandate456",
user_id="user789"
)
```
### Managing Workflow States
Once a workflow is initialized, it can transition between various states. The state machine manages these transitions, ensuring that the workflow progresses logically from one state to the next.
**Example:**
```python
# Transition to the next state
workflow.transition_to_next_state()
# Check current state
current_state = workflow.get_current_state()
print(f"Current State: {current_state}")
```
### Completing a Workflow
After all necessary states have been processed, the workflow can be completed. This marks the end of the workflow's lifecycle.
**Example:**
```python
# Complete the workflow
workflow.complete_workflow()
```
## Advanced Scenarios
For more complex use cases, the Workflow Management System offers advanced functionalities that cater to intricate workflow requirements.
### Conditional State Transitions
In some scenarios, state transitions may depend on specific conditions or external inputs. The system allows for conditional logic to be incorporated into the workflow.
**Example:**
```python
# Conditional transition based on external input
if workflow.check_condition("condition_met"):
workflow.transition_to_state("next_state")
```
### Parallel Workflow Execution
The system supports the execution of parallel workflows, enabling multiple workflows to run concurrently without interference.
**Example:**
```python
# Initialize multiple workflows
workflow1 = WorkflowManager.initialize_workflow("workflow1", "mandate1", "user1")
workflow2 = WorkflowManager.initialize_workflow("workflow2", "mandate2", "user2")
# Execute workflows in parallel
workflow1.transition_to_next_state()
workflow2.transition_to_next_state()
```
### Error Handling and Recovery
The system is equipped with robust error handling mechanisms to manage exceptions and ensure workflow continuity. In the event of an error, the system can revert to a safe state or retry operations.
**Example:**
```python
try:
workflow.transition_to_next_state()
except WorkflowError as e:
print(f"Error encountered: {e}")
workflow.revert_to_previous_state()
```
## Common Use Cases
The Workflow Management System is versatile and can be applied to various domains. Below are some common use cases:
- **Customer Support Chatbots**: Automating customer interactions by managing conversation states and responses.
- **Order Processing Systems**: Handling order states from initiation to completion, including payment and delivery.
- **Project Management Tools**: Tracking project phases and tasks, ensuring timely transitions and updates.
By understanding these examples and scenarios, users can leverage the Workflow Management System to optimize their processes and enhance operational efficiency.
# Troubleshooting and FAQs
This section provides guidance on resolving common issues and answers frequently asked questions related to the Workflow Management system. It also outlines available support and resources for further assistance.
## Common Issues
### 1. Workflow Initialization Errors
**Issue**: Errors occur during the initialization of a new workflow or when loading an existing one.
**Solution**:
- Ensure that all required parameters (unique ID, mandate ID, user ID) are correctly provided.
- Verify that the database connection is active and accessible.
- Check for any syntax errors in the configuration files.
**Example**: If you encounter an error message like `Initialization failed: Missing mandate ID`, double-check that the mandate ID is included in the initialization call.
### 2. State Transition Failures
**Issue**: The state machine fails to transition between states as expected.
**Solution**:
- Confirm that all state transition rules are correctly defined in the workflow configuration.
- Ensure that the current state is valid and that the transition conditions are met.
- Review the logs for any error messages that might indicate the cause of the failure.
**Example**: If a transition from `Pending` to `Approved` does not occur, check the conditions defined for this transition in the `workflowManager.py` file.
### 3. Performance Degradation
**Issue**: The system experiences slow performance during workflow processing.
**Solution**:
- Optimize database queries to reduce execution time.
- Increase system resources such as CPU and memory if necessary.
- Review the workflow logic for any inefficient loops or redundant operations.
**Example**: If processing a workflow takes significantly longer than expected, analyze the database query logs to identify slow queries.
## FAQs
### What is the purpose of the Workflow Manager Module?
The Workflow Manager Module implements a state machine to manage chat workflows, ensuring that each workflow progresses through predefined states based on specific conditions and triggers.
### How do I add a new state to the workflow?
To add a new state, update the workflow configuration file with the new state definition and specify the allowed transitions to and from this state. Ensure that the state machine logic in `workflowManager.py` is updated accordingly.
### Can I customize the workflow for different user roles?
Yes, workflows can be customized based on user roles by defining role-specific states and transitions in the configuration. Ensure that role-based access controls are implemented to enforce these customizations.
### Where can I find logs for debugging purposes?
Logs are typically stored in the `/var/log/workflow_manager/` directory. You can configure the logging level and output location in the system's configuration file.
### How do I contact support for further assistance?
For additional support, please contact our technical support team via email at support@workflowmanagement.com or call our helpline at +1-800-555-0199. Our support team is available 24/7 to assist with any issues.
## Support and Resources
- **Documentation**: Refer to the [Comprehensive System Documentation](#) for detailed information on system functionalities and configurations.
- **Community Forum**: Join our [User Community Forum](#) to discuss issues and share solutions with other users.
- **Training Sessions**: Sign up for our [Online Training Sessions](#) to enhance your understanding of the Workflow Management system.
For further inquiries, please refer to our [Support Page](#) for more resources and contact information.
# Appendices
This section provides additional resources and information to support the understanding and application of the "Comprehensive System Documentation for Workflow Management". It includes a glossary of terms used throughout the documentation and references for further reading.
## Glossary
This glossary defines key terms and concepts used in the workflow management system documentation. Understanding these terms is essential for comprehending the system's functionality and operations.
- **State Machine**: A computational model used to design algorithms. It consists of a finite number of states, transitions between these states, and actions, particularly useful in managing workflows where the system's state changes in response to events.
- **Workflow**: A sequence of processes through which a piece of work passes from initiation to completion. In this context, it refers to the automated processes managed by the system to handle chat interactions.
- **Workflow Initialization**: The process of setting up a new workflow instance or loading an existing one. This involves assigning unique identifiers and setting initial parameters.
- **Module**: A self-contained unit of code that encapsulates a specific functionality within the system. In the workflow management system, modules handle distinct aspects of workflow operations.
- **Backend**: The server-side part of the application, responsible for managing data, business logic, and workflows, as opposed to the frontend, which is the user interface.
- **Chat Workflow**: A specific type of workflow designed to manage interactions in a chat environment, ensuring that messages are processed and responded to according to predefined rules.
- **Unique ID**: A distinct identifier assigned to each workflow instance to differentiate it from others, ensuring accurate tracking and management.
- **Mandate ID**: An identifier used to associate a workflow with a specific mandate or task, providing context and purpose to the workflow's operations.
- **User ID**: An identifier that associates a workflow with a specific user, enabling personalized interactions and tracking.
## References
This section lists resources and literature that provide additional insights and information on workflow management systems, state machines, and related technologies. These references are valuable for readers seeking to deepen their understanding or explore advanced topics.
1. **"Design Patterns: Elements of Reusable Object-Oriented Software" by Erich Gamma, Richard Helm, Ralph Johnson, and John Vlissides**
- This book provides foundational knowledge on design patterns, including state machines, which are crucial for understanding workflow management systems.
2. **"Workflow Management: Models, Methods, and Systems" by Wil van der Aalst and Kees van Hee**
- A comprehensive resource on workflow management, covering theoretical models and practical implementations.
3. **"Finite State Machines in Software Development" by David M. Beazley**
- An article that explores the application of finite state machines in software development, offering practical examples and insights.
4. **"The Art of Scalability: Scalable Web Architecture, Processes, and Organizations for the Modern Enterprise" by Martin L. Abbott and Michael T. Fisher**
- This book discusses scalable architectures, including workflow management systems, providing strategies for building robust and efficient systems.
5. **Online Resources**:
- [State Machine Design Patterns](https://www.example.com/state-machine-design-patterns)
- [Workflow Management Coalition](https://www.example.com/workflow-management-coalition)
These resources are recommended for further exploration and understanding of the concepts and technologies underpinning the workflow management system described in this documentation.
# Conclusion
In this "Comprehensive System Documentation for Workflow Management," we have meticulously explored the intricate components and functionalities that constitute the workflow management system. This manual has been crafted with a technical audience in mind, aiming to provide a thorough understanding of the system's architecture and operational dynamics.
## Summary of Key Points
1. **State Machine**: We delved into the state machine's pivotal role in managing the transitions and states within the workflow. The documentation detailed how state machines ensure the system's robustness and flexibility, allowing for seamless state transitions and efficient workflow management.
2. **Workflow Management**: The core principles and methodologies of workflow management were outlined, emphasizing the system's capability to streamline processes and enhance productivity. We discussed various workflow scenarios and how the system adapts to different operational needs.
3. **System Architecture**: A comprehensive overview of the system architecture was provided, highlighting the interconnections between various components. This section elucidated how each module interacts within the system, ensuring a cohesive and efficient workflow management environment.
4. **Error Handling**: Effective error handling mechanisms were described, showcasing the system's resilience and ability to maintain operational integrity. We covered strategies for identifying, logging, and resolving errors to minimize disruptions and maintain workflow continuity.
## Closure and Recommendations
This documentation serves as a foundational resource for understanding and optimizing the workflow management system. By detailing the system's components and their interactions, we have provided a roadmap for both current operations and future enhancements.
### Recommendations:
- **Continuous Monitoring**: Implement ongoing monitoring to identify potential bottlenecks or inefficiencies within the workflow. Regular audits can help maintain optimal performance and adaptability.
- **System Updates**: Stay abreast of technological advancements and consider integrating new tools or methodologies that could enhance the system's capabilities.
- **Training and Development**: Encourage continuous learning and development for team members to ensure they are proficient in utilizing the system to its fullest potential.
## Final Thoughts
The significance of this documentation lies in its ability to demystify the complexities of workflow management systems. By providing a clear and detailed account of the system's architecture and operations, we empower technical teams to effectively manage and optimize workflows. This manual not only serves as a guide but also as a catalyst for innovation and efficiency within the organization.
Through this documentation, we hope to have equipped you with the knowledge and insights necessary to harness the full potential of your workflow management system, fostering an environment of continuous improvement and success.

View file

@ -1,38 +0,0 @@
# NOTE(review): presumably populated by the execution harness before the
# script runs — confirm against the runner; the marker below forbids edits.
inputFiles = [] # DO NOT CHANGE THIS LINE
def is_prime(n):
    """Return True if n is a prime number, False otherwise.

    Negatives, 0 and 1 are rejected; 2 and 3 accepted. Remaining candidates
    are tested by 6k±1 trial division up to isqrt(n), which is exact for
    arbitrarily large ints (unlike a float sqrt bound).
    """
    from math import isqrt  # local import keeps the script self-contained

    if n <= 1:
        return False
    if n <= 3:
        return True  # 2 and 3
    if n % 2 == 0 or n % 3 == 0:
        return False
    # Every prime > 3 has the form 6k ± 1; test only those candidates.
    for factor in range(5, isqrt(n) + 1, 6):
        if n % factor == 0 or n % (factor + 2) == 0:
            return False
    return True
def generate_primes(count):
    """Collect and return the first `count` prime numbers in ascending order."""
    collected = []
    candidate = 2
    while len(collected) < count:
        if is_prime(candidate):
            collected.append(candidate)
        candidate += 1
    return collected
# Emit the first 1000 primes as a JSON payload keyed by output filename.
import json

primes = generate_primes(1000)
prime_numbers_content = "\n".join(str(p) for p in primes)

# Result contract: filename -> {content, base64Encoded, contentType}.
result = {
    "prime_numbers.txt": {
        "content": prime_numbers_content,
        "base64Encoded": False,
        "contentType": "text/plain",
    }
}
print(json.dumps(result))

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large Load diff

View file

@ -1,44 +0,0 @@
# NOTE(review): presumably injected by the sandbox runner — confirm.
inputFiles = [] # DO NOT CHANGE THIS LINE
# REQUIREMENTS:
def is_prime(n: int) -> bool:
    """Return whether n is prime, using 6k±1 trial division up to sqrt(n)."""
    if n <= 1:
        return False
    if n <= 3:
        return True  # 2 and 3 are prime
    if n % 2 == 0 or n % 3 == 0:
        return False
    # Composites not divisible by 2 or 3 have a factor of the form 6k ± 1.
    divisor = 5
    while divisor * divisor <= n:
        if n % divisor == 0:
            return False
        if n % (divisor + 2) == 0:
            return False
        divisor += 6
    return True
def generate_primes(count: int) -> list:
    """Return the first `count` primes in increasing order (empty if count <= 0)."""
    found: list = []
    candidate = 2
    while len(found) < count:
        if is_prime(candidate):
            found.append(candidate)
        candidate += 1
    return found
def save_primes_to_file(primes: list, filename: str) -> None:
    """Write one prime per line to `filename`; I/O failures are reported
    to stdout rather than raised (best-effort persistence)."""
    lines = [f"{p}\n" for p in primes]
    try:
        with open(filename, 'w') as out:
            out.writelines(lines)
    except IOError as e:
        print(f"An error occurred while writing to the file: {e}")
def main():
    """Generate the first 650 primes, persist them to primes.txt, and
    return a {filename: newline-joined primes} mapping for the harness."""
    output_filename = "primes.txt"
    primes = generate_primes(650)
    save_primes_to_file(primes, output_filename)
    return {output_filename: "\n".join(str(p) for p in primes)}


result = main()

View file

@ -1,13 +0,0 @@
[
{
"attempt": 1,
"code": "inputFiles = [] # DO NOT CHANGE THIS LINE\n\n# REQUIREMENTS: \n\ndef is_prime(n: int) -> bool:\n if n <= 1:\n return False\n if n <= 3:\n return True\n if n % 2 == 0 or n % 3 == 0:\n return False\n i = 5\n while i * i <= n:\n if n % i == 0 or n % (i + 2) == 0:\n return False\n i += 6\n return True\n\ndef generate_primes(count: int) -> list:\n primes = []\n num = 2\n while len(primes) < count:\n if is_prime(num):\n primes.append(num)\n num += 1\n return primes\n\ndef save_primes_to_file(primes: list, filename: str) -> None:\n try:\n with open(filename, 'w') as file:\n for prime in primes:\n file.write(f\"{prime}\\n\")\n except IOError as e:\n print(f\"An error occurred while writing to the file: {e}\")\n\ndef main():\n prime_count = 650\n primes = generate_primes(prime_count)\n output_filename = \"primes.txt\"\n save_primes_to_file(primes, output_filename)\n result = {output_filename: \"\\n\".join(map(str, primes))}\n return result\n\nresult = main()",
"result": {
"success": true,
"output": "",
"error": "",
"result": null,
"exitCode": 0
}
}
]

View file

@ -1,37 +0,0 @@
# NOTE(review): presumably injected by the sandbox runner — confirm.
inputFiles = [] # DO NOT CHANGE THIS LINE
# REQUIREMENTS:
def is_prime(n: int) -> bool:
    """Return True when n is a prime number, False for composites and n <= 1."""
    if n <= 1:
        return False
    if n <= 3:
        return True
    if n % 2 == 0 or n % 3 == 0:
        return False
    # All primes > 3 are of the form 6k ± 1; test those candidates up to sqrt(n).
    candidate = 5
    while candidate * candidate <= n:
        if n % candidate == 0 or n % (candidate + 2) == 0:
            return False
        candidate += 6
    return True
def calculate_primes(limit: int) -> list:
    """Return the first `limit` primes; `limit` is a count, not an upper bound."""
    primes_out: list = []
    candidate = 2
    while len(primes_out) < limit:
        if is_prime(candidate):
            primes_out.append(candidate)
        candidate += 1
    return primes_out
# Compute the first 400 primes and expose them via the `result` mapping.
primes = calculate_primes(400)
primes_str = "\n".join(str(p) for p in primes)

# Result contract: filename -> {content, base64Encoded, contentType}.
result = {
    "primes.txt": {
        "content": primes_str,
        "base64Encoded": False,
        "contentType": "text/plain",
    }
}

View file

@ -1,13 +0,0 @@
[
{
"attempt": 1,
"code": "inputFiles = [] # DO NOT CHANGE THIS LINE\n\n# REQUIREMENTS: \n\ndef is_prime(n: int) -> bool:\n if n <= 1:\n return False\n if n <= 3:\n return True\n if n % 2 == 0 or n % 3 == 0:\n return False\n i = 5\n while i * i <= n:\n if n % i == 0 or n % (i + 2) == 0:\n return False\n i += 6\n return True\n\ndef calculate_primes(limit: int) -> list:\n primes = []\n num = 2\n while len(primes) < limit:\n if is_prime(num):\n primes.append(num)\n num += 1\n return primes\n\nprimes = calculate_primes(400)\nprimes_str = \"\\n\".join(map(str, primes))\n\nresult = {\n \"primes.txt\": {\n \"content\": primes_str,\n \"base64Encoded\": False,\n \"contentType\": \"text/plain\"\n }\n}",
"result": {
"success": true,
"output": "",
"error": "",
"result": null,
"exitCode": 0
}
}
]

View file

@ -1,41 +0,0 @@
# NOTE(review): presumably injected by the sandbox runner — confirm.
inputFiles = [] # DO NOT CHANGE THIS LINE
# REQUIREMENTS:
import json
def is_prime(n):
    """Return True if n is a prime number, False otherwise.

    Handles negatives, 0 and 1 (all non-prime) and uses 6k±1 trial
    division bounded by math.isqrt(n), which is exact for large ints.
    """
    from math import isqrt  # local import keeps the script self-contained

    if n <= 1:
        return False
    if n <= 3:
        return True  # 2 and 3
    if n % 2 == 0 or n % 3 == 0:
        return False
    # Every prime > 3 has the form 6k ± 1; test only those candidates.
    for factor in range(5, isqrt(n) + 1, 6):
        if n % factor == 0 or n % (factor + 2) == 0:
            return False
    return True
def calculate_primes(count):
    """Return the first `count` prime numbers in ascending order."""
    acc = []
    candidate = 2
    while len(acc) < count:
        if is_prime(candidate):
            acc.append(candidate)
        candidate += 1
    return acc
# First 333 primes, serialized to stdout as a JSON result payload.
primes = calculate_primes(333)
primes_str = "\n".join(str(p) for p in primes)

# Result contract: filename -> {content, base64Encoded, contentType}.
result = {
    "output.txt": {
        "content": primes_str,
        "base64Encoded": False,
        "contentType": "text/plain",
    }
}
print(json.dumps(result))

View file

@ -1,19 +0,0 @@
[
{
"attempt": 1,
"code": "inputFiles = [] # DO NOT CHANGE THIS LINE\n\n# REQUIREMENTS: \n\nimport json\n\ndef is_prime(n):\n if n <= 1:\n return False\n if n <= 3:\n return True\n if n % 2 == 0 or n % 3 == 0:\n return False\n i = 5\n while i * i <= n:\n if n % i == 0 or n % (i + 2) == 0:\n return False\n i += 6\n return True\n\ndef calculate_primes(count):\n primes = []\n num = 2\n while len(primes) < count:\n if is_prime(num):\n primes.append(num)\n num += 1\n return primes\n\nprimes = calculate_primes(333)\nprimes_str = \"\\n\".join(map(str, primes))\n\nresult = {\n \"output.txt\": {\n \"content\": primes_str,\n \"base64Encoded\": False,\n \"contentType\": \"text/plain\"\n }\n}\n\nprint(json.dumps(result))",
"result": {
"success": true,
"output": "{\"output.txt\": {\"content\": \"2\\n3\\n5\\n7\\n11\\n13\\n17\\n19\\n23\\n29\\n31\\n37\\n41\\n43\\n47\\n53\\n59\\n61\\n67\\n71\\n73\\n79\\n83\\n89\\n97\\n101\\n103\\n107\\n109\\n113\\n127\\n131\\n137\\n139\\n149\\n151\\n157\\n163\\n167\\n173\\n179\\n181\\n191\\n193\\n197\\n199\\n211\\n223\\n227\\n229\\n233\\n239\\n241\\n251\\n257\\n263\\n269\\n271\\n277\\n281\\n283\\n293\\n307\\n311\\n313\\n317\\n331\\n337\\n347\\n349\\n353\\n359\\n367\\n373\\n379\\n383\\n389\\n397\\n401\\n409\\n419\\n421\\n431\\n433\\n439\\n443\\n449\\n457\\n461\\n463\\n467\\n479\\n487\\n491\\n499\\n503\\n509\\n521\\n523\\n541\\n547\\n557\\n563\\n569\\n571\\n577\\n587\\n593\\n599\\n601\\n607\\n613\\n617\\n619\\n631\\n641\\n643\\n647\\n653\\n659\\n661\\n673\\n677\\n683\\n691\\n701\\n709\\n719\\n727\\n733\\n739\\n743\\n751\\n757\\n761\\n769\\n773\\n787\\n797\\n809\\n811\\n821\\n823\\n827\\n829\\n839\\n853\\n857\\n859\\n863\\n877\\n881\\n883\\n887\\n907\\n911\\n919\\n929\\n937\\n941\\n947\\n953\\n967\\n971\\n977\\n983\\n991\\n997\\n1009\\n1013\\n1019\\n1021\\n1031\\n1033\\n1039\\n1049\\n1051\\n1061\\n1063\\n1069\\n1087\\n1091\\n1093\\n1097\\n1103\\n1109\\n1117\\n1123\\n1129\\n1151\\n1153\\n1163\\n1171\\n1181\\n1187\\n1193\\n1201\\n1213\\n1217\\n1223\\n1229\\n1231\\n1237\\n1249\\n1259\\n1277\\n1279\\n1283\\n1289\\n1291\\n1297\\n1301\\n1303\\n1307\\n1319\\n1321\\n1327\\n1361\\n1367\\n1373\\n1381\\n1399\\n1409\\n1423\\n1427\\n1429\\n1433\\n1439\\n1447\\n1451\\n1453\\n1459\\n1471\\n1481\\n1483\\n1487\\n1489\\n1493\\n1499\\n1511\\n1523\\n1531\\n1543\\n1549\\n1553\\n1559\\n1567\\n1571\\n1579\\n1583\\n1597\\n1601\\n1607\\n1609\\n1613\\n1619\\n1621\\n1627\\n1637\\n1657\\n1663\\n1667\\n1669\\n1693\\n1697\\n1699\\n1709\\n1721\\n1723\\n1733\\n1741\\n1747\\n1753\\n1759\\n1777\\n1783\\n1787\\n1789\\n1801\\n1811\\n1823\\n1831\\n1847\\n1861\\n1867\\n1871\\n1873\\n1877\\n1879\\n1889\\n1901\\n1907\\n1913\\n1931\\n1933\\n1949\\n1951\\n1973\\n1979\\n1987\\n1993\\n1997\\n1999\\n2003\\n2011\\n2017\\n2027\\n2029
\\n2039\\n2053\\n2063\\n2069\\n2081\\n2083\\n2087\\n2089\\n2099\\n2111\\n2113\\n2129\\n2131\\n2137\\n2141\\n2143\\n2153\\n2161\\n2179\\n2203\\n2207\\n2213\\n2221\\n2237\\n2239\", \"base64Encoded\": false, \"contentType\": \"text/plain\"}}\n",
"error": "",
"result": {
"output.txt": {
"content": "2\n3\n5\n7\n11\n13\n17\n19\n23\n29\n31\n37\n41\n43\n47\n53\n59\n61\n67\n71\n73\n79\n83\n89\n97\n101\n103\n107\n109\n113\n127\n131\n137\n139\n149\n151\n157\n163\n167\n173\n179\n181\n191\n193\n197\n199\n211\n223\n227\n229\n233\n239\n241\n251\n257\n263\n269\n271\n277\n281\n283\n293\n307\n311\n313\n317\n331\n337\n347\n349\n353\n359\n367\n373\n379\n383\n389\n397\n401\n409\n419\n421\n431\n433\n439\n443\n449\n457\n461\n463\n467\n479\n487\n491\n499\n503\n509\n521\n523\n541\n547\n557\n563\n569\n571\n577\n587\n593\n599\n601\n607\n613\n617\n619\n631\n641\n643\n647\n653\n659\n661\n673\n677\n683\n691\n701\n709\n719\n727\n733\n739\n743\n751\n757\n761\n769\n773\n787\n797\n809\n811\n821\n823\n827\n829\n839\n853\n857\n859\n863\n877\n881\n883\n887\n907\n911\n919\n929\n937\n941\n947\n953\n967\n971\n977\n983\n991\n997\n1009\n1013\n1019\n1021\n1031\n1033\n1039\n1049\n1051\n1061\n1063\n1069\n1087\n1091\n1093\n1097\n1103\n1109\n1117\n1123\n1129\n1151\n1153\n1163\n1171\n1181\n1187\n1193\n1201\n1213\n1217\n1223\n1229\n1231\n1237\n1249\n1259\n1277\n1279\n1283\n1289\n1291\n1297\n1301\n1303\n1307\n1319\n1321\n1327\n1361\n1367\n1373\n1381\n1399\n1409\n1423\n1427\n1429\n1433\n1439\n1447\n1451\n1453\n1459\n1471\n1481\n1483\n1487\n1489\n1493\n1499\n1511\n1523\n1531\n1543\n1549\n1553\n1559\n1567\n1571\n1579\n1583\n1597\n1601\n1607\n1609\n1613\n1619\n1621\n1627\n1637\n1657\n1663\n1667\n1669\n1693\n1697\n1699\n1709\n1721\n1723\n1733\n1741\n1747\n1753\n1759\n1777\n1783\n1787\n1789\n1801\n1811\n1823\n1831\n1847\n1861\n1867\n1871\n1873\n1877\n1879\n1889\n1901\n1907\n1913\n1931\n1933\n1949\n1951\n1973\n1979\n1987\n1993\n1997\n1999\n2003\n2011\n2017\n2027\n2029\n2039\n2053\n2063\n2069\n2081\n2083\n2087\n2089\n2099\n2111\n2113\n2129\n2131\n2137\n2141\n2143\n2153\n2161\n2179\n2203\n2207\n2213\n2221\n2237\n2239",
"base64Encoded": false,
"contentType": "text/plain"
}
},
"exitCode": 0
}
}
]

View file

@ -1,41 +0,0 @@
# NOTE(review): presumably injected by the sandbox runner — confirm.
inputFiles = [] # DO NOT CHANGE THIS LINE
# REQUIREMENTS:
import json
def is_prime(n):
    """Return True when n is prime, False for composites and any n <= 1."""
    if n <= 1:
        return False
    if n <= 3:
        return True  # 2 and 3 are prime
    if n % 2 == 0 or n % 3 == 0:
        return False
    # Remaining primes have the form 6k ± 1; probe those up to sqrt(n).
    factor = 5
    while factor * factor <= n:
        if n % factor == 0:
            return False
        if n % (factor + 2) == 0:
            return False
        factor += 6
    return True
def generate_primes(count):
    """Return the first `count` primes, smallest first.

    Relies on the module-level is_prime() for the primality test.
    """
    found = []
    value = 2
    while len(found) < count:
        if is_prime(value):
            found.append(value)
        value += 1
    return found
# First 444 primes, serialized to stdout as a JSON result payload.
primes = generate_primes(444)
primes_str = "\n".join(str(p) for p in primes)

# Result contract: filename -> {content, base64Encoded, contentType}.
result = {
    "output.txt": {
        "content": primes_str,
        "base64Encoded": False,
        "contentType": "text/plain",
    }
}
print(json.dumps(result))

View file

@ -1,19 +0,0 @@
[
{
"attempt": 1,
"code": "inputFiles = [] # DO NOT CHANGE THIS LINE\n\n# REQUIREMENTS: \n\nimport json\n\ndef is_prime(n):\n if n <= 1:\n return False\n if n <= 3:\n return True\n if n % 2 == 0 or n % 3 == 0:\n return False\n i = 5\n while i * i <= n:\n if n % i == 0 or n % (i + 2) == 0:\n return False\n i += 6\n return True\n\ndef generate_primes(count):\n primes = []\n num = 2\n while len(primes) < count:\n if is_prime(num):\n primes.append(num)\n num += 1\n return primes\n\nprimes = generate_primes(444)\nprimes_str = \"\\n\".join(map(str, primes))\n\nresult = {\n \"output.txt\": {\n \"content\": primes_str,\n \"base64Encoded\": False,\n \"contentType\": \"text/plain\"\n }\n}\n\nprint(json.dumps(result))",
"result": {
"success": true,
"output": "{\"output.txt\": {\"content\": \"2\\n3\\n5\\n7\\n11\\n13\\n17\\n19\\n23\\n29\\n31\\n37\\n41\\n43\\n47\\n53\\n59\\n61\\n67\\n71\\n73\\n79\\n83\\n89\\n97\\n101\\n103\\n107\\n109\\n113\\n127\\n131\\n137\\n139\\n149\\n151\\n157\\n163\\n167\\n173\\n179\\n181\\n191\\n193\\n197\\n199\\n211\\n223\\n227\\n229\\n233\\n239\\n241\\n251\\n257\\n263\\n269\\n271\\n277\\n281\\n283\\n293\\n307\\n311\\n313\\n317\\n331\\n337\\n347\\n349\\n353\\n359\\n367\\n373\\n379\\n383\\n389\\n397\\n401\\n409\\n419\\n421\\n431\\n433\\n439\\n443\\n449\\n457\\n461\\n463\\n467\\n479\\n487\\n491\\n499\\n503\\n509\\n521\\n523\\n541\\n547\\n557\\n563\\n569\\n571\\n577\\n587\\n593\\n599\\n601\\n607\\n613\\n617\\n619\\n631\\n641\\n643\\n647\\n653\\n659\\n661\\n673\\n677\\n683\\n691\\n701\\n709\\n719\\n727\\n733\\n739\\n743\\n751\\n757\\n761\\n769\\n773\\n787\\n797\\n809\\n811\\n821\\n823\\n827\\n829\\n839\\n853\\n857\\n859\\n863\\n877\\n881\\n883\\n887\\n907\\n911\\n919\\n929\\n937\\n941\\n947\\n953\\n967\\n971\\n977\\n983\\n991\\n997\\n1009\\n1013\\n1019\\n1021\\n1031\\n1033\\n1039\\n1049\\n1051\\n1061\\n1063\\n1069\\n1087\\n1091\\n1093\\n1097\\n1103\\n1109\\n1117\\n1123\\n1129\\n1151\\n1153\\n1163\\n1171\\n1181\\n1187\\n1193\\n1201\\n1213\\n1217\\n1223\\n1229\\n1231\\n1237\\n1249\\n1259\\n1277\\n1279\\n1283\\n1289\\n1291\\n1297\\n1301\\n1303\\n1307\\n1319\\n1321\\n1327\\n1361\\n1367\\n1373\\n1381\\n1399\\n1409\\n1423\\n1427\\n1429\\n1433\\n1439\\n1447\\n1451\\n1453\\n1459\\n1471\\n1481\\n1483\\n1487\\n1489\\n1493\\n1499\\n1511\\n1523\\n1531\\n1543\\n1549\\n1553\\n1559\\n1567\\n1571\\n1579\\n1583\\n1597\\n1601\\n1607\\n1609\\n1613\\n1619\\n1621\\n1627\\n1637\\n1657\\n1663\\n1667\\n1669\\n1693\\n1697\\n1699\\n1709\\n1721\\n1723\\n1733\\n1741\\n1747\\n1753\\n1759\\n1777\\n1783\\n1787\\n1789\\n1801\\n1811\\n1823\\n1831\\n1847\\n1861\\n1867\\n1871\\n1873\\n1877\\n1879\\n1889\\n1901\\n1907\\n1913\\n1931\\n1933\\n1949\\n1951\\n1973\\n1979\\n1987\\n1993\\n1997\\n1999\\n2003\\n2011\\n2017\\n2027\\n2029
\\n2039\\n2053\\n2063\\n2069\\n2081\\n2083\\n2087\\n2089\\n2099\\n2111\\n2113\\n2129\\n2131\\n2137\\n2141\\n2143\\n2153\\n2161\\n2179\\n2203\\n2207\\n2213\\n2221\\n2237\\n2239\\n2243\\n2251\\n2267\\n2269\\n2273\\n2281\\n2287\\n2293\\n2297\\n2309\\n2311\\n2333\\n2339\\n2341\\n2347\\n2351\\n2357\\n2371\\n2377\\n2381\\n2383\\n2389\\n2393\\n2399\\n2411\\n2417\\n2423\\n2437\\n2441\\n2447\\n2459\\n2467\\n2473\\n2477\\n2503\\n2521\\n2531\\n2539\\n2543\\n2549\\n2551\\n2557\\n2579\\n2591\\n2593\\n2609\\n2617\\n2621\\n2633\\n2647\\n2657\\n2659\\n2663\\n2671\\n2677\\n2683\\n2687\\n2689\\n2693\\n2699\\n2707\\n2711\\n2713\\n2719\\n2729\\n2731\\n2741\\n2749\\n2753\\n2767\\n2777\\n2789\\n2791\\n2797\\n2801\\n2803\\n2819\\n2833\\n2837\\n2843\\n2851\\n2857\\n2861\\n2879\\n2887\\n2897\\n2903\\n2909\\n2917\\n2927\\n2939\\n2953\\n2957\\n2963\\n2969\\n2971\\n2999\\n3001\\n3011\\n3019\\n3023\\n3037\\n3041\\n3049\\n3061\\n3067\\n3079\\n3083\\n3089\\n3109\\n3119\", \"base64Encoded\": false, \"contentType\": \"text/plain\"}}\n",
"error": "",
"result": {
"output.txt": {
"content": "2\n3\n5\n7\n11\n13\n17\n19\n23\n29\n31\n37\n41\n43\n47\n53\n59\n61\n67\n71\n73\n79\n83\n89\n97\n101\n103\n107\n109\n113\n127\n131\n137\n139\n149\n151\n157\n163\n167\n173\n179\n181\n191\n193\n197\n199\n211\n223\n227\n229\n233\n239\n241\n251\n257\n263\n269\n271\n277\n281\n283\n293\n307\n311\n313\n317\n331\n337\n347\n349\n353\n359\n367\n373\n379\n383\n389\n397\n401\n409\n419\n421\n431\n433\n439\n443\n449\n457\n461\n463\n467\n479\n487\n491\n499\n503\n509\n521\n523\n541\n547\n557\n563\n569\n571\n577\n587\n593\n599\n601\n607\n613\n617\n619\n631\n641\n643\n647\n653\n659\n661\n673\n677\n683\n691\n701\n709\n719\n727\n733\n739\n743\n751\n757\n761\n769\n773\n787\n797\n809\n811\n821\n823\n827\n829\n839\n853\n857\n859\n863\n877\n881\n883\n887\n907\n911\n919\n929\n937\n941\n947\n953\n967\n971\n977\n983\n991\n997\n1009\n1013\n1019\n1021\n1031\n1033\n1039\n1049\n1051\n1061\n1063\n1069\n1087\n1091\n1093\n1097\n1103\n1109\n1117\n1123\n1129\n1151\n1153\n1163\n1171\n1181\n1187\n1193\n1201\n1213\n1217\n1223\n1229\n1231\n1237\n1249\n1259\n1277\n1279\n1283\n1289\n1291\n1297\n1301\n1303\n1307\n1319\n1321\n1327\n1361\n1367\n1373\n1381\n1399\n1409\n1423\n1427\n1429\n1433\n1439\n1447\n1451\n1453\n1459\n1471\n1481\n1483\n1487\n1489\n1493\n1499\n1511\n1523\n1531\n1543\n1549\n1553\n1559\n1567\n1571\n1579\n1583\n1597\n1601\n1607\n1609\n1613\n1619\n1621\n1627\n1637\n1657\n1663\n1667\n1669\n1693\n1697\n1699\n1709\n1721\n1723\n1733\n1741\n1747\n1753\n1759\n1777\n1783\n1787\n1789\n1801\n1811\n1823\n1831\n1847\n1861\n1867\n1871\n1873\n1877\n1879\n1889\n1901\n1907\n1913\n1931\n1933\n1949\n1951\n1973\n1979\n1987\n1993\n1997\n1999\n2003\n2011\n2017\n2027\n2029\n2039\n2053\n2063\n2069\n2081\n2083\n2087\n2089\n2099\n2111\n2113\n2129\n2131\n2137\n2141\n2143\n2153\n2161\n2179\n2203\n2207\n2213\n2221\n2237\n2239\n2243\n2251\n2267\n2269\n2273\n2281\n2287\n2293\n2297\n2309\n2311\n2333\n2339\n2341\n2347\n2351\n2357\n2371\n2377\n2381\n2383\n2389\n2393\n2399\n2411\n2417\n2423\n2437\n2441\n2447\n2459\n2
467\n2473\n2477\n2503\n2521\n2531\n2539\n2543\n2549\n2551\n2557\n2579\n2591\n2593\n2609\n2617\n2621\n2633\n2647\n2657\n2659\n2663\n2671\n2677\n2683\n2687\n2689\n2693\n2699\n2707\n2711\n2713\n2719\n2729\n2731\n2741\n2749\n2753\n2767\n2777\n2789\n2791\n2797\n2801\n2803\n2819\n2833\n2837\n2843\n2851\n2857\n2861\n2879\n2887\n2897\n2903\n2909\n2917\n2927\n2939\n2953\n2957\n2963\n2969\n2971\n2999\n3001\n3011\n3019\n3023\n3037\n3041\n3049\n3061\n3067\n3079\n3083\n3089\n3109\n3119",
"base64Encoded": false,
"contentType": "text/plain"
}
},
"exitCode": 0
}
}
]

View file

@ -1,444 +0,0 @@
2
3
5
7
11
13
17
19
23
29
31
37
41
43
47
53
59
61
67
71
73
79
83
89
97
101
103
107
109
113
127
131
137
139
149
151
157
163
167
173
179
181
191
193
197
199
211
223
227
229
233
239
241
251
257
263
269
271
277
281
283
293
307
311
313
317
331
337
347
349
353
359
367
373
379
383
389
397
401
409
419
421
431
433
439
443
449
457
461
463
467
479
487
491
499
503
509
521
523
541
547
557
563
569
571
577
587
593
599
601
607
613
617
619
631
641
643
647
653
659
661
673
677
683
691
701
709
719
727
733
739
743
751
757
761
769
773
787
797
809
811
821
823
827
829
839
853
857
859
863
877
881
883
887
907
911
919
929
937
941
947
953
967
971
977
983
991
997
1009
1013
1019
1021
1031
1033
1039
1049
1051
1061
1063
1069
1087
1091
1093
1097
1103
1109
1117
1123
1129
1151
1153
1163
1171
1181
1187
1193
1201
1213
1217
1223
1229
1231
1237
1249
1259
1277
1279
1283
1289
1291
1297
1301
1303
1307
1319
1321
1327
1361
1367
1373
1381
1399
1409
1423
1427
1429
1433
1439
1447
1451
1453
1459
1471
1481
1483
1487
1489
1493
1499
1511
1523
1531
1543
1549
1553
1559
1567
1571
1579
1583
1597
1601
1607
1609
1613
1619
1621
1627
1637
1657
1663
1667
1669
1693
1697
1699
1709
1721
1723
1733
1741
1747
1753
1759
1777
1783
1787
1789
1801
1811
1823
1831
1847
1861
1867
1871
1873
1877
1879
1889
1901
1907
1913
1931
1933
1949
1951
1973
1979
1987
1993
1997
1999
2003
2011
2017
2027
2029
2039
2053
2063
2069
2081
2083
2087
2089
2099
2111
2113
2129
2131
2137
2141
2143
2153
2161
2179
2203
2207
2213
2221
2237
2239
2243
2251
2267
2269
2273
2281
2287
2293
2297
2309
2311
2333
2339
2341
2347
2351
2357
2371
2377
2381
2383
2389
2393
2399
2411
2417
2423
2437
2441
2447
2459
2467
2473
2477
2503
2521
2531
2539
2543
2549
2551
2557
2579
2591
2593
2609
2617
2621
2633
2647
2657
2659
2663
2671
2677
2683
2687
2689
2693
2699
2707
2711
2713
2719
2729
2731
2741
2749
2753
2767
2777
2789
2791
2797
2801
2803
2819
2833
2837
2843
2851
2857
2861
2879
2887
2897
2903
2909
2917
2927
2939
2953
2957
2963
2969
2971
2999
3001
3011
3019
3023
3037
3041
3049
3061
3067
3079
3083
3089
3109
3119

View file

@ -1,41 +0,0 @@
# NOTE(review): presumably injected by the sandbox runner — confirm.
inputFiles = [] # DO NOT CHANGE THIS LINE
# REQUIREMENTS:
import json
def is_prime(n):
    """Return True if n is a prime number, False otherwise.

    Rejects negatives, 0 and 1; uses 6k±1 trial division bounded by
    math.isqrt(n), which is exact even for very large ints.
    """
    from math import isqrt  # local import keeps the script self-contained

    if n <= 1:
        return False
    if n <= 3:
        return True  # 2 and 3
    if n % 2 == 0 or n % 3 == 0:
        return False
    # Every prime > 3 has the form 6k ± 1; test only those candidates.
    for factor in range(5, isqrt(n) + 1, 6):
        if n % factor == 0 or n % (factor + 2) == 0:
            return False
    return True
def generate_primes(count):
    """Return a list of the first `count` primes (empty for count <= 0)."""
    primes_found = []
    n = 2
    while len(primes_found) < count:
        if is_prime(n):
            primes_found.append(n)
        n += 1
    return primes_found
# First 555 primes, serialized to stdout as a JSON result payload.
primes = generate_primes(555)
primes_content = "\n".join(str(p) for p in primes)

# Result contract: filename -> {content, base64Encoded, contentType}.
result = {
    "primes.txt": {
        "content": primes_content,
        "base64Encoded": False,
        "contentType": "text/plain",
    }
}
print(json.dumps(result))

View file

@ -1,19 +0,0 @@
[
{
"attempt": 1,
"code": "inputFiles = [] # DO NOT CHANGE THIS LINE\n\n# REQUIREMENTS: \n\nimport json\n\ndef is_prime(n):\n if n <= 1:\n return False\n if n <= 3:\n return True\n if n % 2 == 0 or n % 3 == 0:\n return False\n i = 5\n while i * i <= n:\n if n % i == 0 or n % (i + 2) == 0:\n return False\n i += 6\n return True\n\ndef generate_primes(count):\n primes = []\n num = 2\n while len(primes) < count:\n if is_prime(num):\n primes.append(num)\n num += 1\n return primes\n\nprimes = generate_primes(555)\nprimes_content = \"\\n\".join(map(str, primes))\n\nresult = {\n \"primes.txt\": {\n \"content\": primes_content,\n \"base64Encoded\": False,\n \"contentType\": \"text/plain\"\n }\n}\n\nprint(json.dumps(result))",
"result": {
"success": true,
"output": "{\"primes.txt\": {\"content\": \"2\\n3\\n5\\n7\\n11\\n13\\n17\\n19\\n23\\n29\\n31\\n37\\n41\\n43\\n47\\n53\\n59\\n61\\n67\\n71\\n73\\n79\\n83\\n89\\n97\\n101\\n103\\n107\\n109\\n113\\n127\\n131\\n137\\n139\\n149\\n151\\n157\\n163\\n167\\n173\\n179\\n181\\n191\\n193\\n197\\n199\\n211\\n223\\n227\\n229\\n233\\n239\\n241\\n251\\n257\\n263\\n269\\n271\\n277\\n281\\n283\\n293\\n307\\n311\\n313\\n317\\n331\\n337\\n347\\n349\\n353\\n359\\n367\\n373\\n379\\n383\\n389\\n397\\n401\\n409\\n419\\n421\\n431\\n433\\n439\\n443\\n449\\n457\\n461\\n463\\n467\\n479\\n487\\n491\\n499\\n503\\n509\\n521\\n523\\n541\\n547\\n557\\n563\\n569\\n571\\n577\\n587\\n593\\n599\\n601\\n607\\n613\\n617\\n619\\n631\\n641\\n643\\n647\\n653\\n659\\n661\\n673\\n677\\n683\\n691\\n701\\n709\\n719\\n727\\n733\\n739\\n743\\n751\\n757\\n761\\n769\\n773\\n787\\n797\\n809\\n811\\n821\\n823\\n827\\n829\\n839\\n853\\n857\\n859\\n863\\n877\\n881\\n883\\n887\\n907\\n911\\n919\\n929\\n937\\n941\\n947\\n953\\n967\\n971\\n977\\n983\\n991\\n997\\n1009\\n1013\\n1019\\n1021\\n1031\\n1033\\n1039\\n1049\\n1051\\n1061\\n1063\\n1069\\n1087\\n1091\\n1093\\n1097\\n1103\\n1109\\n1117\\n1123\\n1129\\n1151\\n1153\\n1163\\n1171\\n1181\\n1187\\n1193\\n1201\\n1213\\n1217\\n1223\\n1229\\n1231\\n1237\\n1249\\n1259\\n1277\\n1279\\n1283\\n1289\\n1291\\n1297\\n1301\\n1303\\n1307\\n1319\\n1321\\n1327\\n1361\\n1367\\n1373\\n1381\\n1399\\n1409\\n1423\\n1427\\n1429\\n1433\\n1439\\n1447\\n1451\\n1453\\n1459\\n1471\\n1481\\n1483\\n1487\\n1489\\n1493\\n1499\\n1511\\n1523\\n1531\\n1543\\n1549\\n1553\\n1559\\n1567\\n1571\\n1579\\n1583\\n1597\\n1601\\n1607\\n1609\\n1613\\n1619\\n1621\\n1627\\n1637\\n1657\\n1663\\n1667\\n1669\\n1693\\n1697\\n1699\\n1709\\n1721\\n1723\\n1733\\n1741\\n1747\\n1753\\n1759\\n1777\\n1783\\n1787\\n1789\\n1801\\n1811\\n1823\\n1831\\n1847\\n1861\\n1867\\n1871\\n1873\\n1877\\n1879\\n1889\\n1901\\n1907\\n1913\\n1931\\n1933\\n1949\\n1951\\n1973\\n1979\\n1987\\n1993\\n1997\\n1999\\n2003\\n2011\\n2017\\n2027\\n2029
\\n2039\\n2053\\n2063\\n2069\\n2081\\n2083\\n2087\\n2089\\n2099\\n2111\\n2113\\n2129\\n2131\\n2137\\n2141\\n2143\\n2153\\n2161\\n2179\\n2203\\n2207\\n2213\\n2221\\n2237\\n2239\\n2243\\n2251\\n2267\\n2269\\n2273\\n2281\\n2287\\n2293\\n2297\\n2309\\n2311\\n2333\\n2339\\n2341\\n2347\\n2351\\n2357\\n2371\\n2377\\n2381\\n2383\\n2389\\n2393\\n2399\\n2411\\n2417\\n2423\\n2437\\n2441\\n2447\\n2459\\n2467\\n2473\\n2477\\n2503\\n2521\\n2531\\n2539\\n2543\\n2549\\n2551\\n2557\\n2579\\n2591\\n2593\\n2609\\n2617\\n2621\\n2633\\n2647\\n2657\\n2659\\n2663\\n2671\\n2677\\n2683\\n2687\\n2689\\n2693\\n2699\\n2707\\n2711\\n2713\\n2719\\n2729\\n2731\\n2741\\n2749\\n2753\\n2767\\n2777\\n2789\\n2791\\n2797\\n2801\\n2803\\n2819\\n2833\\n2837\\n2843\\n2851\\n2857\\n2861\\n2879\\n2887\\n2897\\n2903\\n2909\\n2917\\n2927\\n2939\\n2953\\n2957\\n2963\\n2969\\n2971\\n2999\\n3001\\n3011\\n3019\\n3023\\n3037\\n3041\\n3049\\n3061\\n3067\\n3079\\n3083\\n3089\\n3109\\n3119\\n3121\\n3137\\n3163\\n3167\\n3169\\n3181\\n3187\\n3191\\n3203\\n3209\\n3217\\n3221\\n3229\\n3251\\n3253\\n3257\\n3259\\n3271\\n3299\\n3301\\n3307\\n3313\\n3319\\n3323\\n3329\\n3331\\n3343\\n3347\\n3359\\n3361\\n3371\\n3373\\n3389\\n3391\\n3407\\n3413\\n3433\\n3449\\n3457\\n3461\\n3463\\n3467\\n3469\\n3491\\n3499\\n3511\\n3517\\n3527\\n3529\\n3533\\n3539\\n3541\\n3547\\n3557\\n3559\\n3571\\n3581\\n3583\\n3593\\n3607\\n3613\\n3617\\n3623\\n3631\\n3637\\n3643\\n3659\\n3671\\n3673\\n3677\\n3691\\n3697\\n3701\\n3709\\n3719\\n3727\\n3733\\n3739\\n3761\\n3767\\n3769\\n3779\\n3793\\n3797\\n3803\\n3821\\n3823\\n3833\\n3847\\n3851\\n3853\\n3863\\n3877\\n3881\\n3889\\n3907\\n3911\\n3917\\n3919\\n3923\\n3929\\n3931\\n3943\\n3947\\n3967\\n3989\\n4001\\n4003\\n4007\\n4013\\n4019\", \"base64Encoded\": false, \"contentType\": \"text/plain\"}}\n",
"error": "",
"result": {
"primes.txt": {
"content": "2\n3\n5\n7\n11\n13\n17\n19\n23\n29\n31\n37\n41\n43\n47\n53\n59\n61\n67\n71\n73\n79\n83\n89\n97\n101\n103\n107\n109\n113\n127\n131\n137\n139\n149\n151\n157\n163\n167\n173\n179\n181\n191\n193\n197\n199\n211\n223\n227\n229\n233\n239\n241\n251\n257\n263\n269\n271\n277\n281\n283\n293\n307\n311\n313\n317\n331\n337\n347\n349\n353\n359\n367\n373\n379\n383\n389\n397\n401\n409\n419\n421\n431\n433\n439\n443\n449\n457\n461\n463\n467\n479\n487\n491\n499\n503\n509\n521\n523\n541\n547\n557\n563\n569\n571\n577\n587\n593\n599\n601\n607\n613\n617\n619\n631\n641\n643\n647\n653\n659\n661\n673\n677\n683\n691\n701\n709\n719\n727\n733\n739\n743\n751\n757\n761\n769\n773\n787\n797\n809\n811\n821\n823\n827\n829\n839\n853\n857\n859\n863\n877\n881\n883\n887\n907\n911\n919\n929\n937\n941\n947\n953\n967\n971\n977\n983\n991\n997\n1009\n1013\n1019\n1021\n1031\n1033\n1039\n1049\n1051\n1061\n1063\n1069\n1087\n1091\n1093\n1097\n1103\n1109\n1117\n1123\n1129\n1151\n1153\n1163\n1171\n1181\n1187\n1193\n1201\n1213\n1217\n1223\n1229\n1231\n1237\n1249\n1259\n1277\n1279\n1283\n1289\n1291\n1297\n1301\n1303\n1307\n1319\n1321\n1327\n1361\n1367\n1373\n1381\n1399\n1409\n1423\n1427\n1429\n1433\n1439\n1447\n1451\n1453\n1459\n1471\n1481\n1483\n1487\n1489\n1493\n1499\n1511\n1523\n1531\n1543\n1549\n1553\n1559\n1567\n1571\n1579\n1583\n1597\n1601\n1607\n1609\n1613\n1619\n1621\n1627\n1637\n1657\n1663\n1667\n1669\n1693\n1697\n1699\n1709\n1721\n1723\n1733\n1741\n1747\n1753\n1759\n1777\n1783\n1787\n1789\n1801\n1811\n1823\n1831\n1847\n1861\n1867\n1871\n1873\n1877\n1879\n1889\n1901\n1907\n1913\n1931\n1933\n1949\n1951\n1973\n1979\n1987\n1993\n1997\n1999\n2003\n2011\n2017\n2027\n2029\n2039\n2053\n2063\n2069\n2081\n2083\n2087\n2089\n2099\n2111\n2113\n2129\n2131\n2137\n2141\n2143\n2153\n2161\n2179\n2203\n2207\n2213\n2221\n2237\n2239\n2243\n2251\n2267\n2269\n2273\n2281\n2287\n2293\n2297\n2309\n2311\n2333\n2339\n2341\n2347\n2351\n2357\n2371\n2377\n2381\n2383\n2389\n2393\n2399\n2411\n2417\n2423\n2437\n2441\n2447\n2459\n2
467\n2473\n2477\n2503\n2521\n2531\n2539\n2543\n2549\n2551\n2557\n2579\n2591\n2593\n2609\n2617\n2621\n2633\n2647\n2657\n2659\n2663\n2671\n2677\n2683\n2687\n2689\n2693\n2699\n2707\n2711\n2713\n2719\n2729\n2731\n2741\n2749\n2753\n2767\n2777\n2789\n2791\n2797\n2801\n2803\n2819\n2833\n2837\n2843\n2851\n2857\n2861\n2879\n2887\n2897\n2903\n2909\n2917\n2927\n2939\n2953\n2957\n2963\n2969\n2971\n2999\n3001\n3011\n3019\n3023\n3037\n3041\n3049\n3061\n3067\n3079\n3083\n3089\n3109\n3119\n3121\n3137\n3163\n3167\n3169\n3181\n3187\n3191\n3203\n3209\n3217\n3221\n3229\n3251\n3253\n3257\n3259\n3271\n3299\n3301\n3307\n3313\n3319\n3323\n3329\n3331\n3343\n3347\n3359\n3361\n3371\n3373\n3389\n3391\n3407\n3413\n3433\n3449\n3457\n3461\n3463\n3467\n3469\n3491\n3499\n3511\n3517\n3527\n3529\n3533\n3539\n3541\n3547\n3557\n3559\n3571\n3581\n3583\n3593\n3607\n3613\n3617\n3623\n3631\n3637\n3643\n3659\n3671\n3673\n3677\n3691\n3697\n3701\n3709\n3719\n3727\n3733\n3739\n3761\n3767\n3769\n3779\n3793\n3797\n3803\n3821\n3823\n3833\n3847\n3851\n3853\n3863\n3877\n3881\n3889\n3907\n3911\n3917\n3919\n3923\n3929\n3931\n3943\n3947\n3967\n3989\n4001\n4003\n4007\n4013\n4019",
"base64Encoded": false,
"contentType": "text/plain"
}
},
"exitCode": 0
}
}
]

View file

@ -1,555 +0,0 @@
2
3
5
7
11
13
17
19
23
29
31
37
41
43
47
53
59
61
67
71
73
79
83
89
97
101
103
107
109
113
127
131
137
139
149
151
157
163
167
173
179
181
191
193
197
199
211
223
227
229
233
239
241
251
257
263
269
271
277
281
283
293
307
311
313
317
331
337
347
349
353
359
367
373
379
383
389
397
401
409
419
421
431
433
439
443
449
457
461
463
467
479
487
491
499
503
509
521
523
541
547
557
563
569
571
577
587
593
599
601
607
613
617
619
631
641
643
647
653
659
661
673
677
683
691
701
709
719
727
733
739
743
751
757
761
769
773
787
797
809
811
821
823
827
829
839
853
857
859
863
877
881
883
887
907
911
919
929
937
941
947
953
967
971
977
983
991
997
1009
1013
1019
1021
1031
1033
1039
1049
1051
1061
1063
1069
1087
1091
1093
1097
1103
1109
1117
1123
1129
1151
1153
1163
1171
1181
1187
1193
1201
1213
1217
1223
1229
1231
1237
1249
1259
1277
1279
1283
1289
1291
1297
1301
1303
1307
1319
1321
1327
1361
1367
1373
1381
1399
1409
1423
1427
1429
1433
1439
1447
1451
1453
1459
1471
1481
1483
1487
1489
1493
1499
1511
1523
1531
1543
1549
1553
1559
1567
1571
1579
1583
1597
1601
1607
1609
1613
1619
1621
1627
1637
1657
1663
1667
1669
1693
1697
1699
1709
1721
1723
1733
1741
1747
1753
1759
1777
1783
1787
1789
1801
1811
1823
1831
1847
1861
1867
1871
1873
1877
1879
1889
1901
1907
1913
1931
1933
1949
1951
1973
1979
1987
1993
1997
1999
2003
2011
2017
2027
2029
2039
2053
2063
2069
2081
2083
2087
2089
2099
2111
2113
2129
2131
2137
2141
2143
2153
2161
2179
2203
2207
2213
2221
2237
2239
2243
2251
2267
2269
2273
2281
2287
2293
2297
2309
2311
2333
2339
2341
2347
2351
2357
2371
2377
2381
2383
2389
2393
2399
2411
2417
2423
2437
2441
2447
2459
2467
2473
2477
2503
2521
2531
2539
2543
2549
2551
2557
2579
2591
2593
2609
2617
2621
2633
2647
2657
2659
2663
2671
2677
2683
2687
2689
2693
2699
2707
2711
2713
2719
2729
2731
2741
2749
2753
2767
2777
2789
2791
2797
2801
2803
2819
2833
2837
2843
2851
2857
2861
2879
2887
2897
2903
2909
2917
2927
2939
2953
2957
2963
2969
2971
2999
3001
3011
3019
3023
3037
3041
3049
3061
3067
3079
3083
3089
3109
3119
3121
3137
3163
3167
3169
3181
3187
3191
3203
3209
3217
3221
3229
3251
3253
3257
3259
3271
3299
3301
3307
3313
3319
3323
3329
3331
3343
3347
3359
3361
3371
3373
3389
3391
3407
3413
3433
3449
3457
3461
3463
3467
3469
3491
3499
3511
3517
3527
3529
3533
3539
3541
3547
3557
3559
3571
3581
3583
3593
3607
3613
3617
3623
3631
3637
3643
3659
3671
3673
3677
3691
3697
3701
3709
3719
3727
3733
3739
3761
3767
3769
3779
3793
3797
3803
3821
3823
3833
3847
3851
3853
3863
3877
3881
3889
3907
3911
3917
3919
3923
3929
3931
3943
3947
3967
3989
4001
4003
4007
4013
4019

View file

@ -1,717 +0,0 @@
/**
* workflow.js
* The main coordinator module for the workflow functionality
* Acts as the entry point and orchestrates interactions between all other modules
* Implements a state machine for workflow status management
*/
import api from '../shared/apiCalls.js';
import * as WorkflowCoordination from './workflowCoordination.js';
import * as WorkflowUI from './workflowUi.js';
import * as WorkflowData from './workflowData.js';
// DOM elements mapping
// Populated once by initDomElements(); keys are logical element names,
// values are the matching DOM nodes (null when the element is missing).
const domElements = {};
// State machine constants
// Lifecycle states for a workflow; NULL means "no workflow active".
const WORKFLOW_STATES = {
    NULL: null,
    RUNNING: 'running',
    COMPLETED: 'completed',
    FAILED: 'failed',
    STOPPED: 'stopped'
};
/**
 * Initializes the workflow module
 * Wires together the coordination, UI and data layers, registers event
 * listeners and file handling, then shows the initial prompt view.
 * Any initialization error is reported through the global error UI.
 * @param {Object} globalStateObj - Global application state
 */
function initWorkflowModule(globalStateObj) {
    console.log("Initializing workflow module...");
    try {
        // Initialize DOM elements
        initDomElements();
        // Initialize coordination layer with initial state
        WorkflowCoordination.initCoordination({
            status: WORKFLOW_STATES.NULL,
            workflowId: "",
            logs: [],
            chatMessages: [],
            lastPolledLogId: null,
            lastPolledMessageId: null,
            dataStats: {
                bytesSent: 0,
                bytesReceived: 0,
                tokensUsed: 0
            },
            pollFailCount: 0
        });
        // Make DOM elements available to coordination layer
        WorkflowCoordination.userInputState.domElements = domElements;
        // Initialize UI layer with callbacks
        WorkflowUI.initUI(
            WorkflowCoordination.getWorkflowState(),
            {
                onResetWorkflow: resetWorkflow,
                onStopWorkflow: stopWorkflow,
                onLayoutChange: handleLayoutChange
            }
        );
        // Initialize data layer
        WorkflowData.initDataLayer(globalStateObj);
        // Setup event listeners
        setupEventListeners();
        // Initialize file handling
        initFileHandling();
        // Load prompt options (falls back to an empty list when none exist)
        if (globalStateObj && globalStateObj.mainView) {
            loadPromptOptions(globalStateObj.mainView.availablePrompts || []);
        }
        // Show initial prompt view
        WorkflowCoordination.showInitialPromptView();
        console.log("Workflow module successfully initialized with state:", WORKFLOW_STATES.NULL);
    } catch (error) {
        console.error("Error initializing workflow module:", error);
        window.utils.ui.showError("Failed to initialize workflow module: " + error.message);
    }
}
/**
 * Initializes all DOM element references
 * Resolves every element the workflow UI needs into the shared
 * `domElements` map, then reports which lookups failed.
 */
function initDomElements() {
    console.log("Initializing DOM elements");
    // Main containers are addressed by class selector.
    const bySelector = {
        workflowContainer: '.workflow-container',
        workflowHeader: '.workflow-header',
        chatSection: '.chat-section',
        workflowFooter: '.workflow-footer'
    };
    for (const [key, selector] of Object.entries(bySelector)) {
        domElements[key] = document.querySelector(selector);
    }
    // All remaining UI pieces are addressed by element id.
    const byId = {
        resetBtn: 'reset-btn',
        stopWorkflowBtn: 'stop-workflow-btn',
        executionLog: 'execution-log',
        agentChatMessages: 'agent-chat-messages',
        emptyChatState: 'empty-chat-state',
        userMessageInput: 'user-message-input',
        sendUserMessageBtn: 'send-user-message-btn',
        toggleHeaderBtn: 'toggle-header-btn',
        userInputArea: 'user-input-area',
        filePreviewContainer: 'file-preview-container',
        filePreviewContent: 'file-preview-content',
        downloadFileBtn: 'download-file-btn',
        copyFileBtn: 'copy-file-btn',
        uploadAdditionalFileBtn: 'upload-additional-file-btn',
        additionalFileInput: 'additional-file-input',
        additionalFilesContainer: 'additional-files-container',
        promptSelectMain: 'prompt-select-main',
        dataStatisticsEl: 'data-statistics'
    };
    for (const [key, id] of Object.entries(byId)) {
        domElements[key] = document.getElementById(id);
    }
    // Log found elements vs missing elements so broken markup is easy to spot.
    const allKeys = Object.keys(domElements);
    const missingElements = allKeys.filter(key => domElements[key] === null);
    const foundElements = allKeys.length - missingElements.length;
    console.log(`DOM elements initialized: ${foundElements} found, ${missingElements.length} missing`);
    if (missingElements.length > 0) {
        console.warn("Missing DOM elements:", missingElements.join(', '));
    }
}
/**
 * Sets up event listeners
 * Registers handlers for user input, the send/reset/stop buttons, prompt
 * selection, and the custom 'workflowStatusChanged' document event.
 */
function setupEventListeners() {
    console.log("Setting up event listeners");
    // User input handling
    if (domElements.userMessageInput) {
        // Track input changes so the coordination layer always has the latest text
        domElements.userMessageInput.addEventListener('input', (e) => {
            WorkflowCoordination.userInputState.promptText = e.target.value;
        });
        // Handle Enter key for submission (Shift+Enter still inserts a newline)
        domElements.userMessageInput.addEventListener('keydown', (e) => {
            if (e.key === 'Enter' && !e.shiftKey) {
                e.preventDefault();
                sendUserResponse();
            }
        });
    }
    // Send button
    if (domElements.sendUserMessageBtn) {
        // Remove any existing listeners by swapping the node for a clone
        const newButton = domElements.sendUserMessageBtn.cloneNode(true);
        if (domElements.sendUserMessageBtn.parentNode) {
            domElements.sendUserMessageBtn.parentNode.replaceChild(newButton, domElements.sendUserMessageBtn);
        }
        domElements.sendUserMessageBtn = newButton;
        newButton.addEventListener('click', sendUserResponse);
    }
    // Reset button
    if (domElements.resetBtn) {
        domElements.resetBtn.addEventListener('click', resetWorkflow);
    }
    // Stop workflow button
    if (domElements.stopWorkflowBtn) {
        domElements.stopWorkflowBtn.addEventListener('click', stopWorkflow);
    }
    // Prompt selection
    if (domElements.promptSelectMain) {
        domElements.promptSelectMain.addEventListener('change', handlePromptSelection);
    }
    // Add custom event listeners for workflow state changes
    document.addEventListener('workflowStatusChanged', handleWorkflowStatusChange);
}
/**
* Handler for workflow status change events
* @param {CustomEvent} event - The status change event
*/
function handleWorkflowStatusChange(event) {
const { status, previousStatus, options } = event.detail;
console.log(`Workflow status transition: ${previousStatus}${status}`);
// Update UI based on state transitions
switch (status) {
case WORKFLOW_STATES.RUNNING:
// Show running UI state
if (domElements.stopWorkflowBtn) {
domElements.stopWorkflowBtn.style.display = 'inline-block';
}
if (domElements.emptyChatState) {
domElements.emptyChatState.style.display = 'none';
}
if (domElements.agentChatMessages) {
domElements.agentChatMessages.style.display = 'block';
}
break;
case WORKFLOW_STATES.COMPLETED:
// Show completed UI state
if (domElements.stopWorkflowBtn) {
domElements.stopWorkflowBtn.style.display = 'none';
}
WorkflowCoordination.showInitialPromptView();
break;
case WORKFLOW_STATES.FAILED:
// Show failed UI state with retry option
if (domElements.stopWorkflowBtn) {
domElements.stopWorkflowBtn.style.display = 'none';
}
WorkflowCoordination.showInitialPromptView();
// Optionally show error UI
window.utils.ui.showToast(options.message || "Workflow failed", "error");
break;
case WORKFLOW_STATES.STOPPED:
// Show stopped UI state with resume option
if (domElements.stopWorkflowBtn) {
domElements.stopWorkflowBtn.style.display = 'none';
}
WorkflowCoordination.showInitialPromptView();
break;
case WORKFLOW_STATES.NULL:
// Reset to initial state
if (domElements.stopWorkflowBtn) {
domElements.stopWorkflowBtn.style.display = 'none';
}
if (domElements.emptyChatState) {
domElements.emptyChatState.style.display = 'flex';
}
if (domElements.agentChatMessages) {
domElements.agentChatMessages.style.display = 'none';
}
WorkflowCoordination.showInitialPromptView();
break;
}
}
/**
 * Initializes file handling functionality
 * Hooks the "upload file" button to the hidden file input and enables
 * drag & drop on the user input area.
 */
function initFileHandling() {
    // Setup file input handling
    if (domElements.uploadAdditionalFileBtn && domElements.additionalFileInput) {
        domElements.uploadAdditionalFileBtn.addEventListener('click', () => {
            domElements.additionalFileInput.click();
        });
        domElements.additionalFileInput.addEventListener('change', (event) => {
            const files = event.target.files;
            for (let file of files) {
                handleFileSelection(file);
            }
            // Clear the input so selecting the same file again re-fires 'change'
            event.target.value = '';
        });
    }
    // Initialize drag & drop
    if (domElements.userInputArea) {
        initDragAndDrop(domElements.userInputArea);
    }
}
/**
 * Initializes drag and drop functionality
 * @param {HTMLElement} dropArea - The area where files can be dropped
 */
function initDragAndDrop(dropArea) {
    console.log("Initializing drag & drop for files");
    const swallow = (e) => {
        e.preventDefault();
        e.stopPropagation();
    };
    // Disable the browser's default handling for all drag-related events.
    for (const eventName of ['dragenter', 'dragover', 'dragleave', 'drop']) {
        dropArea.addEventListener(eventName, swallow, false);
    }
    const highlight = () => dropArea.classList.add('dragging');
    const unhighlight = () => dropArea.classList.remove('dragging');
    // Visual feedback while a file hovers over the drop area.
    dropArea.addEventListener('dragenter', highlight);
    dropArea.addEventListener('dragover', highlight);
    dropArea.addEventListener('dragleave', unhighlight);
    // On drop, route every file through the normal selection pipeline.
    dropArea.addEventListener('drop', (e) => {
        unhighlight();
        const files = e.dataTransfer.files;
        if (files.length > 0) {
            for (const file of files) {
                handleFileSelection(file);
            }
        }
    });
}
/**
 * Handles file selection from input or drop
 * Uploads the file via the API and, on success, adds it to the list of
 * additional files (deduplicated by id) and refreshes the file UI.
 * Upload errors are reported as a toast, never thrown to the caller.
 * @param {File} file - The selected file
 */
async function handleFileSelection(file) {
    try {
        // Upload and process file
        const processedFile = await api.uploadFile(file);
        // Add to additional files list if successful
        if (processedFile) {
            // Skip files that were already attached (matched by id)
            const fileExists = WorkflowCoordination.userInputState.additionalFiles.some(f => f.id === processedFile.id);
            if (!fileExists) {
                WorkflowCoordination.userInputState.additionalFiles.push(processedFile);
                WorkflowUI.renderAdditionalFiles(WorkflowCoordination.userInputState.additionalFiles);
                WorkflowUI.updatePromptVisualization();
            }
        }
    } catch (error) {
        console.error(`Error processing file ${file.name}:`, error);
        window.utils.ui.showToast("File Processing Error", `Failed to process file ${file.name}: ${error.message}`, "error");
    }
}
/**
 * Loads prompt options into the select dropdown
 * Keeps the first (default) option and rebuilds the rest from the given
 * prompt templates.
 * @param {Array} prompts - Available prompt templates
 */
function loadPromptOptions(prompts) {
    const select = domElements.promptSelectMain;
    if (!select || !prompts) {
        return;
    }
    // Drop everything except the leading default option.
    while (select.options.length > 1) {
        select.remove(1);
    }
    // Append one <option> per available prompt template.
    for (const prompt of prompts) {
        const option = document.createElement('option');
        option.value = prompt.id;
        option.textContent = prompt.name || `Prompt ${prompt.id}`;
        select.appendChild(option);
    }
}
/**
 * Handles selection from the prompt dropdown
 * Copies the selected template's content into the message input (and the
 * coordination state); clearing the selection clears the input.
 */
function handlePromptSelection() {
    if (!domElements.promptSelectMain || !domElements.userMessageInput) {
        return;
    }
    const selectedPromptId = domElements.promptSelectMain.value;
    if (!selectedPromptId) {
        // Empty selection: reset the input text
        domElements.userMessageInput.value = '';
        WorkflowCoordination.userInputState.promptText = '';
        return;
    }
    // Find selected prompt (ids compared as strings to match the option value)
    const prompts = window.globalState?.mainView?.availablePrompts || [];
    const selectedPrompt = prompts.find(p => String(p.id) === selectedPromptId);
    if (selectedPrompt) {
        domElements.userMessageInput.value = selectedPrompt.content;
        WorkflowCoordination.userInputState.promptText = selectedPrompt.content;
        domElements.userMessageInput.focus();
    }
}
/**
 * Sends a user response to the workflow based on current state
 * Dispatches to one of three paths: resume a terminal workflow, feed a
 * running workflow, or create a new one. The UI is locked while the
 * request is in flight and unlocked in the finally block.
 */
async function sendUserResponse() {
    if (!domElements.userMessageInput) {
        console.error("Error: userMessageInput not found");
        window.utils.ui.showToast("No input available", "error");
        return;
    }
    // Get user message (coordination state mirrors the input field)
    const userMessage = WorkflowCoordination.userInputState.promptText.trim();
    if (!userMessage) {
        window.utils.ui.showToast("Missing Input. Please enter a message", "warning");
        return;
    }
    // Get current workflow state
    const workflowState = WorkflowCoordination.getWorkflowState();
    // Set loading state
    WorkflowCoordination.setLoadingState(true);
    try {
        // Create or continue workflow based on current state
        if (workflowState.workflowId &&
            [WORKFLOW_STATES.COMPLETED, WORKFLOW_STATES.FAILED, WORKFLOW_STATES.STOPPED].includes(workflowState.status)) {
            // This is a continuation of a completed/failed/stopped workflow
            await continueExistingWorkflow(workflowState.workflowId, userMessage);
        } else if (workflowState.workflowId && workflowState.status === WORKFLOW_STATES.RUNNING) {
            // Continuing a running workflow
            await continueRunningWorkflow(workflowState.workflowId, userMessage);
        } else {
            // Starting a new workflow
            await createNewWorkflow(userMessage);
        }
        // Reset input after successful submission
        domElements.userMessageInput.value = '';
        WorkflowCoordination.userInputState.promptText = '';
        WorkflowCoordination.clearAttachedFiles();
    } catch (error) {
        console.error("Error sending message:", error);
        // Transition to failed state if we were in running state
        if (workflowState.status === WORKFLOW_STATES.RUNNING) {
            WorkflowCoordination.updateWorkflowStatus(WORKFLOW_STATES.FAILED, {
                message: `Communication error: ${error.message}`,
                systemMessage: "Failed to process your request"
            });
        }
        window.utils.ui.showToast(`Communication Error: Failed to send message: ${error.message}`, "error");
    } finally {
        // Unlock UI
        WorkflowCoordination.setLoadingState(false);
    }
}
/**
 * Creates a new workflow with user prompt
 * Transitions to RUNNING optimistically, creates the workflow via the data
 * layer, then starts status polling. On failure the state is reverted to
 * NULL and the error re-thrown for the caller's handler.
 * @param {string} userMessage - User prompt
 */
async function createNewWorkflow(userMessage) {
    console.log("Starting new workflow");
    // Add log entry for starting workflow
    const startLog = WorkflowCoordination.addLogEntry("Starting new workflow...", "info");
    try {
        // Transition state to running before API call
        WorkflowCoordination.updateWorkflowStatus(WORKFLOW_STATES.RUNNING, {
            message: "Preparing workflow",
            logId: startLog.id
        });
        // Create new workflow
        const response = await WorkflowData.createWorkflow(
            userMessage,
            WorkflowCoordination.userInputState.additionalFiles
        );
        if (!response || !response.id) {
            throw new Error("Failed to start workflow: No workflow ID received");
        }
        // Set active workflow
        WorkflowCoordination.setActiveWorkflow(response.id);
        // Start polling for updates; the catch prevents an unhandled rejection
        WorkflowData.pollWorkflowStatus(response.id).catch(error => {
            console.error("Error in polling process:", error);
            // Optionally handle the error (e.g., show a notification)
        });
    } catch (error) {
        console.error("Error creating workflow:", error);
        // Revert to null state
        WorkflowCoordination.updateWorkflowStatus(WORKFLOW_STATES.NULL, {
            message: `Failed to create workflow: ${error.message}`,
            systemMessage: "Failed to start workflow"
        });
        throw error;
    }
}
/**
 * Continues a completed, failed, or stopped workflow
 * Transitions back to RUNNING, submits the user's message plus any
 * attached file ids, then restarts status polling. On submission failure
 * the state moves to FAILED and the error is re-thrown.
 * @param {string} workflowId - ID of the workflow
 * @param {string} userMessage - User prompt
 */
async function continueExistingWorkflow(workflowId, userMessage) {
    console.log(`Continuing workflow ${workflowId} from ${WorkflowCoordination.getWorkflowStatus()} state`);
    // Add log entry
    const continueLog = WorkflowCoordination.addLogEntry("Continuing workflow...", "info");
    try {
        // Transition state to running
        WorkflowCoordination.updateWorkflowStatus(WORKFLOW_STATES.RUNNING, {
            message: "Resuming workflow",
            logId: continueLog.id
        });
        // Get file IDs
        const additionalFileIds = WorkflowCoordination.userInputState.additionalFiles.map(file => file.id);
        // Submit user input
        await WorkflowData.submitUserInput(
            workflowId,
            userMessage,
            additionalFileIds
        );
        // Start polling; handle rejections explicitly so a polling failure
        // does not surface as an unhandled promise rejection (same pattern
        // as createNewWorkflow)
        WorkflowData.pollWorkflowStatus(workflowId).catch(error => {
            console.error("Error in polling process:", error);
        });
    } catch (error) {
        console.error("Error continuing workflow:", error);
        // Revert to previous state
        WorkflowCoordination.updateWorkflowStatus(WORKFLOW_STATES.FAILED, {
            message: `Failed to continue workflow: ${error.message}`,
            systemMessage: "Failed to process your request"
        });
        throw error;
    }
}
/**
 * Continues a running workflow
 * Submits the user's message plus attached file ids to the workflow that
 * is already in progress, re-asserts the RUNNING state if it drifted, and
 * keeps polling. Submission errors are re-thrown to the caller.
 * @param {string} workflowId - ID of the workflow
 * @param {string} userMessage - User prompt
 */
async function continueRunningWorkflow(workflowId, userMessage) {
    console.log(`Continuing running workflow ${workflowId}`);
    try {
        // Get file IDs
        const additionalFileIds = WorkflowCoordination.userInputState.additionalFiles.map(file => file.id);
        // Submit user input
        await WorkflowData.submitUserInput(
            workflowId,
            userMessage,
            additionalFileIds
        );
        // Update logs
        WorkflowCoordination.addLogEntry("Continuing workflow with user input", "info");
        // Make sure we're still in running state
        if (WorkflowCoordination.getWorkflowStatus() !== WORKFLOW_STATES.RUNNING) {
            WorkflowCoordination.updateWorkflowStatus(WORKFLOW_STATES.RUNNING, {
                message: "Processing user input"
            });
        }
        // Poll for updates; handle rejections explicitly so a polling
        // failure does not surface as an unhandled promise rejection
        // (same pattern as createNewWorkflow)
        WorkflowData.pollWorkflowStatus(workflowId).catch(error => {
            console.error("Error in polling process:", error);
        });
    } catch (error) {
        console.error("Error submitting to running workflow:", error);
        throw error;
    }
}
/**
 * Stops the current workflow
 * Disables polling synchronously before the async stop call to avoid a
 * race with in-flight poll cycles, then transitions to STOPPED — even if
 * the stop API call itself fails.
 */
async function stopWorkflow() {
    const workflowState = WorkflowCoordination.getWorkflowState();
    if (!workflowState.workflowId || workflowState.status !== WORKFLOW_STATES.RUNNING) {
        console.warn("No running workflow to stop");
        return;
    }
    try {
        // IMMEDIATELY modify state before async operations
        // This prevents race conditions during workflow stopping
        workflowState.pollActive = false;
        // Clear polling sequence to prevent orphaned polling
        if (workflowState._pollingSequenceId) {
            workflowState._pollingSequenceId = null;
        }
        // Add log entry
        WorkflowCoordination.addLogEntry("Stopping workflow...", "warning");
        // Call API to stop workflow
        await WorkflowData.stopWorkflow(workflowState.workflowId);
        // Update to stopped status
        WorkflowCoordination.updateWorkflowStatus(WORKFLOW_STATES.STOPPED, {
            message: "Workflow has been stopped",
            systemMessage: "Workflow was manually stopped"
        });
        console.log("Workflow stopped successfully, polling disabled");
    } catch (error) {
        console.error("Error stopping workflow:", error);
        // Log error but still try to update UI state
        WorkflowCoordination.addLogEntry(`Error stopping workflow: ${error.message}`, "error");
        // Force status to stopped even if API call failed
        WorkflowCoordination.updateWorkflowStatus(WORKFLOW_STATES.STOPPED, {
            message: "Workflow stopped with errors",
            systemMessage: "Workflow was stopped but there were errors"
        });
    }
}
/**
 * Resets the workflow completely
 * Fix: the previous implementation referenced `workflowState`,
 * `userInputState` and `stopWaitingAnimation` directly, but those are
 * module-private to workflowCoordination.js and not in scope here, so
 * every invocation threw a ReferenceError. All state access now goes
 * through the WorkflowCoordination API this module already imports.
 */
function resetWorkflow() {
    const state = WorkflowCoordination.getWorkflowState();
    // Stop any ongoing polling immediately to avoid orphaned poll loops
    state.pollActive = false;
    // Clear polling sequence to prevent orphaned polling
    if (state._pollingSequenceId) {
        state._pollingSequenceId = null;
    }
    // Transition the state machine back to the initial (null) state; the
    // coordination layer stops the waiting animation and notifies the UI
    WorkflowCoordination.updateWorkflowStatus(WORKFLOW_STATES.NULL, {
        message: "Workflow reset"
    });
    // Clear accumulated workflow data in place — the state object is owned
    // by the coordination module, so it must be mutated, not replaced
    state.workflowId = "";
    state.logs = [];
    state.chatMessages = [];
    state.lastPolledLogId = null;
    state.lastPolledMessageId = null;
    state.dataStats = {
        bytesSent: 0,
        bytesReceived: 0,
        tokensUsed: 0
    };
    state.pollFailCount = 0;
    // Reset user input and attached files
    WorkflowCoordination.userInputState.promptText = "";
    WorkflowCoordination.clearAttachedFiles();
    // Dispatch event for UI update
    const event = new CustomEvent('workflowReset');
    document.dispatchEvent(event);
    console.log("Workflow state reset");
}
/**
 * Handles layout changes
 * Collapses or expands the workflow header according to the config flags,
 * then triggers a window resize so dependent layout recalculates.
 * @param {Object} layoutConfig - Layout configuration
 */
function handleLayoutChange(layoutConfig) {
    console.log("Layout change:", layoutConfig);
    const header = domElements.workflowHeader;
    if (layoutConfig.collapseHeader && header) {
        header.classList.add('collapsed');
    }
    if (layoutConfig.expandHeader && header) {
        header.classList.remove('collapsed');
    }
    // Force layout adjustment shortly after the class change
    setTimeout(() => {
        window.dispatchEvent(new Event('resize'));
    }, 100);
}
// Export the initialization function
export {
initWorkflowModule,
WORKFLOW_STATES
};

View file

@ -1,672 +0,0 @@
/**
* Central coordination layer for workflow state management
* Handles state transitions and provides methods for updating state
*/
// Workflow state with simplified status management
// Single mutable object; it is mutated in place by updateWorkflowStatus
// and friends, so external holders of a reference see updates live.
let workflowState = {
    status: null, // null or 'running', 'completed', 'failed', 'stopped'
    workflowId: "",
    logs: [],
    chatMessages: [],
    lastPolledLogId: null,
    lastPolledMessageId: null,
    dataStats: {
        bytesSent: 0,
        bytesReceived: 0,
        tokensUsed: 0
    },
    pollFailCount: 0, // For tracking consecutive polling failures
    pollActive: false
};
// User input state
let userInputState = {
    promptText: "", // Current prompt text
    additionalFiles: [], // Additional files to send with prompt
    domElements: {} // DOM element references
};
// Waiting animation state
// Interval handle for the waiting-dots animation and the id of the log
// entry currently animated (both null when no animation is running).
let waitingDotsInterval = null;
let lastWaitingLogId = null;
/**
 * Initializes the coordination module with a workflow state
 * Merges the provided initial state over the defaults and registers the
 * 'removeAdditionalFile' document listener used by the file-chip UI.
 * @param {Object} initialState - Initial workflow state
 */
function initCoordination(initialState) {
    console.log("Initializing workflow coordination...");
    // Initialize state with provided state or defaults
    if (initialState) {
        workflowState = { ...workflowState, ...initialState };
    }
    document.addEventListener('removeAdditionalFile', function(event) {
        if (!event.detail || typeof event.detail.index !== 'number') {
            console.error("Missing index in removeAdditionalFile event");
            return;
        }
        const index = event.detail.index;
        // Validate index is in range
        if (index >= 0 && index < userInputState.additionalFiles.length) {
            // Remove the file at the specified index
            userInputState.additionalFiles.splice(index, 1);
            // Update UI components
            const filesEvent = new CustomEvent('filesUpdated', {
                detail: { files: userInputState.additionalFiles }
            });
            document.dispatchEvent(filesEvent);
        }
    });
    console.log("Workflow coordination initialized with state:", workflowState);
}
/**
* Updates workflow status and synchronizes UI
* @param {string} newStatus - New status (null, 'running', 'completed', 'failed', 'stopped')
* @param {Object} options - Additional options (message, workflowId, etc.)
* @returns {Object} Updated workflow state
*/
function updateWorkflowStatus(newStatus, options = {}) {
// Take a snapshot of the current state to detect race conditions
const prevStatus = workflowState.status;
const prevPollActive = workflowState.pollActive;
// Validate state transition
if (!isValidStateTransition(prevStatus, newStatus)) {
console.warn(`Invalid state transition: ${prevStatus}${newStatus}`);
return workflowState;
}
// Update workflow ID if provided
if (options.workflowId) {
workflowState.workflowId = options.workflowId;
}
// Log status change
console.log(`Workflow status change: ${prevStatus}${newStatus}`,
options.message ? `(${options.message})` : '');
// Set the new status - THIS MUST HAPPEN BEFORE MODIFYING pollActive
workflowState.status = newStatus;
// Reset poll fail count on valid status changes
if (newStatus !== prevStatus) {
workflowState.pollFailCount = 0;
}
if (newStatus === 'running' || newStatus === 'completed') {
workflowState.pollActive = true;
console.log(`Status changed to ${newStatus}, polling remains active`);
} else {
// For other states ('failed', 'stopped', null), disable polling
workflowState.pollActive = false;
console.log(`Status changed to ${newStatus}, polling deactivated`);
}
// Log if polling state changed to help debug race conditions
if (prevPollActive !== workflowState.pollActive) {
console.log(`Polling state changed: ${prevPollActive}${workflowState.pollActive}`);
}
// Add log entry if message provided
if (options.message) {
let logType = 'info';
if (newStatus === 'completed') logType = 'success';
if (newStatus === 'failed') logType = 'error';
if (newStatus === 'stopped') logType = 'warning';
addLogEntry(options.message, logType, null, options.agent || 'System');
}
// Status-specific actions
switch (newStatus) {
case 'running':
// Start/continue animation on log if specified
if (options.logId) {
startWaitingAnimation(options.logId);
} else if (workflowState.logs.length > 0) {
const recentLogs = workflowState.logs
.filter(log => log.type === 'info' && !log.message.includes('completed'))
.sort((a, b) => new Date(b.timestamp) - new Date(a.timestamp));
if (recentLogs.length > 0) {
startWaitingAnimation(recentLogs[0].id);
}
}
break;
case 'completed':
case 'failed':
case 'stopped':
// Stop any animations
stopWaitingAnimation();
// Add system message to chat if provided
if (options.systemMessage) {
addChatMessage({
type: 'system',
content: options.systemMessage,
timestamp: new Date().toISOString()
});
}
break;
}
// Trigger UI updates by dispatching an event
const event = new CustomEvent('workflowStatusChanged', {
detail: {
status: newStatus,
previousStatus: prevStatus,
options: options
}
});
document.dispatchEvent(event);
// Execute additional callback if provided
if (typeof options.callback === 'function') {
options.callback();
}
return workflowState;
}
/**
 * Checks if a state transition is valid according to the state machine
 * @param {string} fromState - Current state
 * @param {string} toState - Target state
 * @returns {boolean} - Whether transition is valid
 */
function isValidStateTransition(fromState, toState) {
    // Special case: resetting to the initial (null) state is always allowed.
    if (toState === null) {
        return true;
    }
    // NOTE: a null fromState resolves to the 'null' key here, because
    // JavaScript coerces property keys to strings.
    const allowed = {
        'null': ['running'],
        'running': ['running', 'completed', 'failed', 'stopped'],
        'completed': ['running', 'null'],
        'failed': ['running', 'null'],
        'stopped': ['running', 'null']
    }[fromState];
    // Unknown source states accept any transition.
    if (allowed === undefined) {
        return true;
    }
    return allowed.includes(toState);
}
/**
 * Shows the initial prompt view
 * Resets the input field, button label and attached files; wording depends
 * on whether a workflow already exists (continue vs. start). Dispatches
 * 'showInitialPromptView' so other components can sync.
 */
function showInitialPromptView() {
    const elements = userInputState.domElements;
    console.log("Showing initial prompt view");
    // Change placeholder for initial prompts
    if (elements.userMessageInput) {
        elements.userMessageInput.placeholder = workflowState.workflowId ?
            "Continue the conversation..." :
            "Enter a new prompt...";
        elements.userMessageInput.value = ""; // Clear existing text
        userInputState.promptText = "";
    }
    // Change button text based on state
    if (elements.sendUserMessageBtn) {
        elements.sendUserMessageBtn.innerHTML = workflowState.workflowId ?
            '<i class="fas fa-paper-plane"></i> Send' :
            '<i class="fas fa-play"></i> Start';
    }
    // Clear attached files
    userInputState.additionalFiles = [];
    if (elements.additionalFilesContainer) {
        elements.additionalFilesContainer.innerHTML = '';
    }
    // Focus input field
    if (elements.userMessageInput) {
        elements.userMessageInput.focus();
    }
    // Update UI elements
    const event = new CustomEvent('showInitialPromptView', {
        detail: { workflowId: workflowState.workflowId }
    });
    document.dispatchEvent(event);
}
/**
 * Sets loading state for UI
 * Disables/enables the input widgets, swaps the send button between a
 * spinner and its context-dependent label, and broadcasts a
 * 'loadingStateChanged' event for other components.
 * @param {boolean} isLoading - Whether UI is in loading state
 */
function setLoadingState(isLoading) {
    const { userMessageInput, sendUserMessageBtn, uploadAdditionalFileBtn, promptSelectMain } = userInputState.domElements;
    console.log(`Setting UI loading state: ${isLoading ? 'Active' : 'Inactive'}`);
    // User input field
    if (userMessageInput) {
        userMessageInput.disabled = isLoading;
    }
    // Send button: spinner while loading, otherwise Send/Start by context
    if (sendUserMessageBtn) {
        sendUserMessageBtn.disabled = isLoading;
        if (isLoading) {
            sendUserMessageBtn.innerHTML = '<i class="fas fa-spinner fa-spin"></i>';
        } else {
            sendUserMessageBtn.innerHTML = workflowState.workflowId ?
                '<i class="fas fa-paper-plane"></i> Send' :
                '<i class="fas fa-play"></i> Start';
        }
    }
    // File upload button
    if (uploadAdditionalFileBtn) {
        uploadAdditionalFileBtn.disabled = isLoading;
    }
    // Prompt selection
    if (promptSelectMain) {
        promptSelectMain.disabled = isLoading;
    }
    // Dispatch event for other components to respond
    document.dispatchEvent(new CustomEvent('loadingStateChanged', {
        detail: { isLoading: isLoading }
    }));
}
/**
 * Starts waiting animation on a log entry
 * Marks exactly one log entry as "waiting", starts the 500 ms dots timer,
 * and dispatches 'logsUpdated' so the UI re-renders. A previous animation
 * is always stopped first.
 * @param {string} logId - ID of the log entry
 */
function startWaitingAnimation(logId) {
    // Stop any existing animation
    stopWaitingAnimation();
    // Mark log as waiting
    const log = workflowState.logs.find(l => l.id === logId);
    if (log) {
        // Reset waiting status for all logs (only one may animate at a time)
        workflowState.logs.forEach(l => l.waiting = false);
        // Set waiting status for this log
        log.waiting = true;
        lastWaitingLogId = logId;
        // Start animation
        waitingDotsInterval = setInterval(() => {
            updateWaitingDots();
        }, 500);
        // Dispatch event for UI update
        const event = new CustomEvent('logsUpdated', {
            detail: { logs: workflowState.logs, waitingLogId: logId }
        });
        document.dispatchEvent(event);
        // Start animation immediately
        updateWaitingDots();
    } else {
        console.warn("Log entry not found for waiting animation:", logId);
    }
}
/**
 * Stops waiting animation
 * Clears the dots timer, resets every log's waiting flag, empties the
 * rendered dots, and dispatches 'logsUpdated'. Safe to call when no
 * animation is running.
 */
function stopWaitingAnimation() {
    if (waitingDotsInterval) {
        clearInterval(waitingDotsInterval);
        waitingDotsInterval = null;
    }
    // Reset waiting status for all logs
    if (workflowState.logs) {
        workflowState.logs.forEach(log => {
            if (log.waiting) {
                log.waiting = false;
            }
        });
    }
    // Clear waiting dots in DOM (best-effort; DOM errors are only logged)
    try {
        document.querySelectorAll('.waiting-dots').forEach(el => {
            el.textContent = '';
        });
    } catch (e) {
        console.error("Error clearing waiting dots:", e);
    }
    // Reset last waiting log ID
    lastWaitingLogId = null;
    // Dispatch event for UI update
    const event = new CustomEvent('logsUpdated', {
        detail: { logs: workflowState.logs, waitingLogId: null }
    });
    document.dispatchEvent(event);
}
/**
 * Updates waiting dots animation
 * Advances each '.waiting-dots' element through '', '.', '..', '...'; if
 * the animated element is missing but a log is still flagged as waiting,
 * triggers a 'logsUpdated' re-render so the element reappears.
 */
function updateWaitingDots() {
    const waitingDotsElements = document.querySelectorAll('.waiting-dots');
    waitingDotsElements.forEach(element => {
        // Get current dots count
        const currentText = element.textContent || '';
        let dotsCount = currentText.length;
        // Increment and limit dots count (cycles 0..3)
        dotsCount = (dotsCount + 1) % 4;
        element.textContent = '.'.repeat(dotsCount);
    });
    // If no elements found but animation is running, force re-render
    if (waitingDotsElements.length === 0 && waitingDotsInterval) {
        // Find logs with waiting flag
        const waitingLogs = workflowState.logs ?
            workflowState.logs.filter(log => log.waiting) : [];
        if (waitingLogs.length > 0) {
            // Dispatch event for UI update
            const event = new CustomEvent('logsUpdated', {
                detail: { logs: workflowState.logs }
            });
            document.dispatchEvent(event);
        }
    }
}
/**
 * Marks the given workflow as the active one and transitions it to
 * 'running'. Enables polling and mirrors the ID into the shared global
 * state when that state is present.
 * @param {string} workflowId - ID of the workflow to activate
 */
function setActiveWorkflow(workflowId) {
    if (!workflowId) {
        console.error("Invalid workflow ID");
        return;
    }

    console.log(`Setting active workflow: ${workflowId}`);
    workflowState.workflowId = workflowId;
    // An active workflow implies polling must be on
    workflowState.pollActive = true;

    // Keep the shared global state in sync for other modules
    const mainView = window.globalState && window.globalState.mainView;
    if (mainView) {
        mainView.currentWorkflowId = workflowId;
        console.log("Workflow ID also set in globalState");
    }

    // Drive the state machine to 'running'
    updateWorkflowStatus('running', {
        workflowId: workflowId,
        message: "Workflow started"
    });
}
/**
 * Adds a log entry to the workflow and notifies the UI.
 * Certain messages ("Agent ... selected", "Moderator ... analyzing",
 * "completed"/"finished") get special type/highlight treatment. While the
 * workflow is running, the new entry also receives the waiting animation.
 * @param {string} message - Log message
 * @param {string} type - Log type ('info', 'warning', 'error', 'success')
 * @param {string|null} details - Additional details
 * @param {string|null} agentName - Name of the agent that generated the log
 * @param {number|null} progress - Progress value (0-100)
 * @returns {Object} The created log entry
 */
function addLogEntry(message, type = 'info', details = null, agentName = null, progress = null) {
    // Stop previous animation
    stopWaitingAnimation();
    console.log(`Adding log entry: ${type} - ${message}${agentName ? ` [${agentName}]` : ''}`);
    const log = {
        // FIX: `log_${Date.now()}` alone collides when several entries are
        // added within the same millisecond (e.g. a polled batch), which
        // breaks per-ID lookups such as the waiting animation; append a
        // random suffix so each client-side ID is unique.
        id: `log_${Date.now()}_${Math.random().toString(36).slice(2, 8)}`,
        message,
        type,
        details,
        agentName,
        timestamp: new Date().toISOString(),
        progress: progress,
        status: workflowState.status
    };
    // Ensure logs array exists
    if (!workflowState.logs) {
        workflowState.logs = [];
    }
    // Special formatting for certain message types
    if (message.includes("Agent") && message.includes("selected")) {
        log.type = "info";
        log.highlighted = true;
    }
    if (message.includes("Moderator") && message.includes("analyzing")) {
        log.type = "info";
        log.highlighted = true;
    }
    if (message.includes("completed") || message.includes("finished")) {
        log.type = "success";
    }
    workflowState.logs.push(log);
    // Dispatch event for UI update
    const event = new CustomEvent('logsUpdated', {
        detail: { logs: workflowState.logs, newLog: log }
    });
    document.dispatchEvent(event);
    // If workflow is running, animate the freshly added entry
    if (workflowState.status === 'running') {
        startWaitingAnimation(log.id);
    }
    return log;
}
/**
 * Adds (or updates) a chat message in the workflow state.
 * A message with an already-known ID replaces the stored copy instead of
 * being appended. A message with status 'last' stops polling and fires a
 * 'workflowCompleted' event without changing the workflow status itself.
 * @param {Object} message - Message to add
 * @returns {Object} The stored, normalized message
 */
function addChatMessage(message) {
    // Normalize: guarantee id, documents and timestamp are present
    const stored = {
        ...message,
        id: message.id || `msg_${message.role || 'unknown'}_${Date.now()}`,
        documents: message.documents || [],
        timestamp: message.timestamp || new Date().toISOString()
    };
    console.log(`Adding chat message (ID: ${stored.id}, Role: ${stored.role || 'unknown'}, Status: ${stored.status || 'none'})`);

    if (!workflowState.chatMessages) {
        workflowState.chatMessages = [];
    }

    // Replace an existing message with the same ID, otherwise append
    const existingIndex = workflowState.chatMessages.findIndex(m => m.id === stored.id);
    if (existingIndex === -1) {
        workflowState.chatMessages.push(stored);
    } else {
        workflowState.chatMessages[existingIndex] = stored;
    }

    // IMPORTANT: per the state machine spec, 'last' ends polling but the
    // workflow status itself is left untouched
    if (stored.status === 'last') {
        console.log("Last message received, stopping polling without changing workflow status");
        workflowState.pollActive = false;
        document.dispatchEvent(new CustomEvent('workflowCompleted', {
            detail: {
                status: workflowState.status,
                message: "All workflow messages received"
            }
        }));
    }

    // Publish immutable copies so listeners cannot mutate our state
    document.dispatchEvent(new CustomEvent('chatMessagesUpdated', {
        detail: {
            chatMessages: [...workflowState.chatMessages],
            newMessage: { ...stored },
            workflowId: workflowState.workflowId
        }
    }));
    return stored;
}
/**
 * Clears all files attached in the user input area and resets the
 * native file <input> element.
 */
function clearAttachedFiles() {
    userInputState.additionalFiles = [];

    // Let the UI drop its file listing
    document.dispatchEvent(new CustomEvent('filesUpdated', {
        detail: { files: userInputState.additionalFiles }
    }));

    // Reset the native file input as well
    const fileInput = userInputState.domElements.additionalFileInput;
    if (fileInput) {
        fileInput.value = '';
    }
    console.log("Files cleared from input area");
}
/**
 * Accumulates transfer statistics and broadcasts the new totals.
 * Non-numeric inputs are ignored; negative values are clamped to zero.
 * @param {number} sentBytes - Bytes sent in this operation
 * @param {number} receivedBytes - Bytes received in this operation
 * @param {number} tokensUsed - Tokens consumed (for AI models)
 */
function updateDataStats(sentBytes, receivedBytes, tokensUsed = 0) {
    const stats = workflowState.dataStats;
    // Clamp to a non-negative integer
    const sanitized = value => Math.max(0, parseInt(value, 10));

    if (sentBytes && !isNaN(sentBytes)) {
        stats.bytesSent += sanitized(sentBytes);
    }
    if (receivedBytes && !isNaN(receivedBytes)) {
        stats.bytesReceived += sanitized(receivedBytes);
    }
    if (tokensUsed && !isNaN(tokensUsed)) {
        stats.tokensUsed += sanitized(tokensUsed);
    }

    // Broadcast the running totals for the UI
    document.dispatchEvent(new CustomEvent('dataStatsUpdated', {
        detail: {
            sentBytes: stats.bytesSent,
            receivedBytes: stats.bytesReceived,
            tokensUsed: stats.tokensUsed
        }
    }));
}
/**
 * Gets current workflow state.
 * NOTE(review): despite the previous comment, this returns the LIVE state
 * object, not a copy — callers (e.g. the polling layer) rely on mutating
 * it (pollActive, lastPolledLogId), so do not change this to a clone.
 * @returns {Object} Workflow state (shared mutable reference)
 */
function getWorkflowState() {
    // Intentionally the shared reference; see note above.
    return workflowState;
}
/**
 * Gets the current workflow status string.
 * @returns {string|null} Current status (e.g. 'running', 'completed',
 *     'failed', 'stopped'); null before any workflow has started
 */
function getWorkflowStatus() {
    return workflowState.status;
}
/**
 * Resets the entire workflow state back to its initial values, clears
 * pending user input, stops animations and notifies the UI.
 */
function resetWorkflow() {
    // Replace the state with a fresh initial object
    workflowState = {
        status: null,
        workflowId: "",
        logs: [],
        chatMessages: [],
        lastPolledLogId: null,
        lastPolledMessageId: null,
        dataStats: {
            bytesSent: 0,
            bytesReceived: 0,
            tokensUsed: 0
        },
        pollFailCount: 0,
        pollActive: false
    };

    // Discard pending user input
    userInputState.promptText = "";
    userInputState.additionalFiles = [];

    // No waiting animation may survive a reset
    stopWaitingAnimation();

    // Tell the UI to return to its initial view
    document.dispatchEvent(new CustomEvent('workflowReset'));
    console.log("Workflow state reset");
}
// Export all functions and state objects
export {
initCoordination,
userInputState,
updateWorkflowStatus,
showInitialPromptView,
setLoadingState,
startWaitingAnimation,
stopWaitingAnimation,
setActiveWorkflow,
addLogEntry,
addChatMessage,
clearAttachedFiles,
updateDataStats,
getWorkflowState,
getWorkflowStatus,
resetWorkflow,
isValidStateTransition
};

View file

@ -1,541 +0,0 @@
/**
* Handles all API communication and data processing for the workflow module
*/
import api from '../shared/apiCalls.js';
import * as WorkflowCoordination from './workflowCoordination.js';
// Reference to the global state
let globalState = null;
/**
 * Initializes the data management layer.
 * Stores a reference to the shared global application state so the other
 * API helpers in this module can read and update it.
 * @param {Object} globalStateObj - Global application state
 */
function initDataLayer(globalStateObj) {
    console.log("Initializing workflow data layer...");
    globalState = globalStateObj;
    console.log("Workflow data layer successfully initialized");
}
/**
 * Uploads a file, reusing an already-known file when possible.
 * Looks the file up in globalState.mainView.availableFiles first (by
 * name+size, then by name alone); only uploads when no match is found.
 * Newly uploaded files are appended to the shared available-files list
 * and their transfer stats are recorded.
 * @param {File} file - The file to upload
 * @returns {Promise<Object>} - Processed file object (existing or new)
 * @throws Re-throws any upload/processing error after logging it
 */
async function uploadAndAddFile(file) {
    try {
        let processedFile = null;
        // Check if the file is already known
        let existingFile = null;
        const availableFiles = globalState && globalState.mainView
            ? globalState.mainView.availableFiles
            : null;
        if (availableFiles) {
            // First check by name and size (exact match)
            existingFile = availableFiles.find(
                f => f.name === file.name && f.size === file.size
            );
            // If not found, try with just the name
            if (!existingFile) {
                existingFile = availableFiles.find(f => f.name === file.name);
            }
        }
        if (existingFile) {
            // Use the existing file
            console.log(`Using existing file: ${existingFile.name} (${existingFile.id})`);
            processedFile = existingFile;
        } else {
            // Upload new file
            console.log(`Uploading new file: ${file.name}`);
            try {
                processedFile = await api.uploadFile(file);
                console.log(`File uploaded successfully: ${processedFile.name} (${processedFile.id})`);
                // Add to global available files
                if (globalState && globalState.mainView) {
                    if (!globalState.mainView.availableFiles) {
                        globalState.mainView.availableFiles = [];
                    }
                    globalState.mainView.availableFiles.push(processedFile);
                }
                // FIX: updateDataStats(sentBytes, receivedBytes, tokensUsed)
                // previously received tokensUsed in the receivedBytes slot,
                // corrupting the byte counters. Pass it as the third argument.
                WorkflowCoordination.updateDataStats(
                    file.size, 0, processedFile.tokensUsed || 0
                );
            } catch (uploadError) {
                console.error("Error uploading file:", uploadError);
                throw uploadError;
            }
        }
        return processedFile;
    } catch (error) {
        console.error("Error processing file:", error);
        throw error;
    }
}
/**
 * Creates a new workflow from the user's prompt and selected files.
 * An empty workflow ID is sent, which the backend treats as "create".
 * Transfer statistics from the response are fed back into the
 * coordination layer.
 * @param {string} promptText - Text for the workflow prompt
 * @param {Array} selectedFiles - Selected file objects (each with .id)
 * @returns {Promise<Object>} - Backend response for the created workflow
 * @throws Re-throws any API error after logging it
 */
async function createWorkflow(promptText, selectedFiles) {
    try {
        const fileIds = selectedFiles.map(f => f.id);
        const preview = `${promptText.substring(0, 50)}${promptText.length > 50 ? '...' : ''}`;
        console.log(`Creating new workflow with prompt: "${preview}"`,
                    `and ${fileIds.length} files`);

        // Empty workflow ID => backend creates a new workflow
        const response = await api.submitUserInput("", promptText, fileIds);

        // Fold any reported transfer stats into the shared counters
        if (response && response.dataStats) {
            WorkflowCoordination.updateDataStats(
                response.dataStats.bytesSent || 0,
                response.dataStats.bytesReceived || 0,
                response.dataStats.tokensUsed || 0
            );
        }
        console.log("Workflow creation response:");
        console.log(response);
        return response;
    } catch (error) {
        console.error("Error creating workflow:", error);
        throw error;
    }
}
/**
 * Submits a follow-up user message to an existing workflow.
 * Transfer statistics from the response are fed back into the
 * coordination layer.
 * @param {string} workflowId - ID of the workflow
 * @param {string} prompt - User message
 * @param {Array} listFileId - IDs of additional files
 * @returns {Promise<Object>} - API response
 * @throws Re-throws any API error after logging it
 */
async function submitUserInput(workflowId, prompt, listFileId = []) {
    try {
        console.log(`Submitting user input to workflow ${workflowId}`);
        const preview = `${prompt.substring(0, 50)}${prompt.length > 50 ? '...' : ''}`;
        console.log(`Prompt: "${preview}"`,
                    `with ${listFileId.length} files`);

        const response = await api.submitUserInput(workflowId, prompt, listFileId);

        // Fold any reported transfer stats into the shared counters
        if (response && response.dataStats) {
            WorkflowCoordination.updateDataStats(
                response.dataStats.bytesSent || 0,
                response.dataStats.bytesReceived || 0,
                response.dataStats.tokensUsed || 0
            );
        }
        console.log("User input submission response:", response);
        return response;
    } catch (error) {
        console.error("Error submitting user input:", error);
        throw error;
    }
}
/**
 * Stops a running workflow.
 * Polling is disabled immediately — before the API call and again in the
 * error path — so no further polling requests are issued either way.
 * @param {string} workflowId - ID of the workflow
 * @returns {Promise<Object>} - API response
 * @throws Re-throws any API error after logging it
 */
async function stopWorkflow(workflowId) {
    try {
        console.log(`Stopping workflow ${workflowId}`);
        // Disable polling up front so in-flight loops wind down
        WorkflowCoordination.getWorkflowState().pollActive = false;

        const response = await api.stopWorkflow(workflowId);
        console.log("Workflow stop response:", response);
        return response;
    } catch (error) {
        console.error("Error stopping workflow:", error);
        // Even a failed stop request must not leave polling enabled
        WorkflowCoordination.getWorkflowState().pollActive = false;
        throw error;
    }
}
/**
 * Polls workflow status and updates state accordingly.
 * Re-schedules itself every 2s while workflowState.pollActive is true.
 * On errors it retries with exponential backoff and, after more than 5
 * consecutive failures, marks the workflow failed and stops polling.
 * @param {string} workflowId - ID of the workflow
 */
async function pollWorkflowStatus(workflowId) {
    try {
        if (!workflowId) {
            console.warn("Cannot poll workflow status: No workflow ID provided");
            return;
        }
        const workflowState = WorkflowCoordination.getWorkflowState();
        // Only poll if we have an active polling flag
        if (!workflowState.pollActive) {
            console.log(`Polling stopped: pollActive=${workflowState.pollActive}, status=${workflowState.status}`);
            return;
        }
        console.log(`Polling status for workflow ${workflowId}, pollActive=${workflowState.pollActive}, status=${workflowState.status}`);
        // Get workflow status from API
        const statusResponse = await api.getWorkflowStatus(workflowId);
        // Re-check after the await: polling may have been cancelled while
        // the request was in flight (race condition guard)
        if (!workflowState.pollActive) {
            console.log(`Polling aborted after API call: pollActive=${workflowState.pollActive}`);
            return;
        }
        if (!statusResponse) {
            console.warn("No status response received");
            return;
        }
        // FIX: a successful poll must reset the failure counter. It was
        // previously only ever incremented, so >5 transient errors spread
        // across a long session permanently marked the workflow as failed.
        workflowState.pollFailCount = 0;
        console.log(`Received status for workflow ${workflowId}:`, statusResponse);
        // Update stats if available
        if (statusResponse.dataStats) {
            WorkflowCoordination.updateDataStats(
                statusResponse.dataStats.bytesSent || 0,
                statusResponse.dataStats.bytesReceived || 0,
                statusResponse.dataStats.tokensUsed || 0
            );
        }
        // Status may live at the top level or nested under .workflow
        const status = statusResponse.status ||
                      (statusResponse.workflow ? statusResponse.workflow.status : null);
        if (!status) {
            console.warn("Status value not found in response");
            return;
        }
        if (status !== workflowState.status) {
            // FIX: the old log string concatenated the two statuses with no
            // separator; add an explicit arrow.
            console.log(`Workflow status changed: ${workflowState.status} -> ${status}`);
            // Update state based on new status
            WorkflowCoordination.updateWorkflowStatus(status, {
                message: `Workflow status updated to: ${status}`,
                systemMessage: getSystemMessageForStatus(status)
            });
            // 'failed'/'stopped' are terminal: stop polling right away.
            // For 'completed', polling continues until the 'last' message
            // is received.
            if (status === 'failed' || status === 'stopped') {
                workflowState.pollActive = false;
                console.log(`Workflow ${workflowId} reached terminal state: ${status}, polling stopped`);
                return;
            }
        }
        // Poll for logs and messages
        await Promise.all([
            pollWorkflowLogs(workflowId),
            pollWorkflowMessages(workflowId)
        ]);
        // Check polling flag again after the awaits to avoid scheduling a
        // stale follow-up poll
        if (workflowState.pollActive) {
            setTimeout(() => pollWorkflowStatus(workflowId), 2000);
        } else {
            console.log(`Polling stopped after API calls: pollActive=${workflowState.pollActive}`);
        }
    } catch (error) {
        console.error("Error polling workflow status:", error);
        // Get current state for error handling
        const workflowState = WorkflowCoordination.getWorkflowState();
        // Retry with exponential backoff only while polling is wanted
        if (workflowState.pollActive) {
            const backoffTime = workflowState.pollFailCount ?
                Math.min(2000 * Math.pow(1.5, workflowState.pollFailCount), 16000) :
                2000;
            console.log(`Retrying poll in ${backoffTime}ms (attempt ${(workflowState.pollFailCount || 0) + 1})`);
            setTimeout(() => pollWorkflowStatus(workflowId), backoffTime);
            // Update fail count
            workflowState.pollFailCount = (workflowState.pollFailCount || 0) + 1;
            // After multiple consecutive failures, surface the error and
            // give up (the retry scheduled above bails on pollActive=false)
            if (workflowState.pollFailCount > 5) {
                WorkflowCoordination.addLogEntry(
                    "Connection issues detected. Please check your network connection.",
                    "error"
                );
                WorkflowCoordination.updateWorkflowStatus('failed', {
                    message: "Workflow failed due to connection issues",
                    systemMessage: "Failed to connect to the server"
                });
                workflowState.pollActive = false;
            }
        }
    }
}
/**
 * Maps a workflow status to its user-facing system message.
 * @param {string} status - New workflow status
 * @returns {string|null} - System message, or null for statuses that do
 *     not need one
 */
function getSystemMessageForStatus(status) {
    const messages = {
        completed: "Workflow completed successfully",
        failed: "Workflow failed to complete",
        stopped: "Workflow was stopped"
    };
    // hasOwnProperty guard keeps inherited keys from leaking through
    return Object.prototype.hasOwnProperty.call(messages, status)
        ? messages[status]
        : null;
}
/**
 * Polls workflow logs and forwards new entries to the coordination layer.
 * Tracks workflowState.lastPolledLogId so each poll only requests logs
 * newer than the last one processed. Repeated errors while the workflow
 * is running cause the workflow to be marked as failed.
 * @param {string} workflowId - ID of the workflow
 */
async function pollWorkflowLogs(workflowId) {
    try {
        // Get current workflow state
        const workflowState = WorkflowCoordination.getWorkflowState();
        console.info("polling api request param id:", workflowState.lastPolledLogId);
        const logs = await api.getWorkflowLogs(workflowId, workflowState.lastPolledLogId);
        console.log("Received workflow logs:", logs); // DEBUG
        // Update data statistics if available in response
        if (logs && logs.dataStats) {
            WorkflowCoordination.updateDataStats(0, logs.dataStats.bytesReceived || 0);
        }
        // The API is expected to return a plain array of log objects
        let logsArray = [];
        if (Array.isArray(logs)) {
            logsArray = logs;
        } else {
            // FIX: "meessages" typo corrected and an error condition is now
            // logged via console.error instead of console.log
            console.error("ERROR: Log messages in wrong format");
            return;
        }
        if (!logsArray || logsArray.length === 0) {
            return;
        }
        console.log(`Processing ${logsArray.length} new logs`); // Debug logging
        // Process only logs we have not seen yet
        const existingLogIds = new Set(workflowState.logs.map(log => log.id));
        logsArray.forEach(log => {
            if (!existingLogIds.has(log.id)) {
                // Normalize so downstream code can rely on defaults
                const processedLog = {
                    id: log.id,
                    message: log.message || 'No message',
                    type: log.type || 'info',
                    timestamp: log.timestamp || new Date().toISOString(),
                    agentName: log.agentName || null,
                    details: log.details || null,
                    progress: log.progress !== undefined ? log.progress : undefined,
                    status: log.status || null
                };
                // Add log entry
                WorkflowCoordination.addLogEntry(
                    processedLog.message,
                    processedLog.type,
                    processedLog.details,
                    processedLog.agentName,
                    processedLog.progress
                );
                // Remember the last processed log for the next poll
                workflowState.lastPolledLogId = log.id;
                console.info("polling api next param id:", workflowState.lastPolledLogId);
            }
        });
    } catch (error) {
        console.error("Error polling workflow logs:", error);
        const workflowState = WorkflowCoordination.getWorkflowState();
        // Only mark as failed after repeated errors in running state
        if (workflowState.pollFailCount > 3 && workflowState.status === 'running') {
            WorkflowCoordination.updateWorkflowStatus('failed', {
                message: "Failed to retrieve workflow logs",
                systemMessage: "Error communicating with server"
            });
            workflowState.pollActive = false;
        }
    }
}
/**
 * Polls new chat messages for a workflow and hands them to the
 * coordination layer. A message with status 'last' disables polling.
 * @param {string} workflowId - ID of the workflow
 */
async function pollWorkflowMessages(workflowId) {
    try {
        const workflowState = WorkflowCoordination.getWorkflowState();
        const response = await api.getWorkflowMessages(workflowId, workflowState.lastPolledMessageId);

        // The API may return either a bare array or { messages: [...] }
        let messages;
        if (Array.isArray(response)) {
            messages = response;
        } else if (response && response.messages) {
            messages = response.messages;
        } else {
            messages = [];
        }
        if (!messages || messages.length === 0) {
            return;
        }

        // Skip anything we already hold in state
        const knownIds = new Set(workflowState.chatMessages.map(msg => msg.id));
        for (const message of messages) {
            if (knownIds.has(message.id)) {
                continue;
            }
            // Normalize the message before storing it
            WorkflowCoordination.addChatMessage({
                id: message.id,
                agentName: message.agentName || '',
                content: message.content || '',
                role: message.role || '',
                timestamp: message.startedAt || message.timestamp || new Date().toISOString(),
                documents: message.documents || [],
                status: message.status || null
            });
            workflowState.lastPolledMessageId = message.id;

            // 'last' means the server has nothing more to send
            if (message.status === 'last') {
                console.log("Last message found, stopping polling");
                workflowState.pollActive = false;
            }
        }
    } catch (error) {
        console.error("Error polling workflow messages:", error);
    }
}
/**
 * Deletes a message from a workflow.
 * A 404 from the backend is treated as success, since the message is
 * already gone.
 * @param {string} workflowId - ID of the workflow
 * @param {string} messageId - ID of the message to delete
 * @returns {Promise<boolean>} - True if (effectively) deleted
 */
async function deleteWorkflowMessage(workflowId, messageId) {
    if (!workflowId || !messageId) {
        console.error("Invalid parameters for deleteWorkflowMessage");
        return false;
    }
    try {
        const success = await api.deleteWorkflowMessage(workflowId, messageId);
        console.log("Delete message response:", success);
        return Boolean(success);
    } catch (error) {
        console.error("Error deleting message:", error);
        // The message may already be gone — treat 404 as deleted
        if (error.message && error.message.includes("404")) {
            console.log("Message not found (404), considering it deleted");
            return true;
        }
        return false;
    }
}
/**
 * Deletes a file attached to a workflow message.
 * "Not found" errors are treated as success so the UI can still drop
 * the file from its listing.
 * @param {string} workflowId - ID of the workflow
 * @param {string} messageId - ID of the message
 * @param {string} fileId - ID of the file to delete
 * @returns {Promise<boolean>} - True if (effectively) deleted
 */
async function deleteFileFromMessage(workflowId, messageId, fileId) {
    try {
        console.log(`Deleting file from message: workflow=${workflowId}, message=${messageId}, file=${fileId}`);
        const success = await api.deleteFileFromMessage(workflowId, messageId, fileId);
        console.log("File deletion response:", success);
        return Boolean(success);
    } catch (error) {
        console.error("Error deleting file from message:", error);
        // A missing file should still disappear from the UI
        const reason = error.message || '';
        if (reason.includes("404") || reason.includes("not found")) {
            console.log("File not found, removing from UI");
            return true;
        }
        return false;
    }
}
// Export functions
export {
initDataLayer,
createWorkflow,
submitUserInput,
stopWorkflow,
pollWorkflowStatus,
pollWorkflowLogs,
pollWorkflowMessages,
uploadAndAddFile,
deleteWorkflowMessage,
deleteFileFromMessage
};

File diff suppressed because it is too large Load diff

View file

@ -1,477 +0,0 @@
/**
* Utility functions for the workflow module
* Contains pure helper functions with no state or side effects
*/
/**
 * Formats a byte count as a human-readable size string.
 * @param {number} bytes - The file size in bytes
 * @param {number} decimals - Number of decimal places to show
 * @returns {string} - e.g. "1.5 KB"; zero/nullish input yields "0 Bytes"
 */
export function formatFileSize(bytes, decimals = 1) {
    if (bytes === 0 || bytes === null || bytes === undefined) return '0 Bytes';
    const UNIT = 1024;
    const LABELS = ['Bytes', 'KB', 'MB', 'GB', 'TB', 'PB'];
    // Pick the largest unit whose value is >= 1
    const exponent = Math.floor(Math.log(bytes) / Math.log(UNIT));
    const scaled = bytes / Math.pow(UNIT, exponent);
    // parseFloat drops trailing zeros ("1.0" -> "1")
    return `${parseFloat(scaled.toFixed(decimals))} ${LABELS[exponent]}`;
}
/**
 * Gets an appropriate Font Awesome icon class for a file type.
 * @param {Object} file - File object with a mimeType property
 * @returns {string} - Font Awesome icon class ('fa-file' as fallback)
 */
export function getFileTypeIcon(file) {
    // FIX: also guard against a file object with no mimeType — previously
    // `contentType.includes(...)` threw a TypeError on undefined.
    if (!file || !file.mimeType) return 'fa-file';
    const contentType = file.mimeType;
    // Image files
    if (contentType.includes('image')) return 'fa-file-image';
    // Document files
    if (contentType.includes('pdf')) return 'fa-file-pdf';
    if (contentType.includes('msword') || contentType.includes('wordprocessingml')) return 'fa-file-word';
    if (contentType.includes('spreadsheetml') || contentType.includes('excel')) return 'fa-file-excel';
    if (contentType.includes('presentationml') || contentType.includes('powerpoint')) return 'fa-file-powerpoint';
    // Text files
    if (contentType.includes('text/plain')) return 'fa-file-alt';
    if (contentType.includes('text/markdown') || contentType.includes('md')) return 'fa-file-alt';
    if (contentType.includes('text/csv') || contentType.includes('csv')) return 'fa-file-csv';
    // Code files
    if (contentType.includes('javascript') ||
        contentType.includes('typescript') ||
        contentType.includes('json')) return 'fa-file-code';
    if (contentType.includes('html') ||
        contentType.includes('xml') ||
        contentType.includes('css')) return 'fa-file-code';
    // Archive files
    if (contentType.includes('zip') ||
        contentType.includes('rar') ||
        contentType.includes('tar') ||
        contentType.includes('gzip')) return 'fa-file-archive';
    // Audio files
    if (contentType.includes('audio')) return 'fa-file-audio';
    // Video files
    if (contentType.includes('video')) return 'fa-file-video';
    // Default
    return 'fa-file';
}
/**
 * Formats text with markdown-like styling for rendering in the UI.
 * HTML is escaped first (XSS safety), then markdown constructs are
 * converted in a fixed order: fenced code blocks, inline code, bold,
 * italic, links, headings, lists, and finally newlines to <br>.
 * @param {string} text - Text to format
 * @returns {string} - HTML-formatted text
 */
export function formatMarkdownLike(text) {
    if (!text) return '';

    // Escape first so user input can never inject raw HTML
    let html = escapeHtml(String(text));

    // Fenced code blocks must be handled before inline markup
    html = formatCodeBlocks(html);

    // Inline markup in the original order (double-marker bold rules run
    // before the single-marker italic rules so ** is not consumed by *)
    const inlineRules = [
        [/`([^`]+)`/g, '<code>$1</code>'],
        [/\*\*([^*]+)\*\*/g, '<strong>$1</strong>'],
        [/__([^_]+)__/g, '<strong>$1</strong>'],
        [/\*([^*]+)\*/g, '<em>$1</em>'],
        [/_([^_]+)_/g, '<em>$1</em>'],
        [/\[([^\]]+)\]\(([^)]+)\)/g, '<a href="$2" target="_blank">$1</a>'],
        [/^### (.+)$/gm, '<h3>$1</h3>'],
        [/^## (.+)$/gm, '<h2>$1</h2>'],
        [/^# (.+)$/gm, '<h1>$1</h1>']
    ];
    for (const [pattern, replacement] of inlineRules) {
        html = html.replace(pattern, replacement);
    }

    // Bullet / numbered lists, then remaining newlines become <br>
    html = formatLists(html);
    return html.replace(/\n/g, '<br>');
}
/**
 * Escapes HTML special characters to prevent XSS attacks.
 * @param {string} html - Text that might contain HTML
 * @returns {string} - Escaped text
 */
function escapeHtml(html) {
    return html.replace(/[&<>"']/g, ch => {
        switch (ch) {
            case '&': return '&amp;';
            case '<': return '&lt;';
            case '>': return '&gt;';
            case '"': return '&quot;';
            default: return '&#039;'; // only ' can reach here
        }
    });
}
/**
 * Converts fenced ``` code blocks into <pre><code> markup.
 * An optional language tag after the opening fence becomes a
 * "language-<lang>" class for syntax highlighters.
 * @param {string} text - Text to format
 * @returns {string} - Text with formatted code blocks
 */
function formatCodeBlocks(text) {
    return text.replace(/```(\w*)\n([\s\S]+?)\n```/g, (_match, language, code) => {
        const classAttr = language ? ` class="language-${language}"` : '';
        return `<pre><code${classAttr}>${escapeHtml(code)}</code></pre>`;
    });
}
/**
 * Formats unordered ("* "/"- ") and ordered ("1. ") list lines into
 * <ul>/<ol> markup. Non-list lines are passed through with a trailing
 * newline (the caller later converts newlines to <br>); list-item lines
 * get no trailing newline.
 *
 * NOTE(review): list "type" is tracked by searching the accumulated
 * output for '<ol>' / '<ul>' substrings. Once an <ol> has appeared
 * anywhere in the output, every later close decision sees it — mixed
 * documents may get mismatched closing tags. Confirm before relying on
 * this for mixed ordered/unordered content.
 * @param {string} text - Text to format
 * @returns {string} - Text with formatted lists
 */
function formatLists(text) {
    let formatted = text;
    // Fast path: skip all work when no unordered list marker exists.
    // NOTE(review): a document with ONLY ordered ("1. ") items fails this
    // check and is returned unformatted — verify if that is intended.
    if (formatted.match(/^[*-] .+/gm)) {
        // Split by newline to process lines
        const lines = formatted.split('\n');
        let inList = false;       // currently inside an open <ul>/<ol>
        let listHtml = '';        // accumulated output
        for (let i = 0; i < lines.length; i++) {
            const line = lines[i];
            if (line.match(/^[*-] .+/)) {
                if (!inList) {
                    // Start a new list
                    listHtml += '<ul>';
                    inList = true;
                }
                // Add list item (strip the 2-char "* " / "- " marker)
                const itemContent = line.substring(2);
                listHtml += `<li>${itemContent}</li>`;
            } else if (line.match(/^\d+\. .+/)) {
                if (!inList || !listHtml.includes('<ol>')) {
                    // Close any open unordered list
                    if (inList && listHtml.includes('<ul>')) {
                        listHtml += '</ul>';
                    }
                    // Start a new ordered list
                    listHtml += '<ol>';
                    inList = true;
                }
                // Add ordered list item (strip the "N. " marker)
                const itemContent = line.replace(/^\d+\. /, '');
                listHtml += `<li>${itemContent}</li>`;
            } else if (inList) {
                // Non-list line ends the open list; see type-detection
                // caveat in the function comment above
                if (listHtml.includes('<ol>')) {
                    listHtml += '</ol>';
                } else {
                    listHtml += '</ul>';
                }
                inList = false;
                listHtml += line + '\n';
            } else {
                // Plain line outside any list: pass through with newline
                listHtml += line + '\n';
            }
        }
        // Close list if still open at the end
        if (inList) {
            if (listHtml.includes('<ol>')) {
                listHtml += '</ol>';
            } else {
                listHtml += '</ul>';
            }
        }
        formatted = listHtml;
    }
    return formatted;
}
/**
 * Debounces a function so it only runs after `wait` ms of call silence.
 * Each new call cancels the previously scheduled invocation.
 * @param {Function} func - Function to debounce
 * @param {number} wait - Time to wait in milliseconds
 * @returns {Function} - Debounced wrapper
 */
export function debounce(func, wait = 300) {
    let timerId;
    return function debounced(...args) {
        // Drop the pending invocation and schedule a fresh one
        clearTimeout(timerId);
        timerId = setTimeout(() => {
            clearTimeout(timerId);
            func(...args);
        }, wait);
    };
}
/**
 * Creates a throttled function that only invokes func once per wait
 * period; calls inside the window are dropped (they return undefined).
 * @param {Function} func - The function to throttle
 * @param {number} wait - Milliseconds to wait between invocations
 * @returns {Function} - Throttled wrapper
 */
export function throttle(func, wait = 300) {
    let previousCall = 0;
    return function throttled(...args) {
        const timestamp = Date.now();
        // Inside the cooldown window: drop the call
        if (timestamp - previousCall < wait) {
            return undefined;
        }
        previousCall = timestamp;
        return func(...args);
    };
}
/**
 * Creates a reasonably unique ID with an optional prefix by combining
 * the current timestamp with a random base-36 suffix.
 * @param {string} prefix - Optional prefix for the ID
 * @returns {string} - Random ID
 */
export function generateId(prefix = '') {
    const stamp = Date.now();
    const randomPart = Math.random().toString(36).substring(2, 11);
    return `${prefix}${stamp}_${randomPart}`;
}
/**
 * Recursively deep-clones a value.
 * Supports primitives, null, Date, Array and plain objects; any other
 * object type raises an error.
 * @param {*} obj - Value to clone
 * @returns {*} - Cloned value
 * @throws {Error} When the value's type is not supported
 */
export function deepClone(obj) {
    // Primitives and null are immutable: return as-is
    if (obj === null || typeof obj !== 'object') {
        return obj;
    }
    // Dates are cloned via their epoch value
    if (obj instanceof Date) {
        return new Date(obj.getTime());
    }
    // Arrays: clone every element
    if (Array.isArray(obj)) {
        return obj.map(item => deepClone(item));
    }
    // Plain objects: clone every own enumerable property
    if (obj instanceof Object) {
        const clone = {};
        for (const key of Object.keys(obj)) {
            clone[key] = deepClone(obj[key]);
        }
        return clone;
    }
    throw new Error(`Unable to copy obj! Its type isn't supported: ${typeof obj}`);
}
/**
 * Extracts plain text content from an HTML string using a detached
 * element (requires a DOM environment).
 * @param {string} html - HTML string
 * @returns {string} - Plain text content
 */
export function stripHtml(html) {
    if (!html) return '';
    // Parse into a detached element; it is never attached to the page
    const scratch = document.createElement('div');
    scratch.innerHTML = html;
    return scratch.textContent || scratch.innerText || '';
}
/**
 * Checks whether two objects serialize to identical JSON.
 * Note: this compares serialized form, so property order matters and
 * non-JSON values (functions, undefined) are ignored.
 * @param {Object} obj1 - First object
 * @param {Object} obj2 - Second object
 * @returns {boolean} - Whether the objects are equal by value
 */
export function objectEquals(obj1, obj2) {
    const left = JSON.stringify(obj1);
    const right = JSON.stringify(obj2);
    return left === right;
}
/**
 * Converts a string to title case (first letter of each word upper,
 * the rest lower).
 * @param {string} str - String to convert
 * @returns {string} - Title case string ('' for falsy input)
 */
export function toTitleCase(str) {
    if (!str) return '';
    return str.replace(/\w\S*/g, word =>
        word.charAt(0).toUpperCase() + word.substring(1).toLowerCase()
    );
}
/**
 * Truncates a string to a maximum length, appending a suffix when cut.
 * The suffix counts toward the maximum length.
 * @param {string} str - String to truncate
 * @param {number} length - Maximum length of the result
 * @param {string} suffix - Suffix to add if truncated
 * @returns {string} - Truncated string ('' for falsy input)
 */
export function truncate(str, length = 50, suffix = '...') {
    if (!str) return '';
    return str.length <= length
        ? str
        : str.substring(0, length - suffix.length) + suffix;
}
/**
 * Parses a JSON string, returning a fallback value on failure instead
 * of throwing.
 * @param {string} jsonString - String to parse
 * @param {*} defaultValue - Value to return if parsing fails
 * @returns {*} - Parsed value or the default
 */
export function safeJsonParse(jsonString, defaultValue = {}) {
    try {
        return JSON.parse(jsonString);
    } catch (err) {
        // Parsing problems are expected for user-supplied input: warn only
        console.warn('Error parsing JSON:', err);
        return defaultValue;
    }
}
/**
 * Returns a locale-formatted date string.
 * @param {string|Date} date - Date (or ISO-ish string) to format
 * @param {string} format - 'short', 'time', 'long', 'full' or 'medium'
 * @returns {string} - Formatted date string ('' for invalid input)
 */
export function formatDate(date, format = 'medium') {
    const dateObj = typeof date === 'string' ? new Date(date) : date;
    // Reject non-dates and invalid dates
    if (!(dateObj instanceof Date) || isNaN(dateObj)) {
        return '';
    }
    if (format === 'time') {
        return dateObj.toLocaleTimeString();
    }
    // Option sets for the named formats; anything else falls back to the
    // default toLocaleString() ('medium')
    const optionsByFormat = {
        short: {
            month: 'numeric',
            day: 'numeric',
            hour: '2-digit',
            minute: '2-digit'
        },
        long: {
            year: 'numeric',
            month: 'long',
            day: 'numeric',
            hour: '2-digit',
            minute: '2-digit'
        },
        full: {
            weekday: 'long',
            year: 'numeric',
            month: 'long',
            day: 'numeric',
            hour: '2-digit',
            minute: '2-digit',
            second: '2-digit'
        }
    };
    const options = Object.prototype.hasOwnProperty.call(optionsByFormat, format)
        ? optionsByFormat[format]
        : null;
    return options ? dateObj.toLocaleString(undefined, options) : dateObj.toLocaleString();
}
/**
 * Creates an event emitter with a simple pub/sub interface.
 * @returns {Object} - Event emitter object
 */
export function createEventEmitter() {
    // Map of event name -> array of subscriber callbacks.
    const listeners = {};
    return {
        /**
         * Subscribe to an event.
         * @param {string} event - Event name
         * @param {Function} callback - Event handler
         * @returns {Function} - Unsubscribe function
         */
        subscribe(event, callback) {
            (listeners[event] = listeners[event] || []).push(callback);
            return function unsubscribe() {
                listeners[event] = listeners[event].filter(cb => cb !== callback);
            };
        },
        /**
         * Publish an event with data.
         * @param {string} event - Event name
         * @param {*} data - Event data
         */
        publish(event, data) {
            const subs = listeners[event];
            if (!subs) return;
            // Iterate the array captured above, so unsubscribing during
            // dispatch does not affect this publish cycle (filter replaces
            // the stored array rather than mutating it).
            for (const cb of subs) {
                cb(data);
            }
        },
        /**
         * Clear subscriptions for one event, or for all events when
         * no event name is given.
         * @param {string} event - Event name (optional)
         */
        clear(event) {
            if (event) {
                delete listeners[event];
                return;
            }
            for (const key of Object.keys(listeners)) {
                delete listeners[key];
            }
        }
    };
}

View file

@ -1,307 +0,0 @@
/**
* mandates.js
* Mandanten-Modul mit dem generischen Entitätsansatz
* Angepasst an das überarbeitete Backend und apiCalls.js
*/
import api from '../shared/apiCalls.js';
/**
 * Initialization function for the mandates (tenant) module.
 * Renders an access warning and aborts unless the current user is a
 * SysAdmin; otherwise wires the generic entity module for mandates.
 * @param {Object} globalState - Global application state
 */
function initMandatesModule(globalState) {
    // Check whether the user has the required rights (SysAdmin only).
    if (!globalState.user || !globalState.user.isSysAdmin) {
        // Replace the mandates section with an access warning.
        const mandatesView = document.getElementById('mandates-view');
        if (mandatesView) {
            mandatesView.innerHTML = `
                <h2>Mandantenverwaltung</h2>
                <div class="card">
                    <div class="alert alert-warning">
                        <i class="fas fa-exclamation-triangle"></i>
                        Sie benötigen SysAdmin-Rechte, um Mandanten zu verwalten.
                    </div>
                </div>
            `;
        }
        return;
    }
    // Initialize the generic entity module for mandates.
    window.genericEntityModule.init(globalState, {
        entityType: 'mandate',
        apiEndpoint: {
            get: api.getMandates,       // API function to fetch all mandates
            create: api.createMandate,  // API function to create a mandate
            update: api.updateMandate,  // API function to update a mandate
            delete: api.deleteMandate   // API function to delete a mandate
        },
        listContainerId: 'mandates-list-container',
        addButtonId: 'add-mandate-btn',
        // Custom render function for a single mandate list entry.
        renderItem: function(mandate) {
            // Status badge (active / disabled).
            const statusBadge = mandate.disabled ?
                '<span class="badge badge-red">Deaktiviert</span>' :
                '<span class="badge badge-green">Aktiv</span>';
            return `
                <div class="list-header">
                    <div>
                        <h4>${mandate.name || 'Unbenannter Mandant'}</h4>
                    </div>
                    <div class="mandate-badges">
                        ${statusBadge}
                        <span class="badge badge-blue">Sprache: ${mandate.language || 'de'}</span>
                    </div>
                </div>
                <div class="list-body">
                    ${mandate.description ? `<div class="mandate-description">${mandate.description}</div>` : ''}
                    ${mandate.contactEmail ? `<div class="mandate-email">${mandate.contactEmail}</div>` : ''}
                    ${mandate.createdAt ? `Erstellt am: ${new Date(mandate.createdAt).toLocaleString()}` : ''}
                </div>
            `;
        },
        // Normalize special attribute formats coming from the form.
        transformFormData: function(formData) {
            // Coerce the string form value for "disabled" into a boolean.
            if (formData.hasOwnProperty('disabled') && typeof formData.disabled === 'string') {
                formData.disabled = formData.disabled.toLowerCase() === 'true';
            }
            return formData;
        },
        // Extra per-item actions for mandates.
        getItemActions: function(mandate) {
            const actions = [];
            // "Show users" action.
            actions.push({
                text: 'Benutzer anzeigen',
                className: 'entity-view-btn',
                html: '<i class="fas fa-users"></i>',
                handler: function(mandate) {
                    showMandateUsers(mandate);
                }
            });
            // Enable/disable the mandate depending on its current state.
            if (mandate.disabled) {
                actions.push({
                    text: 'Aktivieren',
                    className: 'entity-activate-btn',
                    html: '<i class="fas fa-check-circle"></i>',
                    handler: function(mandate) {
                        toggleMandateStatus(mandate, false);
                    }
                });
            } else {
                actions.push({
                    text: 'Deaktivieren',
                    className: 'entity-deactivate-btn',
                    html: '<i class="fas fa-ban"></i>',
                    handler: function(mandate) {
                        toggleMandateStatus(mandate, true);
                    }
                });
            }
            return actions;
        },
        // Lifecycle callbacks: show a toast after create/update/delete.
        onItemCreated: function(mandate) {
            showToast(`Mandant "${mandate.name}" erstellt`);
        },
        onItemUpdated: function(mandate) {
            showToast(`Mandant "${mandate.name}" aktualisiert`);
        },
        onItemDeleted: function(mandateId) {
            showToast(`Mandant gelöscht`);
        }
    });
}
/**
 * Enables or disables a mandate.
 * @param {Object} mandate - The mandate to change
 * @param {boolean} disabled - New status (true = disabled, false = enabled)
 */
async function toggleMandateStatus(mandate, disabled) {
    try {
        // Persist the status change via the API.
        const response = await api.updateMandate(mandate.id, { disabled });
        if (response) {
            showToast(`Mandant "${mandate.name}" ${disabled ? 'deaktiviert' : 'aktiviert'}`);
            // Reload the list so the UI reflects the new status.
            if (window.genericEntityModule) {
                window.genericEntityModule.loadAndRenderItems();
            }
        }
    } catch (error) {
        console.error(`Fehler beim Ändern des Mandantenstatus:`, error);
        showError(`Fehler beim Ändern des Mandantenstatus`, error.message);
    }
}
/**
 * Shows all users belonging to a mandate in a modal dialog.
 * Falls back to a toast with the user count when no modal helper exists.
 * @param {Object} mandate - The mandate whose users are listed
 */
async function showMandateUsers(mandate) {
    try {
        // Fetch the users for this mandate.
        const users = await api.getUsers(mandate.id);
        if (!users || users.length === 0) {
            showToast(`Keine Benutzer für Mandant "${mandate.name}" gefunden.`);
            return;
        }
        // Build the modal's user table markup.
        let userListHTML = `
            <h3>Benutzer von Mandant "${mandate.name}"</h3>
            <div class="mandate-users-list">
                <table class="users-table">
                    <thead>
                        <tr>
                            <th>Benutzername</th>
                            <th>Name</th>
                            <th>E-Mail</th>
                            <th>Rolle</th>
                            <th>Status</th>
                        </tr>
                    </thead>
                    <tbody>
        `;
        users.forEach(user => {
            // Derive status and role badges for this user.
            const status = user.disabled ?
                '<span class="badge badge-red">Deaktiviert</span>' :
                '<span class="badge badge-green">Aktiv</span>';
            let role = '<span class="badge badge-gray">User</span>';
            if (user.privilege === 'admin') {
                role = '<span class="badge badge-blue">Admin</span>';
            } else if (user.privilege === 'sysadmin') {
                role = '<span class="badge badge-purple">SysAdmin</span>';
            }
            userListHTML += `
                <tr>
                    <td>${user.username}</td>
                    <td>${user.fullName || '-'}</td>
                    <td>${user.email || '-'}</td>
                    <td>${role}</td>
                    <td>${status}</td>
                </tr>
            `;
        });
        userListHTML += `
                    </tbody>
                </table>
            </div>
        `;
        // Display the modal if the shared modal helper is available.
        if (window.appUtils && window.appUtils.ui && window.appUtils.ui.createModal) {
            window.appUtils.ui.createModal({
                title: `Benutzer von Mandant "${mandate.name}"`,
                content: userListHTML,
                submitText: 'Schließen',
                cancelText: null, // No cancel button — close-only dialog
                onSubmit: () => {
                    // Nothing to do; just close the modal.
                }
            });
        } else {
            // Fallback: toast with the user count.
            showToast(`Der Mandant "${mandate.name}" hat ${users.length} Benutzer.`);
        }
    } catch (error) {
        console.error(`Fehler beim Abrufen der Mandantenbenutzer:`, error);
        showError(`Fehler beim Abrufen der Benutzer`, error.message);
    }
}
/**
 * Creates a new mandate populated with default values.
 * @param {Object} globalStateObj - Global application state
 * @returns {Promise<Object>} - The created mandate
 * @throws Re-throws any API error after reporting it to the user.
 */
async function createDefaultMandate(globalStateObj) {
    try {
        // Template for a freshly created mandate.
        const defaults = {
            name: "Neuer Mandant",
            description: "Beschreibung des Mandanten",
            language: "de",
            contactEmail: ""
        };
        // Persist via the API.
        const created = await api.createMandate(defaults);
        // Refresh the entity list so the new mandate shows up.
        if (window.genericEntityModule) {
            window.genericEntityModule.loadAndRenderItems();
        }
        return created;
    } catch (error) {
        console.error("Fehler beim Erstellen des Default-Mandanten:", error);
        showError("Fehler beim Erstellen", error.message);
        throw error;
    }
}
/**
 * Displays a toast message, preferring the shared toast helper and
 * falling back to the global error helper or the console.
 * @param {string} message - Message to display
 */
function showToast(message) {
    const ui = window.appUtils && window.appUtils.ui;
    if (ui && ui.showToast) {
        ui.showToast(message);
        return;
    }
    if (window.globalUtils && window.globalUtils.showError) {
        // No toast UI available; reuse the error dialog as an info box.
        window.globalUtils.showError("Info", message);
        return;
    }
    console.log(message);
}
/**
 * Displays an error message, preferring the shared UI helper and
 * falling back to the global helper or the console.
 * @param {string} title - Title of the error message
 * @param {string} message - Error message body
 */
function showError(title, message) {
    const ui = window.appUtils && window.appUtils.ui;
    if (ui && ui.showError) {
        ui.showError(title, message);
        return;
    }
    if (window.globalUtils && window.globalUtils.showError) {
        window.globalUtils.showError(title, message);
        return;
    }
    console.error(`${title}: ${message}`);
}
// Define and expose the mandates module interface on window for
// non-module consumers.
window.initMandatesModule = initMandatesModule;
// Export for ES modules.
export {
    initMandatesModule,
    toggleMandateStatus,
    showMandateUsers,
    createDefaultMandate
};

View file

@ -1,303 +0,0 @@
/*
* PowerOn | Multi-Agent Service - Chat Component Styles
* Styles for chat messages and interaction
*/
/* Chat area header */
.chat-area-header {
padding: 0.75rem 1rem;
background-color: #f8f9fa;
border-bottom: 1px solid #e5e7eb;
z-index: 10;
position: sticky;
top: 0;
}
.chat-area-header h3 {
margin: 0;
}
/* Chat messages container */
.agent-chat-messages {
flex: 1;
overflow-y: auto;
padding: 1rem;
scrollbar-width: thin;
}
/* Chat message types */
.chat-message {
margin-bottom: 1rem;
max-width: 90%;
width: 100%;
}
.system-message {
margin: 0.5rem auto;
text-align: center;
color: #6b7280;
font-size: 0.75rem;
font-style: italic;
background-color: #f9fafb;
padding: 0.25rem 0.5rem;
border-radius: 1rem;
max-width: 80%;
}
.agent-message {
padding: 0.75rem;
background-color: white;
border-radius: 0.375rem;
border-left: 3px solid #3b82f6;
box-shadow: 0 1px 2px rgba(0, 0, 0, 0.05);
margin-left: 0.5rem;
margin-right: 0.5rem;
}
.agent-message.user {
border-left-color: #10b981;
background-color: #f0fdf4;
}
/* Special styling for messages between Moderator and User Agent */
.agent-message.moderator-to-user {
border-left-color: #f59e0b;
background-color: #fffbeb;
border-width: 3px;
}
.moderator-to-user .agent-name {
color: #b45309;
}
/* Message header */
.message-header {
display: flex;
justify-content: space-between;
margin-bottom: 0.25rem;
font-size: 0.75rem;
}
.agent-name {
font-weight: 500;
}
.message-time {
color: #6b7280;
}
/* Message content with collapse feature */
.message-content {
font-size: 0.875rem;
}
.agent-message.collapsed .message-content {
max-height: 100px;
overflow: hidden;
position: relative;
}
.agent-message.collapsed .message-content::after {
content: "";
position: absolute;
bottom: 0;
left: 0;
right: 0;
height: 40px;
background: linear-gradient(transparent, white);
}
.agent-message .toggle-content {
text-align: center;
color: #3b82f6;
font-size: 0.75rem;
cursor: pointer;
padding: 0.25rem;
user-select: none;
}
.agent-message .toggle-content:hover {
text-decoration: underline;
}
/* Files in messages */
.message-files {
margin-top: 0.75rem;
border-top: 1px solid #e5e7eb;
padding-top: 0.5rem;
}
.files-heading {
font-size: 0.75rem;
font-weight: 500;
color: #6b7280;
margin-bottom: 0.5rem;
}
.files-list {
list-style: none;
padding: 0;
margin: 5px 0;
}
.file-item {
display: flex;
align-items: center;
padding: 6px 8px;
background-color: #f5f5f5;
border-radius: 4px;
margin-bottom: 5px;
transition: background-color 0.2s;
font-size: 0.8rem;
}
.file-item:hover {
background-color: #e9e9e9;
}
.file-item i:first-child {
margin-right: 8px;
color: #555;
}
.file-name {
flex: 1;
white-space: nowrap;
overflow: hidden;
text-overflow: ellipsis;
margin-right: 10px;
}
.file-actions {
display: flex;
gap: 5px;
}
.preview-file-btn, .delete-file-btn {
background: none;
border: none;
cursor: pointer;
padding: 4px;
border-radius: 3px;
transition: background-color 0.2s;
}
.preview-file-btn:hover, .delete-file-btn:hover {
background-color: rgba(0, 0, 0, 0.1);
}
.preview-file-btn i {
color: #2196F3;
}
.delete-file-btn i {
color: #F44336;
}
/* Message delete button */
.message-delete-container {
display: none;
margin-left: 8px;
}
.chat-message:hover .message-delete-container {
display: block;
}
.message-delete-btn {
background: none;
border: none;
color: #9ca3af;
font-size: 0.7rem;
cursor: pointer;
padding: 2px;
transition: color 0.2s;
}
.message-delete-btn:hover {
color: #ef4444;
}
/* User prompt area */
.user-prompt {
background-color: #fffbeb;
border-left: 4px solid #f59e0b;
padding: 0.75rem;
margin-bottom: 0.75rem;
border-radius: 0.25rem;
animation: fadeIn 0.3s ease;
}
.moderator-question {
font-size: 0.95rem;
}
.moderator-question strong {
color: #b45309;
font-weight: 600;
}
/* Previous result */
.previous-result {
font-size: 0.875rem;
line-height: 1.4;
}
.previous-result strong {
color: #4b5563;
margin-right: 0.25rem;
}
/* Empty chat state */
#empty-chat-state {
display: flex;
flex-direction: column;
align-items: center;
justify-content: center;
height: 100%;
color: #9ca3af;
text-align: center;
padding: 2rem;
}
#empty-chat-state i {
font-size: 3rem;
color: #d1d5db;
margin-bottom: 1rem;
}
#empty-chat-state h4 {
margin: 0.5rem 0;
color: #4b5563;
}
#empty-chat-state p {
margin: 0;
color: #6b7280;
font-size: 0.875rem;
}
/* Animation */
@keyframes fadeIn {
from { opacity: 0; transform: translateY(5px); }
to { opacity: 1; transform: translateY(0); }
}
/* Scroll behavior */
.agent-chat-messages {
scrollbar-width: thin;
scrollbar-color: #d1d5db transparent;
}
.agent-chat-messages::-webkit-scrollbar {
width: 6px;
}
.agent-chat-messages::-webkit-scrollbar-track {
background: transparent;
}
.agent-chat-messages::-webkit-scrollbar-thumb {
background-color: #d1d5db;
border-radius: 3px;
}

View file

@ -1,464 +0,0 @@
/*
* PowerOn | Multi-Agent Service - Files Component Styles
* Styles for file upload, preview, and management
*/
/* File preview container */
.file-preview-container {
height: 100%;
display: flex;
flex-direction: column;
background-color: white;
}
.file-preview-header {
display: flex;
justify-content: space-between;
align-items: center;
padding: 0.75rem 1rem;
background-color: #f8f9fa;
border-bottom: 1px solid #e5e7eb;
}
.file-preview-header h4 {
margin: 0;
}
.file-preview-actions {
display: flex;
gap: 0.5rem;
}
.file-preview-content {
flex: 1;
overflow: auto;
padding: 1rem;
}
/* File preview types */
.file-preview-image {
max-width: 100%;
margin: 0 auto;
display: block;
}
.file-preview-text {
white-space: pre-wrap;
font-family: 'Consolas', 'Monaco', monospace;
font-size: 0.875rem;
overflow: auto;
padding: 0.5rem;
background-color: white;
border: 1px solid #e5e7eb;
border-radius: 0.25rem;
}
.file-preview-pdf {
width: 100%;
height: 100%;
min-height: 500px;
}
.file-preview-unsupported {
font-style: italic;
color: #6b7280;
margin-top: 1rem;
}
/* File information display */
.file-info {
text-align: center;
padding: 2rem;
}
.file-info h4 {
margin: 1rem 0 0.5rem;
}
.file-info p {
margin: 0.25rem 0;
color: #4b5563;
}
.file-actions {
display: flex;
align-items: center;
}
/* User input area for files */
.user-input-area {
padding: 1rem;
}
.user-input-container {
display: flex;
flex-direction: column;
gap: 0.75rem;
}
#user-message-input {
width: 100%;
min-height: 80px;
max-height: 150px;
resize: vertical;
border: 1px solid #d1d5db;
border-radius: 0.25rem;
padding: 0.75rem;
font-family: inherit;
font-size: 0.875rem;
overflow-y: auto;
}
#user-message-input:focus {
outline: none;
border-color: #3b82f6;
box-shadow: 0 0 0 2px rgba(59, 130, 246, 0.3);
}
#user-message-input.awaiting-input {
border-color: #f59e0b;
box-shadow: 0 0 0 2px rgba(245, 158, 11, 0.3);
}
.user-input-actions {
display: flex;
justify-content: space-between;
align-items: center;
}
/* Additional files container - improved visibility */
.additional-files-container {
display: flex;
flex-direction: column;
gap: 0.5rem;
padding: 10px;
height: 100%;
overflow-y: auto;
border: 1px solid #e5e7eb;
border-radius: 0.25rem;
background-color: #f9fafb;
}
.additional-file-item {
background-color: white;
border: 1px solid #e5e7eb;
border-radius: 0.25rem;
padding: 8px 10px;
display: flex;
align-items: center;
font-size: 0.875rem;
}
.additional-file-item i {
margin-right: 10px;
color: #6b7280;
}
.file-name {
flex: 1;
overflow: hidden;
text-overflow: ellipsis;
white-space: nowrap;
}
.remove-additional-file {
color: #ef4444;
cursor: pointer;
padding: 4px;
border-radius: 50%;
margin-left: 8px;
}
.remove-additional-file:hover {
background-color: #fee2e2;
}
/* File dropzone */
.file-dropzone-wrapper {
position: relative;
}
.file-dropzone-wrapper.dragging {
outline: 2px dashed #3b82f6;
background-color: rgba(59, 130, 246, 0.1);
}
.file-dropzone-wrapper.dragging::after {
content: "Drop files here";
position: absolute;
top: 50%;
left: 50%;
transform: translate(-50%, -50%);
background-color: rgba(255, 255, 255, 0.9);
padding: 10px 15px;
border-radius: 4px;
box-shadow: 0 2px 8px rgba(0, 0, 0, 0.1);
z-index: 10;
font-weight: bold;
color: #2196F3;
}
/* Selected files preview - improved visibility */
.selected-files-preview {
margin: 8px 0 12px 0;
background-color: #f0f9ff; /* Light blue background for better visibility */
border: 1px solid #93c5fd; /* Blue border */
border-radius: 0.25rem;
padding: 8px;
display: block; /* Ensure it's visible */
width: 100%;
}
.files-preview-header {
font-size: 0.75rem;
font-weight: 600; /* Slightly bolder */
color: #3b82f6; /* Blue text to match border */
margin-bottom: 8px;
}
.files-preview-list {
list-style: none;
padding: 0;
margin: 0;
}
.file-preview-item {
display: flex;
align-items: center;
padding: 6px 8px;
background-color: white;
border: 1px solid #e5e7eb;
border-radius: 0.25rem;
margin-bottom: 6px;
font-size: 0.8rem;
}
.file-preview-item:last-child {
margin-bottom: 0;
}
.file-preview-item i {
margin-right: 8px;
color: #6b7280;
}
.file-preview-item span {
flex: 1;
white-space: nowrap;
overflow: hidden;
text-overflow: ellipsis;
}
.remove-file-btn {
color: #ef4444;
background: none;
border: none;
padding: 4px;
cursor: pointer;
transition: background-color 0.2s;
border-radius: 50%;
}
.remove-file-btn:hover {
background-color: #fee2e2;
}
/* No files message */
.no-files-message {
color: #6b7280;
font-style: italic;
text-align: center;
padding: 20px;
}
/* Loading states */
.loading-preview {
display: flex;
flex-direction: column;
align-items: center;
justify-content: center;
padding: 2rem;
color: #6b7280;
}
.loading-preview i {
font-size: 2rem;
margin-bottom: 0.5rem;
}
/* Error states */
.error-state {
display: flex;
flex-direction: column;
align-items: center;
justify-content: center;
padding: 2rem;
color: #6b7280;
text-align: center;
}
.error-state i {
font-size: 3rem;
color: #ef4444;
margin-bottom: 1rem;
}
.error-state h4 {
margin: 0 0 0.5rem 0;
color: #111827;
}
.error-state p {
margin: 0;
color: #6b7280;
}
/* Animation */
@keyframes pulse {
0% {
box-shadow: 0 0 0 0 rgba(245, 158, 11, 0.4);
}
70% {
box-shadow: 0 0 0 8px rgba(245, 158, 11, 0);
}
100% {
box-shadow: 0 0 0 0 rgba(245, 158, 11, 0);
}
}
.pulse-attention {
animation: pulse 2s infinite;
}
/* Formatted preview */
.formatted-preview {
padding: 15px;
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, Oxygen, Ubuntu, Cantarell, 'Open Sans', 'Helvetica Neue', sans-serif;
line-height: 1.6;
color: #333;
overflow: auto;
}
.formatted-preview code {
font-family: 'Courier New', Courier, monospace;
background-color: #f5f5f5;
padding: 2px 4px;
border-radius: 3px;
font-size: 0.9em;
color: #d63384;
}
.formatted-preview pre {
background-color: #f8f8f8;
border: 1px solid #ddd;
border-radius: 4px;
padding: 10px;
overflow: auto;
margin: 10px 0;
}
.formatted-preview pre code {
background-color: transparent;
padding: 0;
color: #333;
font-size: 0.9em;
white-space: pre;
}
.formatted-preview h1,
.formatted-preview h2,
.formatted-preview h3 {
margin-top: 24px;
margin-bottom: 16px;
font-weight: 600;
line-height: 1.25;
}
.formatted-preview h1 {
font-size: 1.5em;
border-bottom: 1px solid #eaecef;
padding-bottom: 0.3em;
}
.formatted-preview h2 {
font-size: 1.25em;
border-bottom: 1px solid #eaecef;
padding-bottom: 0.3em;
}
.formatted-preview h3 {
font-size: 1em;
}
.formatted-preview ul,
.formatted-preview ol {
padding-left: 2em;
margin-top: 0;
margin-bottom: 16px;
}
.formatted-preview li + li {
margin-top: 0.25em;
}
.formatted-preview table {
border-collapse: collapse;
width: 100%;
margin: 16px 0;
}
.formatted-preview table th,
.formatted-preview table td {
border: 1px solid #ddd;
padding: 6px 13px;
}
.formatted-preview table tr {
background-color: #fff;
border-top: 1px solid #c6cbd1;
}
.formatted-preview table tr:nth-child(2n) {
background-color: #f6f8fa;
}
.formatted-preview table th {
background-color: #f6f8fa;
font-weight: 600;
}
/* Copy notification */
.copy-notification {
position: absolute;
top: 20px;
right: 20px;
background-color: #10b981;
color: white;
padding: 0.5rem 1rem;
border-radius: 0.25rem;
animation: fadeIn 0.3s ease, fadeOut 0.5s ease 1.5s forwards;
}
@keyframes fadeIn {
from { opacity: 0; transform: translateY(-10px); }
to { opacity: 1; transform: translateY(0); }
}
@keyframes fadeOut {
from { opacity: 1; transform: translateY(0); }
to { opacity: 0; transform: translateY(-10px); }
}
/* Override for .additional-files-container (base rule defined earlier in
   this file): cap the height and hide horizontal overflow so long file
   names don't create a sideways scrollbar. The original duplicate rule
   re-declared eight identical properties and carried a nonsensical
   comment ("Change from 'auto' to 'auto'"); only the genuinely new
   declarations are kept — same specificity, identical cascade result. */
.additional-files-container {
    max-height: 200px;
    overflow-y: auto;
    overflow-x: hidden;
}

View file

@ -1,297 +0,0 @@
/*
* PowerOn | Multi-Agent Service - Log Component Styles
* Styles for execution log display
*/
/* Log container */
.log-content-container {
flex: 1;
display: flex;
flex-direction: column;
height: 100%;
}
.log-title-container {
display: flex;
align-items: center;
padding: 0.5rem 1rem;
background-color: #f8f9fa;
border-bottom: 1px solid #e5e7eb;
}
.execution-log {
flex: 1;
overflow-y: auto;
background-color: #111827;
color: #34d399;
padding: 0.75rem;
font-family: 'Consolas', 'Monaco', monospace;
font-size: 0.75rem;
height: calc(100% - 40px); /* Subtract the height of the title container */
}
/* Log entries */
.log-entry {
margin-bottom: 0;
padding: 0.4rem;
border-radius: 0;
background-color: rgba(17, 24, 39, 0.8);
position: relative;
width: 100%;
box-sizing: border-box;
word-wrap: break-word;
overflow-wrap: break-word;
border-bottom: 1px solid rgba(255, 255, 255, 0.1);
}
.log-entry:last-child {
border-bottom: none;
}
.log-entry.collapsible {
cursor: default;
}
.log-entry .log-content {
display: block !important;
margin-top: 0.3rem;
padding: 0.3rem;
background-color: rgba(255, 255, 255, 0.05);
border-radius: 0.25rem;
font-size: 0.8rem;
color: #e5e7eb;
max-width: 100%;
overflow-x: auto;
}
.log-entry .toggle-icon {
display: none;
}
/* Log entry header */
.log-header {
display: flex;
justify-content: space-between;
align-items: flex-start;
}
.log-time {
color: #9ca3af;
font-size: 0.7rem;
margin-right: 0.5rem;
white-space: nowrap;
}
.log-agent {
color: #60a5fa;
margin-right: 0.5rem;
font-weight: 500;
white-space: nowrap;
}
.log-message {
word-break: break-word;
max-width: calc(100% - 120px);
display: inline-block;
vertical-align: top;
}
/* Progress indicator */
.log-progress-container {
width: 100px;
height: 8px;
background-color: #eee;
border-radius: 4px;
margin: 0 10px;
display: inline-block;
vertical-align: middle;
position: relative;
}
.log-progress-bar {
height: 100%;
background-color: #3498db;
border-radius: 4px;
}
.log-progress-text {
position: absolute;
right: -35px;
top: -2px;
font-size: 10px;
color: #777;
}
/* Log message types */
.log-success {
color: #2ecc71 !important;
font-weight: bold !important;
}
.log-error {
color: #e74c3c !important;
font-weight: bold !important;
}
.log-warning {
color: #f39c12 !important;
font-weight: bold !important;
}
.log-info {
color: #3498db !important;
}
/* Agent-specific styles */
.log-entry.highlighted {
border-left: 3px solid #3498db !important;
background-color: rgba(52, 152, 219, 0.1) !important;
padding-left: 10px !important;
margin: 5px 0 !important;
}
.log-entry.agent-moderator {
border-left: 3px solid #9b59b6 !important;
}
.log-entry.agent-user-agent {
border-left: 3px solid #2ecc71 !important;
}
.log-entry.agent-analysis-agent {
border-left: 3px solid #f39c12 !important;
}
.log-entry.agent-coder {
border-left: 3px solid #e74c3c !important;
}
.log-entry.agent-assistant {
border-left: 3px solid #1abc9c !important;
}
/* Waiting animation */
.waiting-dots {
display: inline-block !important;
width: 24px;
height: 16px;
text-align: left !important;
font-weight: bold !important;
color: #3498db !important;
margin-left: 5px;
}
/* Log content formatting */
.log-content h1 {
font-size: 1.5rem;
color: #1f2937;
border-bottom: 1px solid #e5e7eb;
padding-bottom: 0.5rem;
margin-bottom: 1rem;
}
.log-content h2 {
font-size: 1.25rem;
color: #374151;
margin-top: 1rem;
margin-bottom: 0.75rem;
}
.log-content h3 {
font-size: 1.1rem;
color: #4b5563;
margin-top: 0.75rem;
margin-bottom: 0.5rem;
}
.log-content strong {
font-weight: 600;
color: #111827;
}
.log-content em {
font-style: italic;
color: #4b5563;
}
.log-content code {
background-color: #f3f4f6;
border-radius: 0.25rem;
padding: 0.125rem 0.25rem;
font-family: 'Consolas', monospace;
font-size: 0.875em;
color: #111827;
}
.log-content .code-block {
background-color: #111827;
color: #34d399;
border-radius: 0.25rem;
padding: 0.75rem;
font-family: 'Consolas', monospace;
font-size: 0.875rem;
margin: 0.5rem 0;
white-space: pre-wrap;
word-break: break-all;
}
.log-content ul {
list-style-type: disc;
padding-left: 1.5rem;
margin: 0.5rem 0;
}
.log-content li {
margin-bottom: 0.25rem;
line-height: 1.4;
}
.log-content .log-table {
width: 100%;
border-collapse: collapse;
margin: 1rem 0;
box-shadow: 0 1px 3px rgba(0,0,0,0.05);
}
.log-content .log-table th,
.log-content .log-table td {
border: 1px solid #e5e7eb;
padding: 0.5rem;
text-align: left;
}
.log-content .log-table thead {
background-color: #f9fafb;
border-bottom: 2px solid #e5e7eb;
}
.log-content .log-table tr:nth-child(even) {
background-color: #f9fafb;
}
/* Empty state */
.log-empty-state {
color: #6b7280;
font-style: italic;
text-align: center;
padding: 1rem;
}
/* Scroll behavior */
.execution-log {
scrollbar-width: thin;
scrollbar-color: rgba(209, 213, 219, 0.5) transparent;
}
.execution-log::-webkit-scrollbar {
width: 6px;
}
.execution-log::-webkit-scrollbar-track {
background: transparent;
}
.execution-log::-webkit-scrollbar-thumb {
background-color: rgba(209, 213, 219, 0.5);
border-radius: 3px;
}

View file

@ -1,366 +0,0 @@
/*
* PowerOn | Multi-Agent Service - UI Component Styles
* Styles for UI elements, controls, and interactive components
*/
/* Prompt selection */
.prompt-selection {
width: 100%;
}
#prompt-select-main {
width: 100%;
padding: 0.375rem;
border: 1px solid #d1d5db;
border-radius: 0.25rem;
background-color: white;
font-size: 0.875rem;
margin-bottom: 0.5rem;
}
.prompt-select-label {
display: block;
margin-bottom: 0.25rem;
font-weight: 500;
}
/* Workflow controls */
.workflow-controls {
display: flex;
gap: 0.5rem;
margin-left: auto;
}
#stop-workflow-btn {
background-color: #ef4444;
color: white;
}
#stop-workflow-btn:hover {
background-color: #dc2626;
}
#reset-btn {
background-color: #f3f4f6;
color: #4b5563;
}
#reset-btn:hover {
background-color: #e5e7eb;
}
#send-user-message-btn {
background-color: #3b82f6;
color: white;
}
#send-user-message-btn:hover {
background-color: #2563eb;
}
#send-user-message-btn.running {
background-color: #6b7280;
cursor: not-allowed;
}
/* Upload button */
#upload-additional-file-btn {
background-color: #fff;
border: 1px solid #d1d5db;
color: #4b5563;
display: flex;
align-items: center;
gap: 0.375rem;
}
#upload-additional-file-btn:hover {
background-color: #f9fafb;
border-color: #9ca3af;
}
#additional-file-input {
display: none;
}
/* Data statistics */
.data-statistics {
display: flex;
gap: 0.5rem;
font-size: 0.75rem;
color: #6b7280;
padding: 0.25rem 0.5rem;
background-color: #f9fafb;
border-radius: 0.25rem;
margin-left: auto;
}
.stat-item {
white-space: nowrap;
display: flex;
align-items: center;
gap: 0.25rem;
}
/* Workflow status indicator */
.workflow-status-indicator {
padding: 5px 10px;
border-radius: 4px;
margin-bottom: 10px;
font-weight: bold;
display: flex;
align-items: center;
}
.workflow-status-indicator.running {
background-color: rgba(52, 152, 219, 0.2);
color: #3498db;
border-left: 3px solid #3498db;
}
.workflow-status-indicator.completed {
background-color: rgba(46, 204, 113, 0.2);
color: #2ecc71;
border-left: 3px solid #2ecc71;
}
.workflow-status-indicator.stopped {
background-color: rgba(231, 76, 60, 0.2);
color: #e74c3c;
border-left: 3px solid #e74c3c;
}
.workflow-status-indicator i {
margin-right: 8px;
}
/* Input guidance and warnings */
.input-guidance {
color: #6b7280;
font-size: 0.75rem;
margin-top: 0.375rem;
}
.input-warning {
color: #b91c1c;
font-size: 0.75rem;
font-weight: 500;
margin-top: 0.375rem;
animation: fadeIn 0.3s ease;
}
/* Collapsible sections */
.collapsible-section {
overflow: hidden;
transition: max-height 0.3s ease;
}
.collapsible-header {
cursor: pointer;
display: flex;
justify-content: space-between;
align-items: center;
padding: 0.5rem;
background-color: #f9fafb;
border: 1px solid #e5e7eb;
border-radius: 0.25rem;
}
.collapsible-header:hover {
background-color: #f3f4f6;
}
.collapsible-content {
max-height: 0;
overflow: hidden;
transition: max-height 0.3s ease;
}
.collapsible-section.expanded .collapsible-content {
max-height: 1000px; /* Arbitrary large value */
}
.collapsible-icon {
transition: transform 0.3s ease;
}
.collapsible-section.expanded .collapsible-icon {
transform: rotate(180deg);
}
/* Tooltips */
.tooltip {
position: relative;
display: inline-block;
}
.tooltip .tooltip-text {
visibility: hidden;
width: 200px;
background-color: #333;
color: #fff;
text-align: center;
border-radius: 6px;
padding: 5px;
position: absolute;
z-index: 1;
bottom: 125%;
left: 50%;
transform: translateX(-50%);
opacity: 0;
transition: opacity 0.3s;
font-size: 0.75rem;
}
.tooltip .tooltip-text::after {
content: "";
position: absolute;
top: 100%;
left: 50%;
margin-left: -5px;
border-width: 5px;
border-style: solid;
border-color: #333 transparent transparent transparent;
}
.tooltip:hover .tooltip-text {
visibility: visible;
opacity: 1;
}
/* Loading spinner */
.spinner {
width: 20px;
height: 20px;
border-radius: 50%;
border: 2px solid rgba(255, 255, 255, 0.3);
border-top-color: #fff;
animation: spin 0.8s linear infinite;
display: inline-block;
}
@keyframes spin {
to { transform: rotate(360deg); }
}
/* Toggle button */
.toggle-switch {
position: relative;
display: inline-block;
width: 40px;
height: 24px;
}
.toggle-switch input {
opacity: 0;
width: 0;
height: 0;
}
.toggle-slider {
position: absolute;
cursor: pointer;
top: 0;
left: 0;
right: 0;
bottom: 0;
background-color: #ccc;
transition: .4s;
border-radius: 24px;
}
.toggle-slider:before {
position: absolute;
content: "";
height: 16px;
width: 16px;
left: 4px;
bottom: 4px;
background-color: white;
transition: .4s;
border-radius: 50%;
}
input:checked + .toggle-slider {
background-color: #3b82f6;
}
input:focus + .toggle-slider {
box-shadow: 0 0 1px #3b82f6;
}
input:checked + .toggle-slider:before {
transform: translateX(16px);
}
/* Badge */
.badge {
display: inline-block;
padding: 0.25em 0.5em;
font-size: 0.75em;
font-weight: 500;
line-height: 1;
text-align: center;
white-space: nowrap;
vertical-align: baseline;
border-radius: 0.25rem;
margin-left: 0.5rem;
}
.badge-primary {
background-color: #3b82f6;
color: white;
}
.badge-secondary {
background-color: #9ca3af;
color: white;
}
.badge-success {
background-color: #10b981;
color: white;
}
.badge-danger {
background-color: #ef4444;
color: white;
}
.badge-warning {
background-color: #f59e0b;
color: white;
}
.badge-info {
background-color: #0ea5e9;
color: white;
}
/* Animation keyframes */
@keyframes fadeIn {
from { opacity: 0; }
to { opacity: 1; }
}
@keyframes slideInUp {
from { transform: translateY(10px); opacity: 0; }
to { transform: translateY(0); opacity: 1; }
}
@keyframes pulse {
0% { opacity: 1; }
50% { opacity: 0.5; }
100% { opacity: 1; }
}
.fade-in {
animation: fadeIn 0.3s ease;
}
.slide-in-up {
animation: slideInUp 0.3s ease;
}
.pulse {
animation: pulse 1.5s infinite;
}

View file

@ -1,116 +0,0 @@
<!-- Improved Workflow Interface with 3-section layout -->
<div class="workflow-container">
<!-- HEADER SECTION - Fixed at top -->
<div class="workflow-header">
<div class="header-columns">
<div class="header-left">
<div class="header-content-wrapper">
<div class="section-toggle">
<button id="toggle-header-btn" class="btn btn-sm btn-outline-secondary">
<i class="fas fa-minus"></i>
</button>
</div>
<div class="log-content-container">
<div id="execution-log" class="execution-log"></div>
</div>
</div>
</div>
<div class="header-right">
<div class="header-right-content">
<div id="data-statistics" class="data-statistics">
<span class="stat-item">↑ 0 B</span>
<span class="stat-item">↓ 0 B</span>
</div>
</div>
</div>
</div>
</div>
<!-- CHAT SECTION - Fills remaining space -->
<div class="chat-section">
<div class="chat-columns">
        <!-- Left column (70%) - Chat history -->
<div class="chat-left">
<!-- Empty state when no chat -->
<div id="empty-chat-state" class="empty-state">
<div class="empty-state-content">
<i class="fas fa-comments fa-3x"></i>
<h4>Noch keine Kommunikation</h4>
<p>Starten Sie einen Workflow, um den Multi-Agent-Chat zu sehen.</p>
</div>
</div>
<!-- Scrollable chat messages -->
<div id="agent-chat-messages" class="agent-chat-messages"></div>
</div>
        <!-- Right column (30%) - File preview -->
<div class="chat-right">
<div id="file-preview-container" class="file-preview-container">
<div class="file-preview-header">
<h4>Dateivorschau</h4>
<div class="file-preview-actions">
<button id="download-file-btn" class="btn btn-sm btn-outline-primary" disabled>
<i class="fas fa-download"></i>
</button>
<button id="copy-file-btn" class="btn btn-sm btn-outline-secondary" disabled>
<i class="fas fa-copy"></i>
</button>
<button id="close-file-preview-btn" class="btn btn-sm btn-outline-secondary">
<i class="fas fa-times"></i>
</button>
</div>
</div>
<div id="file-preview-content" class="file-preview-content"></div>
</div>
</div>
</div>
</div>
<!-- FOOTER SECTION - Fixed at bottom -->
<div class="workflow-footer">
<div class="footer-columns">
        <!-- Left column (70%) - Prompt area -->
<div class="footer-left">
<div id="user-input-area" class="user-input-area file-dropzone-wrapper">
<div class="user-input-container">
<div class="prompt-selection">
<select id="prompt-select-main" class="form-control">
<option value="">Prompt-Vorlage wählen</option>
</select>
</div>
<textarea
id="user-message-input"
class="form-control"
placeholder="Beschreiben Sie die Aufgabe für die Agenten..."
></textarea>
<div class="user-input-actions">
<div class="file-actions">
<button id="upload-additional-file-btn" class="btn btn-outline-secondary">
<i class="fas fa-paperclip"></i> Dateien
</button>
<input type="file" id="additional-file-input" multiple style="display: none;" />
</div>
<div class="workflow-controls">
<button id="stop-workflow-btn" class="btn btn-danger">
<i class="fas fa-stop"></i>Stop
</button>
<button id="reset-btn" class="btn btn-outline-secondary">
<i class="fas fa-redo"></i>Reset
</button>
<button id="send-user-message-btn" class="btn btn-primary">
<i class="fas fa-play"></i>Start
</button>
</div>
</div>
</div>
</div>
</div>
        <!-- Right column (30%) - Uploaded files -->
<div class="footer-right">
<div id="additional-files-container" class="additional-files-container"></div>
</div>
</div>
</div>
</div>

View file

@ -1,374 +0,0 @@
/*
* PowerOn | Multi-Agent Service - Core Workflow Styles
* Core container layout and basic structure styles
*/
/* Main container layout */
.workflow-container {
display: flex;
flex-direction: column;
height: 100vh;
width: 100%;
overflow: hidden;
}
/* HEADER SECTION */
.workflow-header {
flex: 0 0 auto;
position: sticky;
top: 0;
z-index: 100;
background-color: white;
box-shadow: 0 2px 4px rgba(0,0,0,0.1);
border-bottom: 1px solid #e5e7eb;
transition: all 0.3s ease;
height: 200px; /* Set default height for header section */
}
.workflow-header.collapsed {
height: 50px;
overflow: hidden;
}
.header-columns {
display: flex;
width: 100%;
height: 100%;
}
.header-left {
width: 70%;
border-right: 1px solid #e5e7eb;
height: 100%;
transition: all 0.3s ease;
display: flex;
}
.header-right {
width: 30%;
padding: 1rem;
display: flex;
align-items: flex-start;
justify-content: flex-end;
}
.header-content-wrapper {
display: flex;
width: 100%;
height: 100%;
}
.section-toggle {
width: 40px;
background-color: #f8f9fa;
display: flex;
justify-content: center;
padding-top: 10px;
border-right: 1px solid #e5e7eb;
}
/* CHAT SECTION */
.chat-section {
flex: 1;
min-height: 0; /* Important for flex child scrolling */
overflow: hidden;
}
.chat-columns {
display: flex;
height: 100%;
}
.chat-left {
width: 70%;
display: flex;
flex-direction: column;
border-right: 1px solid #e5e7eb;
height: 100%;
overflow: hidden;
}
.chat-right {
width: 30%;
height: 100%;
overflow: hidden;
}
.empty-state {
flex: 1;
display: flex;
align-items: center;
justify-content: center;
color: #9ca3af;
padding: 2rem;
}
.empty-state-content {
text-align: center;
}
/* FOOTER SECTION */
.workflow-footer {
flex: 0 0 auto;
position: sticky;
bottom: 0;
z-index: 100;
background-color: white;
box-shadow: 0 -2px 4px rgba(0,0,0,0.05);
border-top: 1px solid #e5e7eb;
max-height: 25vh;
transition: all 0.3s ease;
}
.footer-columns {
display: flex;
width: 100%;
}
.footer-left {
width: 70%;
border-right: 1px solid #e5e7eb;
}
.footer-right {
width: 30%;
padding: 1rem;
overflow-y: auto;
}
/* Button styles */
.btn {
display: inline-flex;
align-items: center;
justify-content: center;
gap: 0.375rem;
padding: 0.5rem 0.75rem;
border-radius: 0.25rem;
font-size: 0.875rem;
font-weight: 500;
cursor: pointer;
transition: all 0.2s ease;
}
.btn-sm {
padding: 0.25rem 0.5rem;
font-size: 0.75rem;
}
.btn-outline-secondary {
background-color: transparent;
border: 1px solid #d1d5db;
color: #4b5563;
}
.btn-outline-secondary:hover {
background-color: #f3f4f6;
}
.btn-outline-primary {
background-color: transparent;
border: 1px solid #3b82f6;
color: #3b82f6;
}
.btn-outline-primary:hover {
background-color: #eff6ff;
}
.btn-primary {
background-color: #3b82f6;
border: 1px solid #3b82f6;
color: white;
}
.btn-primary:hover {
background-color: #2563eb;
border-color: #2563eb;
}
.btn-danger {
background-color: #ef4444;
border: 1px solid #ef4444;
color: white;
}
.btn-danger:hover {
background-color: #dc2626;
border-color: #dc2626;
}
.btn:disabled {
opacity: 0.5;
cursor: not-allowed;
}
/* Toast notification styling */
#toast-container {
position: fixed;
bottom: 20px;
right: 20px;
z-index: 1000;
display: flex;
flex-direction: column;
gap: 10px;
}
.toast {
min-width: 250px;
max-width: 350px;
background-color: white;
color: #333;
border-radius: 4px;
padding: 12px 15px;
box-shadow: 0 2px 10px rgba(0, 0, 0, 0.2);
opacity: 1;
transition: all 0.3s ease;
border-left: 4px solid #4CAF50;
}
.toast.error {
border-left-color: #F44336;
}
.toast.warning {
border-left-color: #FF9800;
}
.toast.info {
border-left-color: #2196F3;
}
.toast.hide {
opacity: 0;
transform: translateX(30px);
}
.toast-content {
display: flex;
align-items: center;
}
.toast-content i {
margin-right: 10px;
font-size: 18px;
}
.toast.success i {
color: #4CAF50;
}
.toast.error i {
color: #F44336;
}
.toast.warning i {
color: #FF9800;
}
.toast.info i {
color: #2196F3;
}
/* Error message */
.workflow-error-message {
position: fixed;
top: 50%;
left: 50%;
transform: translate(-50%, -50%);
background-color: white;
padding: 20px;
border-radius: 8px;
box-shadow: 0 4px 20px rgba(0, 0, 0, 0.15);
max-width: 500px;
width: 90%;
z-index: 1000;
}
.error-content {
display: flex;
flex-direction: column;
align-items: center;
text-align: center;
}
.error-content i {
font-size: 48px;
color: #ef4444;
margin-bottom: 16px;
}
.error-content h3 {
margin: 0 0 12px 0;
color: #111827;
}
.error-content p {
margin: 0;
color: #4b5563;
}
/* Animation keyframes */
@keyframes fadeIn {
from { opacity: 0; }
to { opacity: 1; }
}
@keyframes fadeOut {
from { opacity: 1; transform: translateY(0); }
to { opacity: 0; transform: translateY(-10px); }
}
/* Responsive adjustments */
@media (max-width: 992px) {
.header-columns, .chat-columns, .footer-columns {
flex-direction: column;
}
.header-left, .header-right, .chat-left, .chat-right, .footer-left, .footer-right {
width: 100%;
border-right: none;
}
.header-left, .chat-left, .footer-left {
border-bottom: 1px solid #e5e7eb;
}
.workflow-footer {
max-height: 50vh;
}
.header-content-wrapper {
flex-direction: column;
}
.section-toggle {
width: 100%;
height: 40px;
padding-top: 0;
padding-left: 10px;
justify-content: flex-start;
align-items: center;
border-right: none;
border-bottom: 1px solid #e5e7eb;
}
}
/* Common scrollbar styling */
.workflow-container ::-webkit-scrollbar {
width: 6px;
height: 6px;
}
.workflow-container ::-webkit-scrollbar-track {
background: transparent;
}
.workflow-container ::-webkit-scrollbar-thumb {
background-color: #d1d5db;
border-radius: 3px;
}
.workflow-container {
scrollbar-width: thin;
scrollbar-color: #d1d5db transparent;
}

View file

@ -1,613 +0,0 @@
/*
* PowerOn | Multi-Agent Service - Hauptstyles
* Optimiert für Expertenansicht mit mehr Platz für den Workflow
*/
/* ===== GRUNDLEGENDE RESETS UND BASIS-STYLES ===== */
* {
margin: 0;
padding: 0;
box-sizing: border-box;
}
body {
font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;
background-color: #f0f2f5;
color: #333;
line-height: 1.5;
font-size: 14px;
}
ul {
list-style: none;
}
a {
text-decoration: none;
color: inherit;
}
button {
cursor: pointer;
border: none;
background: none;
font-family: inherit;
}
h2 {
font-size: 1.25rem;
font-weight: 600;
margin-bottom: 0.75rem;
}
h3 {
font-size: 1rem;
font-weight: 600;
margin-bottom: 0.75rem;
color: #4b5563;
}
h4 {
font-size: 0.875rem;
font-weight: 600;
margin-bottom: 0.5rem;
color: #6b7280;
}
/* ===== LAYOUT COMPONENTS ===== */
/* Navbar */
.navbar {
background-color: #2563eb;
color: white;
padding: 0.5rem 1rem;
box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1);
height: 3rem;
}
.navbar-container {
display: flex;
justify-content: space-between;
align-items: center;
max-width: 1800px;
margin: 0 auto;
height: 100%;
}
.navbar-logo {
font-size: 1.25rem;
font-weight: 600;
}
.navbar-user {
display: flex;
align-items: center;
gap: 0.5rem;
}
/* App Container Layout */
.app-container {
display: flex;
max-width: 1800px;
margin: 0 auto;
min-height: calc(100vh - 3rem);
/* Entfernung von eventuellen Abständen */
padding: 0;
gap: 0;
}
/* Main Content */
.main-content {
flex: 1;
padding: 0.75rem 1rem;
display: flex;
flex-direction: column; /* Stellt sicher, dass der Inhalt vertikal fließt */
/* Entfernung aller Abstandswerte */
margin: 0;
border-left: none; /* Falls eine Grenze existiert */
}
/* ===== COMMON COMPONENTS ===== */
/* Buttons */
.btn, .add-btn {
display: inline-flex;
align-items: center;
gap: 0.25rem;
padding: 0.375rem 0.75rem;
border-radius: 0.25rem;
font-size: 0.875rem;
font-weight: 500;
transition: background-color 0.2s;
}
.btn-sm {
padding: 0.25rem 0.5rem;
font-size: 0.75rem;
}
.icon-button {
display: flex;
align-items: center;
justify-content: center;
width: 1.75rem;
height: 1.75rem;
border-radius: 50%;
color: white;
transition: background-color 0.2s;
}
.icon-button:hover {
background-color: rgba(255, 255, 255, 0.2);
}
/* Primary Buttons */
.add-btn {
background-color: #10b981;
color: white;
}
.add-btn:hover {
background-color: #059669;
}
/* Action Buttons Container */
.action-buttons {
display: flex;
justify-content: flex-end;
gap: 0.5rem;
margin-bottom: 0.75rem;
}
/* List Actions Buttons */
.edit-btn, .edit-agent-btn, .edit-workspace-btn, .edit-user-btn, .edit-mandate-btn {
background-color: #3b82f6;
color: white;
padding: 0.25rem 0.5rem;
border-radius: 0.25rem;
font-size: 0.75rem;
}
.edit-btn:hover, .edit-agent-btn:hover, .edit-workspace-btn:hover, .edit-user-btn:hover, .edit-mandate-btn:hover {
background-color: #2563eb;
}
.delete-btn, .delete-agent-btn, .delete-workspace-btn, .delete-user-btn, .delete-mandate-btn, .delete-file-btn, .delete-prompt-btn {
background-color: #ef4444;
color: white;
padding: 0.25rem 0.5rem;
border-radius: 0.25rem;
font-size: 0.75rem;
}
.delete-btn:hover, .delete-agent-btn:hover, .delete-workspace-btn:hover, .delete-user-btn:hover, .delete-mandate-btn:hover, .delete-file-btn:hover, .delete-prompt-btn:hover {
background-color: #dc2626;
}
.activate-btn, .activate-workspace-btn, .activate-user-btn, .use-prompt-btn {
background-color: #10b981;
color: white;
padding: 0.25rem 0.5rem;
border-radius: 0.25rem;
font-size: 0.75rem;
}
.activate-btn:hover, .activate-workspace-btn:hover, .activate-user-btn:hover, .use-prompt-btn:hover {
background-color: #059669;
}
/* Cards */
.card {
background-color: white;
border-radius: 0.375rem;
box-shadow: 0 1px 3px rgba(0, 0, 0, 0.1);
padding: 0.75rem;
margin-bottom: 0.75rem;
}
/* Section Headers */
.section-header {
display: flex;
justify-content: space-between;
align-items: center;
margin-bottom: 0.75rem;
}
/* ===== SPECIFIC COMPONENTS ===== */
/* Files Module */
.files-tabs {
display: flex;
margin-bottom: 1rem;
border-bottom: 1px solid #e5e7eb;
}
.tab-btn {
padding: 0.5rem 1rem;
border-bottom: 2px solid transparent;
cursor: pointer;
font-weight: 500;
}
.tab-btn.active {
border-bottom-color: #3b82f6;
color: #3b82f6;
}
.search-bar {
display: flex;
margin-bottom: 1rem;
}
.search-bar input {
flex: 1;
padding: 0.5rem;
border: 1px solid #d1d5db;
border-radius: 0.25rem 0 0 0.25rem;
}
.search-bar button {
padding: 0.5rem 0.75rem;
background: #3b82f6;
color: white;
border-radius: 0 0.25rem 0.25rem 0;
}
.files-list-container {
margin-top: 1rem;
}
.files-grid-header {
display: grid;
grid-template-columns: 2fr 1fr 1fr 1fr 1fr;
background-color: #f3f4f6;
padding: 0.5rem;
font-weight: 500;
border-radius: 0.25rem 0.25rem 0 0;
}
.files-list {
border: 1px solid #e5e7eb;
border-radius: 0 0 0.25rem 0.25rem;
}
/* File header columns */
.file-header-name, .file-header-type, .file-header-size, .file-header-date, .file-header-actions {
padding: 0.5rem;
}
.refresh-btn {
background-color: #f3f4f6;
color: #4b5563;
padding: 0.25rem 0.5rem;
border-radius: 0.25rem;
font-size: 0.875rem;
}
.refresh-btn:hover {
background-color: #e5e7eb;
}
.module-wrapper {
padding: 0.5rem;
}
.module-header {
display: flex;
justify-content: space-between;
align-items: center;
margin-bottom: 1rem;
}
.module-content {
margin-bottom: 1rem;
}
.module-actions {
display: flex;
gap: 0.5rem;
}
.upload-section {
margin-bottom: 1rem;
display: flex;
flex-direction: column;
gap: 0.5rem;
}
.upload-info {
font-size: 0.75rem;
color: #6b7280;
}
/* Module Container */
.module-container {
height: calc(100vh - 5rem); /* Volle Höhe für Module */
overflow-y: auto;
}
/* ===== LIST ITEM COMPONENTS ===== */
/* Common List Items */
.list-item, .agent-list-item, .workspace-list-item, .prompt-item, .user-item, .mandate-item {
background-color: white;
border: 1px solid #e5e7eb;
border-radius: 0.25rem;
padding: 0.75rem;
margin-bottom: 0.75rem;
box-shadow: 0 1px 2px rgba(0, 0, 0, 0.05);
}
.list-item:hover, .agent-list-item:hover, .workspace-list-item:hover, .prompt-item:hover, .user-item:hover, .mandate-item:hover {
box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1);
}
.list-header, .agent-header, .workspace-header, .prompt-header, .user-header, .mandate-header {
display: flex;
justify-content: space-between;
align-items: center;
margin-bottom: 0.5rem;
}
.list-body, .agent-description, .workspace-meta, .prompt-meta, .user-meta, .mandate-meta {
font-size: 0.75rem;
color: #6b7280;
margin-bottom: 0.5rem;
}
.list-actions, .agent-actions, .workspace-actions, .prompt-actions, .user-actions, .mandate-actions {
display: flex;
justify-content: flex-end;
gap: 0.375rem;
}
/* Badges and Tags */
.badge, .tag, .agent-type, .workspace-type, .mandate-language, .user-badge {
font-size: 0.7rem;
padding: 0.125rem 0.375rem;
border-radius: 9999px;
display: inline-flex;
align-items: center;
}
.badge-blue, .agent-type, .workspace-type {
background-color: #e0f2fe;
color: #0369a1;
}
.badge-gray {
background-color: #f3f4f6;
color: #4b5563;
}
.badge-green, .user-badge.active {
background-color: #dcfce7;
color: #16a34a;
}
.badge-red, .user-badge.disabled {
background-color: #fee2e2;
color: #dc2626;
}
.badge-purple, .user-badge.sysadmin {
background-color: #f3e8ff;
color: #7e22ce;
}
/* ===== FORMS AND MODALS ===== */
/* Form Groups */
.form-group {
margin-bottom: 0.75rem;
}
.form-group label {
display: block;
font-weight: 500;
margin-bottom: 0.375rem;
font-size: 0.875rem;
color: #4b5563;
}
.form-control {
width: 100%;
padding: 0.5rem;
border: 1px solid #d1d5db;
border-radius: 0.25rem;
font-size: 0.875rem;
}
.form-control:focus {
outline: none;
border-color: #2563eb;
box-shadow: 0 0 0 2px rgba(37, 99, 235, 0.1);
}
/* Checkboxes in Formularen */
.form-field.checkbox {
display: flex;
align-items: center;
}
.form-field.checkbox input[type="checkbox"] {
margin-right: 0.5rem;
width: auto;
}
.form-field.checkbox label, .checkbox-label {
margin-bottom: 0;
display: flex;
align-items: center;
cursor: pointer;
}
/* Feld-Beschreibungen */
.field-description {
font-size: 0.75rem;
color: #6b7280;
margin-top: 0.25rem;
}
/* Modals */
.modal-overlay {
position: fixed;
top: 0;
left: 0;
right: 0;
bottom: 0;
background-color: rgba(0, 0, 0, 0.5);
display: flex;
justify-content: center;
align-items: center;
z-index: 1000;
}
.modal-container {
background-color: white;
border-radius: 0.375rem;
box-shadow: 0 2px 10px rgba(0, 0, 0, 0.2);
width: 90%;
max-width: 500px;
max-height: 90vh;
overflow-y: auto;
}
.modal-header {
display: flex;
justify-content: space-between;
align-items: center;
padding: 0.75rem;
border-bottom: 1px solid #e0e0e0;
}
.modal-header h3 {
margin: 0;
font-size: 1rem;
}
.close-modal-btn {
background: none;
border: none;
font-size: 1.25rem;
cursor: pointer;
}
.modal-body {
padding: 0.75rem;
}
.modal-footer {
padding: 0.75rem;
display: flex;
justify-content: flex-end;
gap: 0.5rem;
border-top: 1px solid #e0e0e0;
}
/* ===== LOGIN & REGISTRATION ===== */
.login-container {
max-width: 400px;
margin: 80px auto;
padding: 1.5rem;
background-color: white;
border-radius: 0.375rem;
box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1);
}
.login-container h1 {
text-align: center;
margin-bottom: 1.5rem;
color: #2563eb;
font-size: 1.5rem;
}
.login-form-group {
margin-bottom: 1rem;
}
.login-form-group label {
display: block;
margin-bottom: 0.375rem;
font-weight: 500;
font-size: 0.875rem;
color: #4b5563;
}
.login-form-group input {
width: 100%;
padding: 0.625rem;
border: 1px solid #d1d5db;
border-radius: 0.25rem;
font-size: 0.875rem;
}
.login-form-group input:focus {
outline: none;
border-color: #2563eb;
box-shadow: 0 0 0 2px rgba(37, 99, 235, 0.1);
}
.login-btn {
width: 100%;
padding: 0.625rem;
background-color: #2563eb;
color: white;
border: none;
border-radius: 0.25rem;
font-weight: 500;
cursor: pointer;
transition: background-color 0.2s;
margin-top: 0.5rem;
}
.login-btn:hover {
background-color: #1d4ed8;
}
.register-link {
margin-top: 1rem;
text-align: center;
font-size: 0.875rem;
color: #6b7280;
}
.register-link a {
color: #2563eb;
font-weight: 500;
}
.register-link a:hover {
text-decoration: underline;
}
.login-error-message {
padding: 0.625rem;
margin-bottom: 1rem;
border-radius: 0.25rem;
font-size: 0.875rem;
background-color: #fef2f2;
color: #ef4444;
border: 1px solid #fecaca;
}
.auth-page {
min-height: 100vh;
display: flex;
justify-content: center;
align-items: center;
background-color: #f3f4f6;
}
/* Animation für Formularwechsel */
#login-container, #register-container {
transition: all 0.3s ease;
}
/* Responsive Anpassungen */
@media (max-width: 480px) {
.login-container {
max-width: 100%;
margin: 40px 20px;
padding: 1.25rem;
}
.login-container h1 {
font-size: 1.25rem;
}
}

View file

@ -1,443 +0,0 @@
/**
* Ausgelagerte Styles für das generische Entitätsmodul
* Optimiert für kompakte Buttons ohne Zeilenumbruch
*/
/* ===== ENTITY TABLE STYLES ===== */
.entity-table-wrapper {
width: 100%;
overflow-x: auto;
}
.entity-actions {
display: flex;
justify-content: space-between;
align-items: center;
margin-bottom: 0.75rem;
}
.entity-bulk-actions {
display: flex;
align-items: center;
gap: 0.5rem;
}
.entity-selection-count {
font-size: 0.75rem;
color: #6b7280;
}
.entity-filter {
display: flex;
align-items: center;
}
.entity-search {
padding: 0.375rem;
border: 1px solid #d1d5db;
border-radius: 0.25rem;
margin-right: 0.5rem;
font-size: 0.875rem;
}
.entity-table-container {
margin-bottom: 1rem;
border: 1px solid #e5e7eb;
border-radius: 0.25rem;
overflow: hidden;
}
.entity-table {
width: 100%;
border-collapse: collapse;
}
.entity-table th,
.entity-table td {
padding: 0.5rem;
text-align: left;
border-bottom: 1px solid #e5e7eb;
}
.entity-table th {
background-color: #f9fafb;
font-weight: 500;
}
.entity-table th.sortable {
cursor: pointer;
}
.entity-table th.sortable:hover {
background-color: #f3f4f6;
}
/* Checkbox und ID Columns */
.entity-checkbox-column {
width: 36px;
}
.entity-id-column {
width: 50px;
}
/* Aktionen-Spalte breiter und mit fester Breite */
.entity-actions-column {
width: 120px; /* Feste Breite */
min-width: 120px; /* Mindestbreite, verhindert Umbrüche */
}
.entity-checkbox-cell,
.entity-id-cell {
white-space: nowrap;
}
/* Tabellenzelle für Aktionen mit fester Breite */
.entity-actions-cell {
white-space: nowrap;
width: 120px;
min-width: 120px;
max-width: 120px;
}
/* Pagination */
.entity-pagination {
display: flex;
align-items: center;
justify-content: center;
gap: 0.5rem;
font-size: 0.875rem;
}
.entity-page-prev,
.entity-page-next {
padding: 0.25rem 0.5rem;
border-radius: 0.25rem;
background-color: #f3f4f6;
}
.entity-page-prev:disabled,
.entity-page-next:disabled {
opacity: 0.5;
cursor: not-allowed;
}
.entity-items-per-page {
padding: 0.25rem;
border: 1px solid #d1d5db;
border-radius: 0.25rem;
}
/* Empty and Loading States */
.entity-empty-state {
text-align: center;
padding: 2rem;
}
.entity-add-btn-empty {
margin-top: 0.5rem;
padding: 0.375rem 0.75rem;
background-color: #3b82f6;
color: white;
border-radius: 0.25rem;
}
.entity-loading {
display: flex;
flex-direction: column;
align-items: center;
justify-content: center;
padding: 2rem;
}
.entity-spinner {
width: 2rem;
height: 2rem;
border: 3px solid #e5e7eb;
border-top-color: #3b82f6;
border-radius: 50%;
animation: spin 1s linear infinite;
margin-bottom: 0.5rem;
}
@keyframes spin {
to { transform: rotate(360deg); }
}
.entity-error {
background-color: #fee2e2;
color: #b91c1c;
padding: 0.75rem;
border-radius: 0.25rem;
margin-bottom: 1rem;
}
.entity-retry-btn {
background-color: #ef4444;
color: white;
padding: 0.25rem 0.5rem;
border-radius: 0.25rem;
margin-top: 0.5rem;
}
/* Row Actions Container - nebeneinander ohne Umbruch */
.entity-row-actions,
.entity-custom-actions {
display: flex;
flex-wrap: nowrap; /* Verhindert Umbrüche */
gap: 4px; /* Kleinerer Abstand */
}
/* Kleinere optimierte Button-Styles */
.entity-action-btn,
.entity-view-btn,
.entity-edit-btn,
.entity-delete-btn,
.entity-custom-btn,
.entity-use-btn,
.entity-copy-btn,
.entity-activate-btn,
.entity-deactivate-btn,
.entity-reset-btn,
.entity-download-btn,
.entity-default-btn {
display: inline-flex;
align-items: center;
justify-content: center;
width: 1.25rem; /* Kleinere Buttons */
height: 1.25rem; /* Kleinere Buttons */
border-radius: 0.25rem;
color: white;
font-size: 0.7rem; /* Kleinere Icons */
position: relative;
padding: 0; /* Kein Padding */
margin: 0 1px; /* Kleinerer Rand */
min-width: 1.25rem; /* Feste Mindestbreite */
}
/* Tooltips für Buttons ohne Text - nach unten positioniert für bessere Sichtbarkeit */
.entity-action-btn::before,
.entity-view-btn::before,
.entity-edit-btn::before,
.entity-delete-btn::before,
.entity-custom-btn::before,
.entity-use-btn::before,
.entity-copy-btn::before,
.entity-activate-btn::before,
.entity-deactivate-btn::before,
.entity-reset-btn::before,
.entity-download-btn::before,
.entity-default-btn::before {
content: attr(data-tooltip);
position: absolute;
bottom: -25px; /* Unter dem Button anzeigen statt oben */
left: 50%;
transform: translateX(-50%);
background-color: #333;
color: white;
padding: 2px 6px;
border-radius: 3px;
font-size: 0.7rem;
white-space: nowrap;
visibility: hidden;
opacity: 0;
transition: opacity 0.3s;
z-index: 10; /* Höherer z-index */
}
.entity-action-btn:hover::before,
.entity-view-btn:hover::before,
.entity-edit-btn:hover::before,
.entity-delete-btn:hover::before,
.entity-custom-btn:hover::before,
.entity-use-btn:hover::before,
.entity-copy-btn:hover::before,
.entity-activate-btn:hover::before,
.entity-deactivate-btn:hover::before,
.entity-reset-btn:hover::before,
.entity-download-btn:hover::before,
.entity-default-btn:hover::before {
visibility: visible;
opacity: 1;
}
/* Farben für verschiedene Button-Typen */
.entity-view-btn { background-color: #3b82f6; }
.entity-edit-btn { background-color: #10b981; }
.entity-delete-btn { background-color: #ef4444; }
.entity-copy-btn { background-color: #6b7280; }
.entity-use-btn { background-color: #059669; }
.entity-activate-btn { background-color: #16a34a; }
.entity-deactivate-btn { background-color: #dc2626; }
.entity-reset-btn { background-color: #f59e0b; }
.entity-download-btn { background-color: #8b5cf6; }
.entity-default-btn { background-color: #f59e0b; }
.entity-add-btn {
background-color: #3b82f6;
color: white;
padding: 0.375rem 0.75rem;
border-radius: 0.25rem;
font-size: 0.875rem;
display: inline-flex;
align-items: center;
gap: 0.25rem;
}
.entity-add-btn:hover {
background-color: #2563eb;
}
.entity-bulk-delete-btn {
background-color: #ef4444;
color: white;
padding: 0.25rem 0.5rem;
border-radius: 0.25rem;
font-size: 0.75rem;
}
.entity-bulk-delete-btn:hover {
background-color: #dc2626;
}
/* ===== ENTITY MODAL STYLES ===== */
.entity-modal {
position: fixed;
top: 0;
left: 0;
right: 0;
bottom: 0;
background-color: rgba(0, 0, 0, 0.5);
display: flex;
justify-content: center;
align-items: center;
z-index: 1000;
}
.entity-modal-content {
background-color: white;
border-radius: 0.375rem;
box-shadow: 0 2px 10px rgba(0, 0, 0, 0.2);
width: 90%;
max-width: 500px;
max-height: 90vh;
overflow-y: auto;
}
.entity-modal-header {
display: flex;
justify-content: space-between;
align-items: center;
padding: 0.75rem;
border-bottom: 1px solid #e0e0e0;
}
.entity-modal-header h3 {
margin: 0;
font-size: 1rem;
}
.entity-modal-close {
background: none;
border: none;
font-size: 1.25rem;
cursor: pointer;
}
.entity-modal-body {
padding: 0.75rem;
}
.entity-modal-footer {
padding: 0.75rem;
display: flex;
justify-content: flex-end;
gap: 0.5rem;
border-top: 1px solid #e0e0e0;
}
.entity-modal-save,
.entity-modal-cancel,
.entity-modal-edit,
.entity-modal-delete {
padding: 0.375rem 0.75rem;
border-radius: 0.25rem;
font-size: 0.875rem;
cursor: pointer;
}
.entity-modal-save,
.entity-modal-edit {
background-color: #2563eb;
color: white;
}
.entity-modal-cancel {
background-color: #f3f4f6;
color: #4b5563;
}
.entity-modal-delete {
background-color: #ef4444;
color: white;
}
/* Entity form */
.entity-form {
width: 100%;
}
.entity-form-fields {
display: flex;
flex-direction: column;
gap: 0.75rem;
}
.entity-form-group {
margin-bottom: 0.5rem;
}
.entity-form-group label {
display: block;
font-weight: 500;
margin-bottom: 0.375rem;
font-size: 0.875rem;
}
.entity-form-group input,
.entity-form-group textarea,
.entity-form-group select {
width: 100%;
padding: 0.5rem;
border: 1px solid #d1d5db;
border-radius: 0.25rem;
font-size: 0.875rem;
}
/* Entity Details */
.entity-details {
margin-bottom: 1rem;
}
.entity-detail-row {
display: flex;
margin-bottom: 0.5rem;
border-bottom: 1px solid #f3f4f6;
padding-bottom: 0.5rem;
}
.entity-detail-label {
font-weight: 500;
width: 30%;
color: #4b5563;
}
.entity-detail-value {
width: 70%;
}
.entity-custom-details {
margin-top: 1rem;
padding-top: 1rem;
border-top: 1px solid #e5e7eb;
}

View file

@ -1,358 +0,0 @@
/*
 * PowerOn | Multi-Agent Service - Navigation CSS
 * Optimized styles for the navigation components
 */
/* ===== NAVIGATION COMPONENTS ===== */
/* Sidebar */
.sidebar {
width: 250px;
background-color: #2c3e50; /* Darker background for better contrast */
color: #ecf0f1; /* Lighter text color for better readability */
box-shadow: 2px 0 10px rgba(0, 0, 0, 0.1);
padding: 0; /* Remove padding for a more consistent structure */
flex-shrink: 0;
font-size: 0.875rem;
height: 100vh; /* Full height */
overflow-y: auto; /* Scrollbar when needed */
}
/* Improved sidebar header */
.sidebar-header {
padding: 1.5rem 1rem;
margin-bottom: 0;
border-bottom: 1px solid rgba(255, 255, 255, 0.1);
background-color: #1a2530; /* Even darker background for the header */
}
.sidebar-header h2 {
color: #3498db; /* Brand color for the main title */
margin-bottom: 1rem;
font-size: 1.4rem;
font-weight: 600;
}
/* Current Workspace styling */
.sidebar .current-workspace {
padding: 0.75rem 1rem;
margin: 0;
font-weight: 500;
color: white;
background-color: rgba(52, 152, 219, 0.2); /* Slightly blue highlight */
border-radius: 0;
border-left: 4px solid #3498db; /* Accent on the left edge */
}
/* Current User styling */
.sidebar .current-user {
padding: 0.5rem 1rem;
color: #bdc3c7;
font-size: 0.8rem;
}
/* Improved navigation container */
.navigation-container {
padding: 0;
}
/* Main navigation */
#main-navigation {
margin: 0;
padding: 0;
}
/* Make navigation items more uniform */
.nav-item {
border-left: 4px solid transparent;
transition: all 0.2s ease;
}
.nav-item a {
padding: 0.75rem 1rem;
display: flex;
align-items: center;
color: #ecf0f1;
transition: all 0.2s ease;
}
.nav-item:hover {
background-color: rgba(255, 255, 255, 0.05);
border-left-color: rgba(52, 152, 219, 0.5);
}
.nav-item.active {
background-color: rgba(52, 152, 219, 0.2);
border-left-color: #3498db;
}
.nav-item i {
width: 20px;
margin-right: 10px;
text-align: center;
color: #3498db; /* Icons in brand color */
}
/* Dropdown and collapse arrows */
.fa-chevron-down, .fa-caret-down {
transition: transform 0.3s ease;
margin-left: auto;
}
.collapsed .fa-chevron-down {
transform: rotate(-90deg);
}
/* Indent submenu levels with progressively darker backgrounds */
.nav-item[data-level="1"] {
background-color: rgba(0, 0, 0, 0.1);
}
.nav-item[data-level="2"] {
background-color: rgba(0, 0, 0, 0.2);
}
/* Select workspace, administration, etc. */
.nav-action-btn {
width: 100%;
text-align: left;
padding: 0.75rem 1rem;
color: #ecf0f1;
display: flex;
align-items: center;
transition: all 0.2s ease;
}
.nav-action-btn:hover {
background-color: rgba(255, 255, 255, 0.05);
}
.nav-action-btn i {
width: 20px;
margin-right: 10px;
text-align: center;
color: #3498db;
}
/* Section headings in the sidebar */
.nav-section-header {
padding: 0.75rem 1rem;
margin-top: 1rem;
color: #3498db;
font-weight: 600;
text-transform: uppercase;
font-size: 0.75rem;
letter-spacing: 1px;
border-bottom: 1px solid rgba(255, 255, 255, 0.05);
}
/* Special styling for the administration area */
[data-module="verwaltung"] > a,
[data-module="administration"] > a {
color: #e74c3c;
}
[data-module="verwaltung"] > a i,
[data-module="administration"] > a i {
color: #e74c3c;
}
/* Settings entry at the end of the sidebar */
[data-module="einstellungen"] {
margin-top: auto; /* Push to bottom */
}
/* Hover effects and focus states for better interactivity */
.nav-item a:focus,
.nav-action-btn:focus {
outline: none;
background-color: rgba(255, 255, 255, 0.1);
}
/* Workspace list and items */
.workspace-list {
margin-bottom: 1rem;
max-height: 200px;
overflow-y: auto;
background-color: rgba(0, 0, 0, 0.1);
}
.workspace-item, .sidebar-item {
padding: 0.5rem 1rem;
margin-bottom: 0;
display: flex;
align-items: center;
cursor: pointer;
color: #ecf0f1;
transition: all 0.2s ease;
border-left: 4px solid transparent;
}
.workspace-item i, .sidebar-item i {
margin-right: 0.5rem;
width: 1rem;
color: #3498db;
}
.workspace-item:hover, .sidebar-item:hover {
background-color: rgba(255, 255, 255, 0.05);
border-left-color: rgba(52, 152, 219, 0.5);
}
.workspace-item.active, .sidebar-item.active {
background-color: rgba(52, 152, 219, 0.2);
border-left-color: #3498db;
}
.sidebar-item a {
display: flex;
align-items: center;
width: 100%;
padding: 0.5rem 0;
color: #ecf0f1;
}
/* Style the workspace dropdown */
.workspace-dropdown {
background-color: #1a2530;
border-radius: 0;
border: none;
margin: 0;
padding: 0;
}
.workspace-selector-header {
padding: 0.75rem 1rem;
color: #ecf0f1;
display: flex;
align-items: start;
justify-content: start;
}
.workspace-selector-header i.fa-chevron-down {
margin-left: auto;
}
/* Admin Navigation */
.admin-nav {
margin-top: 1rem;
padding-top: 0.5rem;
border-top: 1px solid rgba(255, 255, 255, 0.1);
}
.admin-nav h3 {
padding: 0.5rem 1rem;
color: #e74c3c;
font-size: 0.9rem;
}
.admin-nav-items {
margin-top: 0.25rem;
}
.sidebar-nav {
margin-top: 0.5rem;
}
/* Top Navigation */
.top-nav {
display: flex;
justify-content: space-between;
padding: 0.5rem 1rem;
background-color: #fff;
border-bottom: 1px solid #e5e7eb;
margin-bottom: 0.75rem;
box-shadow: 0 1px 3px rgba(0, 0, 0, 0.05);
}
.current-workspace {
font-weight: 600;
color: #1d4ed8;
}
.user-menu {
display: flex;
align-items: center;
gap: 0.5rem;
}
.top-nav-items {
display: flex;
list-style: none;
margin: 0;
padding: 0;
}
.top-nav-item {
margin-left: 1rem;
}
/* Dropdown menu styles */
.dropdown {
position: relative;
display: inline-block;
}
.dropdown-toggle {
display: flex;
align-items: center;
padding: 0.5rem;
cursor: pointer;
background: none;
border: none;
color: inherit;
}
.dropdown-toggle i {
margin-right: 0.5rem;
}
.dropdown-toggle .fa-caret-down {
margin-left: 0.5rem;
}
.dropdown-content {
display: none;
position: absolute;
right: 0;
background-color: #f9f9f9;
min-width: 160px;
box-shadow: 0px 8px 16px 0px rgba(0,0,0,0.2);
z-index: 1000;
border-radius: 0.25rem;
}
.dropdown-content a {
color: #333;
padding: 12px 16px;
text-decoration: none;
display: block;
font-size: 0.875rem;
}
.dropdown-content a:hover {
background-color: #f1f1f1;
}
/* Logout button */
.logout-btn {
display: flex;
align-items: center;
padding: 0.375rem 0.75rem;
background-color: rgba(231, 76, 60, 0.8);
color: white;
border-radius: 0.25rem;
font-size: 0.875rem;
transition: background-color 0.2s;
margin-top: 1rem;
margin-left: 1rem;
margin-right: 1rem;
}
.logout-btn:hover {
background-color: rgba(231, 76, 60, 1);
}
.logout-btn i {
margin-right: 0.5rem;
}

View file

@ -1,38 +0,0 @@
inputFiles = [] # DO NOT CHANGE THIS LINE

# PEP 8: imports belong at the top of the module (the original imported
# json at the bottom, just before its first use).
import json


def is_prime(n):
    """Return True if n is a prime number, using 6k +/- 1 trial division."""
    if n <= 1:
        return False
    if n <= 3:
        return True
    if n % 2 == 0 or n % 3 == 0:
        return False
    # Every prime > 3 has the form 6k +/- 1; test only those candidates
    # up to sqrt(n).
    i = 5
    while i * i <= n:
        if n % i == 0 or n % (i + 2) == 0:
            return False
        i += 6
    return True


def generate_primes(count):
    """Return a list of the first `count` primes in ascending order."""
    primes = []
    num = 2
    while len(primes) < count:
        if is_prime(num):
            primes.append(num)
        num += 1
    return primes


# Build the output manifest: one generated file holding the first 222
# primes, newline-separated, printed as JSON for the caller to consume.
primes = generate_primes(222)
prime_numbers_content = "\n".join(map(str, primes))
result = {
    "prime_numbers.txt": {
        "content": prime_numbers_content,
        "base64Encoded": False,
        "contentType": "text/plain"
    }
}
print(json.dumps(result))

View file

@ -1,19 +0,0 @@
[
{
"attempt": 1,
"code": "inputFiles = [] # DO NOT CHANGE THIS LINE\n\ndef is_prime(n):\n if n <= 1:\n return False\n if n <= 3:\n return True\n if n % 2 == 0 or n % 3 == 0:\n return False\n i = 5\n while i * i <= n:\n if n % i == 0 or n % (i + 2) == 0:\n return False\n i += 6\n return True\n\ndef generate_primes(count):\n primes = []\n num = 2\n while len(primes) < count:\n if is_prime(num):\n primes.append(num)\n num += 1\n return primes\n\nprimes = generate_primes(222)\nprime_numbers_content = \"\\n\".join(map(str, primes))\n\nresult = {\n \"prime_numbers.txt\": {\n \"content\": prime_numbers_content,\n \"base64Encoded\": False,\n \"contentType\": \"text/plain\"\n }\n}\n\nimport json\nprint(json.dumps(result))",
"result": {
"success": true,
"output": "{\"prime_numbers.txt\": {\"content\": \"2\\n3\\n5\\n7\\n11\\n13\\n17\\n19\\n23\\n29\\n31\\n37\\n41\\n43\\n47\\n53\\n59\\n61\\n67\\n71\\n73\\n79\\n83\\n89\\n97\\n101\\n103\\n107\\n109\\n113\\n127\\n131\\n137\\n139\\n149\\n151\\n157\\n163\\n167\\n173\\n179\\n181\\n191\\n193\\n197\\n199\\n211\\n223\\n227\\n229\\n233\\n239\\n241\\n251\\n257\\n263\\n269\\n271\\n277\\n281\\n283\\n293\\n307\\n311\\n313\\n317\\n331\\n337\\n347\\n349\\n353\\n359\\n367\\n373\\n379\\n383\\n389\\n397\\n401\\n409\\n419\\n421\\n431\\n433\\n439\\n443\\n449\\n457\\n461\\n463\\n467\\n479\\n487\\n491\\n499\\n503\\n509\\n521\\n523\\n541\\n547\\n557\\n563\\n569\\n571\\n577\\n587\\n593\\n599\\n601\\n607\\n613\\n617\\n619\\n631\\n641\\n643\\n647\\n653\\n659\\n661\\n673\\n677\\n683\\n691\\n701\\n709\\n719\\n727\\n733\\n739\\n743\\n751\\n757\\n761\\n769\\n773\\n787\\n797\\n809\\n811\\n821\\n823\\n827\\n829\\n839\\n853\\n857\\n859\\n863\\n877\\n881\\n883\\n887\\n907\\n911\\n919\\n929\\n937\\n941\\n947\\n953\\n967\\n971\\n977\\n983\\n991\\n997\\n1009\\n1013\\n1019\\n1021\\n1031\\n1033\\n1039\\n1049\\n1051\\n1061\\n1063\\n1069\\n1087\\n1091\\n1093\\n1097\\n1103\\n1109\\n1117\\n1123\\n1129\\n1151\\n1153\\n1163\\n1171\\n1181\\n1187\\n1193\\n1201\\n1213\\n1217\\n1223\\n1229\\n1231\\n1237\\n1249\\n1259\\n1277\\n1279\\n1283\\n1289\\n1291\\n1297\\n1301\\n1303\\n1307\\n1319\\n1321\\n1327\\n1361\\n1367\\n1373\\n1381\\n1399\", \"base64Encoded\": false, \"contentType\": \"text/plain\"}}\n",
"error": "",
"result": {
"prime_numbers.txt": {
"content": "2\n3\n5\n7\n11\n13\n17\n19\n23\n29\n31\n37\n41\n43\n47\n53\n59\n61\n67\n71\n73\n79\n83\n89\n97\n101\n103\n107\n109\n113\n127\n131\n137\n139\n149\n151\n157\n163\n167\n173\n179\n181\n191\n193\n197\n199\n211\n223\n227\n229\n233\n239\n241\n251\n257\n263\n269\n271\n277\n281\n283\n293\n307\n311\n313\n317\n331\n337\n347\n349\n353\n359\n367\n373\n379\n383\n389\n397\n401\n409\n419\n421\n431\n433\n439\n443\n449\n457\n461\n463\n467\n479\n487\n491\n499\n503\n509\n521\n523\n541\n547\n557\n563\n569\n571\n577\n587\n593\n599\n601\n607\n613\n617\n619\n631\n641\n643\n647\n653\n659\n661\n673\n677\n683\n691\n701\n709\n719\n727\n733\n739\n743\n751\n757\n761\n769\n773\n787\n797\n809\n811\n821\n823\n827\n829\n839\n853\n857\n859\n863\n877\n881\n883\n887\n907\n911\n919\n929\n937\n941\n947\n953\n967\n971\n977\n983\n991\n997\n1009\n1013\n1019\n1021\n1031\n1033\n1039\n1049\n1051\n1061\n1063\n1069\n1087\n1091\n1093\n1097\n1103\n1109\n1117\n1123\n1129\n1151\n1153\n1163\n1171\n1181\n1187\n1193\n1201\n1213\n1217\n1223\n1229\n1231\n1237\n1249\n1259\n1277\n1279\n1283\n1289\n1291\n1297\n1301\n1303\n1307\n1319\n1321\n1327\n1361\n1367\n1373\n1381\n1399",
"base64Encoded": false,
"contentType": "text/plain"
}
},
"exitCode": 0
}
}
]

View file

@ -1,222 +0,0 @@
2
3
5
7
11
13
17
19
23
29
31
37
41
43
47
53
59
61
67
71
73
79
83
89
97
101
103
107
109
113
127
131
137
139
149
151
157
163
167
173
179
181
191
193
197
199
211
223
227
229
233
239
241
251
257
263
269
271
277
281
283
293
307
311
313
317
331
337
347
349
353
359
367
373
379
383
389
397
401
409
419
421
431
433
439
443
449
457
461
463
467
479
487
491
499
503
509
521
523
541
547
557
563
569
571
577
587
593
599
601
607
613
617
619
631
641
643
647
653
659
661
673
677
683
691
701
709
719
727
733
739
743
751
757
761
769
773
787
797
809
811
821
823
827
829
839
853
857
859
863
877
881
883
887
907
911
919
929
937
941
947
953
967
971
977
983
991
997
1009
1013
1019
1021
1031
1033
1039
1049
1051
1061
1063
1069
1087
1091
1093
1097
1103
1109
1117
1123
1129
1151
1153
1163
1171
1181
1187
1193
1201
1213
1217
1223
1229
1231
1237
1249
1259
1277
1279
1283
1289
1291
1297
1301
1303
1307
1319
1321
1327
1361
1367
1373
1381
1399

View file

@ -1,38 +0,0 @@
inputFiles = [] # DO NOT CHANGE THIS LINE

# PEP 8: imports belong at the top of the module (the original imported
# json at the bottom, just before its first use).
import json


def is_prime(n):
    """Return True if n is a prime number, using 6k +/- 1 trial division."""
    if n <= 1:
        return False
    if n <= 3:
        return True
    if n % 2 == 0 or n % 3 == 0:
        return False
    # Every prime > 3 has the form 6k +/- 1; test only those candidates
    # up to sqrt(n).
    i = 5
    while i * i <= n:
        if n % i == 0 or n % (i + 2) == 0:
            return False
        i += 6
    return True


def generate_primes(count):
    """Return a list of the first `count` primes in ascending order."""
    primes = []
    num = 2
    while len(primes) < count:
        if is_prime(num):
            primes.append(num)
        num += 1
    return primes


# Build the output manifest: one generated file holding the first 111
# primes, newline-separated, printed as JSON for the caller to consume.
primes = generate_primes(111)
prime_numbers_content = "\n".join(map(str, primes))
result = {
    "prime_numbers.txt": {
        "content": prime_numbers_content,
        "base64Encoded": False,
        "contentType": "text/plain"
    }
}
print(json.dumps(result))

View file

@ -1,19 +0,0 @@
[
{
"attempt": 1,
"code": "inputFiles = [] # DO NOT CHANGE THIS LINE\n\ndef is_prime(n):\n if n <= 1:\n return False\n if n <= 3:\n return True\n if n % 2 == 0 or n % 3 == 0:\n return False\n i = 5\n while i * i <= n:\n if n % i == 0 or n % (i + 2) == 0:\n return False\n i += 6\n return True\n\ndef generate_primes(count):\n primes = []\n num = 2\n while len(primes) < count:\n if is_prime(num):\n primes.append(num)\n num += 1\n return primes\n\nprimes = generate_primes(111)\nprime_numbers_content = \"\\n\".join(map(str, primes))\n\nresult = {\n \"prime_numbers.txt\": {\n \"content\": prime_numbers_content,\n \"base64Encoded\": False,\n \"contentType\": \"text/plain\"\n }\n}\n\nimport json\nprint(json.dumps(result))",
"result": {
"success": true,
"output": "{\"prime_numbers.txt\": {\"content\": \"2\\n3\\n5\\n7\\n11\\n13\\n17\\n19\\n23\\n29\\n31\\n37\\n41\\n43\\n47\\n53\\n59\\n61\\n67\\n71\\n73\\n79\\n83\\n89\\n97\\n101\\n103\\n107\\n109\\n113\\n127\\n131\\n137\\n139\\n149\\n151\\n157\\n163\\n167\\n173\\n179\\n181\\n191\\n193\\n197\\n199\\n211\\n223\\n227\\n229\\n233\\n239\\n241\\n251\\n257\\n263\\n269\\n271\\n277\\n281\\n283\\n293\\n307\\n311\\n313\\n317\\n331\\n337\\n347\\n349\\n353\\n359\\n367\\n373\\n379\\n383\\n389\\n397\\n401\\n409\\n419\\n421\\n431\\n433\\n439\\n443\\n449\\n457\\n461\\n463\\n467\\n479\\n487\\n491\\n499\\n503\\n509\\n521\\n523\\n541\\n547\\n557\\n563\\n569\\n571\\n577\\n587\\n593\\n599\\n601\\n607\", \"base64Encoded\": false, \"contentType\": \"text/plain\"}}\n",
"error": "",
"result": {
"prime_numbers.txt": {
"content": "2\n3\n5\n7\n11\n13\n17\n19\n23\n29\n31\n37\n41\n43\n47\n53\n59\n61\n67\n71\n73\n79\n83\n89\n97\n101\n103\n107\n109\n113\n127\n131\n137\n139\n149\n151\n157\n163\n167\n173\n179\n181\n191\n193\n197\n199\n211\n223\n227\n229\n233\n239\n241\n251\n257\n263\n269\n271\n277\n281\n283\n293\n307\n311\n313\n317\n331\n337\n347\n349\n353\n359\n367\n373\n379\n383\n389\n397\n401\n409\n419\n421\n431\n433\n439\n443\n449\n457\n461\n463\n467\n479\n487\n491\n499\n503\n509\n521\n523\n541\n547\n557\n563\n569\n571\n577\n587\n593\n599\n601\n607",
"base64Encoded": false,
"contentType": "text/plain"
}
},
"exitCode": 0
}
}
]

View file

@ -1,111 +0,0 @@
2
3
5
7
11
13
17
19
23
29
31
37
41
43
47
53
59
61
67
71
73
79
83
89
97
101
103
107
109
113
127
131
137
139
149
151
157
163
167
173
179
181
191
193
197
199
211
223
227
229
233
239
241
251
257
263
269
271
277
281
283
293
307
311
313
317
331
337
347
349
353
359
367
373
379
383
389
397
401
409
419
421
431
433
439
443
449
457
461
463
467
479
487
491
499
503
509
521
523
541
547
557
563
569
571
577
587
593
599
601
607

View file

@ -1,38 +0,0 @@
inputFiles = [] # DO NOT CHANGE THIS LINE

# PEP 8: imports belong at the top of the module (the original imported
# json at the bottom, just before its first use).
import json


def is_prime(n):
    """Return True if n is a prime number, using 6k +/- 1 trial division."""
    if n <= 1:
        return False
    if n <= 3:
        return True
    if n % 2 == 0 or n % 3 == 0:
        return False
    # Every prime > 3 has the form 6k +/- 1; test only those candidates
    # up to sqrt(n).
    i = 5
    while i * i <= n:
        if n % i == 0 or n % (i + 2) == 0:
            return False
        i += 6
    return True


def generate_primes(count):
    """Return a list of the first `count` primes in ascending order."""
    primes = []
    num = 2
    while len(primes) < count:
        if is_prime(num):
            primes.append(num)
        num += 1
    return primes


# Build the output manifest: one generated file holding the first 123
# primes, newline-separated, printed as JSON for the caller to consume.
primes = generate_primes(123)
primes_content = "\n".join(map(str, primes))
result = {
    "prime_numbers.txt": {
        "content": primes_content,
        "base64Encoded": False,
        "contentType": "text/plain"
    }
}
print(json.dumps(result))

View file

@ -1,19 +0,0 @@
[
{
"attempt": 1,
"code": "inputFiles = [] # DO NOT CHANGE THIS LINE\n\ndef is_prime(n):\n if n <= 1:\n return False\n if n <= 3:\n return True\n if n % 2 == 0 or n % 3 == 0:\n return False\n i = 5\n while i * i <= n:\n if n % i == 0 or n % (i + 2) == 0:\n return False\n i += 6\n return True\n\ndef generate_primes(count):\n primes = []\n num = 2\n while len(primes) < count:\n if is_prime(num):\n primes.append(num)\n num += 1\n return primes\n\nprimes = generate_primes(123)\nprimes_content = \"\\n\".join(map(str, primes))\n\nresult = {\n \"prime_numbers.txt\": {\n \"content\": primes_content,\n \"base64Encoded\": False,\n \"contentType\": \"text/plain\"\n }\n}\n\nimport json\nprint(json.dumps(result))",
"result": {
"success": true,
"output": "{\"prime_numbers.txt\": {\"content\": \"2\\n3\\n5\\n7\\n11\\n13\\n17\\n19\\n23\\n29\\n31\\n37\\n41\\n43\\n47\\n53\\n59\\n61\\n67\\n71\\n73\\n79\\n83\\n89\\n97\\n101\\n103\\n107\\n109\\n113\\n127\\n131\\n137\\n139\\n149\\n151\\n157\\n163\\n167\\n173\\n179\\n181\\n191\\n193\\n197\\n199\\n211\\n223\\n227\\n229\\n233\\n239\\n241\\n251\\n257\\n263\\n269\\n271\\n277\\n281\\n283\\n293\\n307\\n311\\n313\\n317\\n331\\n337\\n347\\n349\\n353\\n359\\n367\\n373\\n379\\n383\\n389\\n397\\n401\\n409\\n419\\n421\\n431\\n433\\n439\\n443\\n449\\n457\\n461\\n463\\n467\\n479\\n487\\n491\\n499\\n503\\n509\\n521\\n523\\n541\\n547\\n557\\n563\\n569\\n571\\n577\\n587\\n593\\n599\\n601\\n607\\n613\\n617\\n619\\n631\\n641\\n643\\n647\\n653\\n659\\n661\\n673\\n677\", \"base64Encoded\": false, \"contentType\": \"text/plain\"}}\n",
"error": "",
"result": {
"prime_numbers.txt": {
"content": "2\n3\n5\n7\n11\n13\n17\n19\n23\n29\n31\n37\n41\n43\n47\n53\n59\n61\n67\n71\n73\n79\n83\n89\n97\n101\n103\n107\n109\n113\n127\n131\n137\n139\n149\n151\n157\n163\n167\n173\n179\n181\n191\n193\n197\n199\n211\n223\n227\n229\n233\n239\n241\n251\n257\n263\n269\n271\n277\n281\n283\n293\n307\n311\n313\n317\n331\n337\n347\n349\n353\n359\n367\n373\n379\n383\n389\n397\n401\n409\n419\n421\n431\n433\n439\n443\n449\n457\n461\n463\n467\n479\n487\n491\n499\n503\n509\n521\n523\n541\n547\n557\n563\n569\n571\n577\n587\n593\n599\n601\n607\n613\n617\n619\n631\n641\n643\n647\n653\n659\n661\n673\n677",
"base64Encoded": false,
"contentType": "text/plain"
}
},
"exitCode": 0
}
}
]

View file

@ -1,123 +0,0 @@
2
3
5
7
11
13
17
19
23
29
31
37
41
43
47
53
59
61
67
71
73
79
83
89
97
101
103
107
109
113
127
131
137
139
149
151
157
163
167
173
179
181
191
193
197
199
211
223
227
229
233
239
241
251
257
263
269
271
277
281
283
293
307
311
313
317
331
337
347
349
353
359
367
373
379
383
389
397
401
409
419
421
431
433
439
443
449
457
461
463
467
479
487
491
499
503
509
521
523
541
547
557
563
569
571
577
587
593
599
601
607
613
617
619
631
641
643
647
653
659
661
673
677

View file

@ -1,38 +0,0 @@
inputFiles = [] # DO NOT CHANGE THIS LINE

# PEP 8: imports belong at the top of the module (the original imported
# json at the bottom, just before its first use).
import json


def is_prime(n):
    """Return True if n is a prime number, using 6k +/- 1 trial division."""
    if n <= 1:
        return False
    if n <= 3:
        return True
    if n % 2 == 0 or n % 3 == 0:
        return False
    # Every prime > 3 has the form 6k +/- 1; test only those candidates
    # up to sqrt(n).
    i = 5
    while i * i <= n:
        if n % i == 0 or n % (i + 2) == 0:
            return False
        i += 6
    return True


def generate_primes(count):
    """Return a list of the first `count` primes in ascending order."""
    primes = []
    num = 2
    while len(primes) < count:
        if is_prime(num):
            primes.append(num)
        num += 1
    return primes


# Build the output manifest: one generated file holding the first 123
# primes, comma-separated, printed as JSON for the caller to consume.
primes = generate_primes(123)
primes_str = ','.join(map(str, primes))
result = {
    "prime_numbers.txt": {
        "content": primes_str,
        "base64Encoded": False,
        "contentType": "text/plain"
    }
}
print(json.dumps(result))

View file

@ -1,19 +0,0 @@
[
{
"attempt": 1,
"code": "inputFiles = [] # DO NOT CHANGE THIS LINE\n\ndef is_prime(n):\n if n <= 1:\n return False\n if n <= 3:\n return True\n if n % 2 == 0 or n % 3 == 0:\n return False\n i = 5\n while i * i <= n:\n if n % i == 0 or n % (i + 2) == 0:\n return False\n i += 6\n return True\n\ndef generate_primes(count):\n primes = []\n num = 2\n while len(primes) < count:\n if is_prime(num):\n primes.append(num)\n num += 1\n return primes\n\nprimes = generate_primes(123)\nprimes_str = ','.join(map(str, primes))\n\nresult = {\n \"prime_numbers.txt\": {\n \"content\": primes_str,\n \"base64Encoded\": False,\n \"contentType\": \"text/plain\"\n }\n}\n\nimport json\nprint(json.dumps(result))",
"result": {
"success": true,
"output": "{\"prime_numbers.txt\": {\"content\": \"2,3,5,7,11,13,17,19,23,29,31,37,41,43,47,53,59,61,67,71,73,79,83,89,97,101,103,107,109,113,127,131,137,139,149,151,157,163,167,173,179,181,191,193,197,199,211,223,227,229,233,239,241,251,257,263,269,271,277,281,283,293,307,311,313,317,331,337,347,349,353,359,367,373,379,383,389,397,401,409,419,421,431,433,439,443,449,457,461,463,467,479,487,491,499,503,509,521,523,541,547,557,563,569,571,577,587,593,599,601,607,613,617,619,631,641,643,647,653,659,661,673,677\", \"base64Encoded\": false, \"contentType\": \"text/plain\"}}\n",
"error": "",
"result": {
"prime_numbers.txt": {
"content": "2,3,5,7,11,13,17,19,23,29,31,37,41,43,47,53,59,61,67,71,73,79,83,89,97,101,103,107,109,113,127,131,137,139,149,151,157,163,167,173,179,181,191,193,197,199,211,223,227,229,233,239,241,251,257,263,269,271,277,281,283,293,307,311,313,317,331,337,347,349,353,359,367,373,379,383,389,397,401,409,419,421,431,433,439,443,449,457,461,463,467,479,487,491,499,503,509,521,523,541,547,557,563,569,571,577,587,593,599,601,607,613,617,619,631,641,643,647,653,659,661,673,677",
"base64Encoded": false,
"contentType": "text/plain"
}
},
"exitCode": 0
}
}
]

View file

@ -1 +0,0 @@
2,3,5,7,11,13,17,19,23,29,31,37,41,43,47,53,59,61,67,71,73,79,83,89,97,101,103,107,109,113,127,131,137,139,149,151,157,163,167,173,179,181,191,193,197,199,211,223,227,229,233,239,241,251,257,263,269,271,277,281,283,293,307,311,313,317,331,337,347,349,353,359,367,373,379,383,389,397,401,409,419,421,431,433,439,443,449,457,461,463,467,479,487,491,499,503,509,521,523,541,547,557,563,569,571,577,587,593,599,601,607,613,617,619,631,641,643,647,653,659,661,673,677

View file

@ -1,38 +0,0 @@
inputFiles = [] # DO NOT CHANGE THIS LINE

# PEP 8: imports belong at the top of the module (the original imported
# json at the bottom, just before its first use).
import json


def is_prime(n):
    """Return True if n is a prime number, using 6k +/- 1 trial division."""
    if n <= 1:
        return False
    if n <= 3:
        return True
    if n % 2 == 0 or n % 3 == 0:
        return False
    # Every prime > 3 has the form 6k +/- 1; test only those candidates
    # up to sqrt(n).
    i = 5
    while i * i <= n:
        if n % i == 0 or n % (i + 2) == 0:
            return False
        i += 6
    return True


def generate_primes(count):
    """Return a list of the first `count` primes in ascending order."""
    primes = []
    num = 2
    while len(primes) < count:
        if is_prime(num):
            primes.append(num)
        num += 1
    return primes


# Build the output manifest: one generated file holding the first 100
# primes, newline-separated, printed as JSON for the caller to consume.
primes = generate_primes(100)
primes_content = "\n".join(map(str, primes))
result = {
    "prime_numbers.txt": {
        "content": primes_content,
        "base64Encoded": False,
        "contentType": "text/plain"
    }
}
print(json.dumps(result))

View file

@ -1,19 +0,0 @@
[
{
"attempt": 1,
"code": "inputFiles = [] # DO NOT CHANGE THIS LINE\n\ndef is_prime(n):\n if n <= 1:\n return False\n if n <= 3:\n return True\n if n % 2 == 0 or n % 3 == 0:\n return False\n i = 5\n while i * i <= n:\n if n % i == 0 or n % (i + 2) == 0:\n return False\n i += 6\n return True\n\ndef generate_primes(count):\n primes = []\n num = 2\n while len(primes) < count:\n if is_prime(num):\n primes.append(num)\n num += 1\n return primes\n\nprimes = generate_primes(100)\nprimes_content = \"\\n\".join(map(str, primes))\n\nresult = {\n \"prime_numbers.txt\": {\n \"content\": primes_content,\n \"base64Encoded\": False,\n \"contentType\": \"text/plain\"\n }\n}\n\nimport json\nprint(json.dumps(result))",
"result": {
"success": true,
"output": "{\"prime_numbers.txt\": {\"content\": \"2\\n3\\n5\\n7\\n11\\n13\\n17\\n19\\n23\\n29\\n31\\n37\\n41\\n43\\n47\\n53\\n59\\n61\\n67\\n71\\n73\\n79\\n83\\n89\\n97\\n101\\n103\\n107\\n109\\n113\\n127\\n131\\n137\\n139\\n149\\n151\\n157\\n163\\n167\\n173\\n179\\n181\\n191\\n193\\n197\\n199\\n211\\n223\\n227\\n229\\n233\\n239\\n241\\n251\\n257\\n263\\n269\\n271\\n277\\n281\\n283\\n293\\n307\\n311\\n313\\n317\\n331\\n337\\n347\\n349\\n353\\n359\\n367\\n373\\n379\\n383\\n389\\n397\\n401\\n409\\n419\\n421\\n431\\n433\\n439\\n443\\n449\\n457\\n461\\n463\\n467\\n479\\n487\\n491\\n499\\n503\\n509\\n521\\n523\\n541\", \"base64Encoded\": false, \"contentType\": \"text/plain\"}}\n",
"error": "",
"result": {
"prime_numbers.txt": {
"content": "2\n3\n5\n7\n11\n13\n17\n19\n23\n29\n31\n37\n41\n43\n47\n53\n59\n61\n67\n71\n73\n79\n83\n89\n97\n101\n103\n107\n109\n113\n127\n131\n137\n139\n149\n151\n157\n163\n167\n173\n179\n181\n191\n193\n197\n199\n211\n223\n227\n229\n233\n239\n241\n251\n257\n263\n269\n271\n277\n281\n283\n293\n307\n311\n313\n317\n331\n337\n347\n349\n353\n359\n367\n373\n379\n383\n389\n397\n401\n409\n419\n421\n431\n433\n439\n443\n449\n457\n461\n463\n467\n479\n487\n491\n499\n503\n509\n521\n523\n541",
"base64Encoded": false,
"contentType": "text/plain"
}
},
"exitCode": 0
}
}
]

View file

@ -1,100 +0,0 @@
2
3
5
7
11
13
17
19
23
29
31
37
41
43
47
53
59
61
67
71
73
79
83
89
97
101
103
107
109
113
127
131
137
139
149
151
157
163
167
173
179
181
191
193
197
199
211
223
227
229
233
239
241
251
257
263
269
271
277
281
283
293
307
311
313
317
331
337
347
349
353
359
367
373
379
383
389
397
401
409
419
421
431
433
439
443
449
457
461
463
467
479
487
491
499
503
509
521
523
541

View file

@ -1,37 +0,0 @@
# Meaning of "The Pie" - Research Report
## Executive Summary
This report explores the literal, idiomatic, and metaphorical meanings of the phrase "the pie" in English. The research investigates how "the pie" is used in common English phrases and expressions. The findings reveal that "the pie" can refer to both a literal baked dish and a metaphorical concept representing a shareable resource or opportunity. Additionally, the phrase is often used in idiomatic expressions to convey ideas related to division, sharing, and competition.
## Research Questions and Findings
### 1. What is the literal meaning of 'the pie' in English?
The literal meaning of "the pie" refers to a baked dish typically consisting of a pastry crust filled with sweet or savory ingredients. Pies are a common culinary item in many cultures and can vary widely in terms of ingredients and preparation methods. The term "pie" can denote various types of dishes, including fruit pies, meat pies, and custard pies.
### 2. Are there any idiomatic or metaphorical meanings associated with 'the pie'?
Yes, "the pie" is often used metaphorically to represent a finite resource or opportunity that can be divided among different parties. This metaphorical usage is prevalent in discussions about economics, business, and social issues, where "the pie" symbolizes wealth, market share, or other resources that stakeholders aim to divide or compete for.
### 3. How is 'the pie' used in common English phrases or expressions?
"The pie" is frequently used in idiomatic expressions, such as:
- **"A piece of the pie"**: This expression means obtaining a share of something valuable or desirable. It is often used in contexts where individuals or groups seek a portion of a larger resource or opportunity.
- **"Growing the pie"**: This phrase refers to increasing the total amount of a resource or opportunity available, rather than merely dividing the existing amount. It is commonly used in economic contexts to suggest that creating more wealth or opportunities benefits everyone involved.
- **"Cutting the pie"**: This expression involves dividing a resource or opportunity among different parties. It emphasizes the allocation process and is often used in discussions about fairness and equity.
## Sources and Citations
Because this report draws on general linguistic knowledge rather than a dedicated literature review, no individual sources are cited. The information presented reflects common linguistic and cultural understandings of the phrase "the pie" in English. For further reading, standard dictionaries (for example, Merriam-Webster or the Oxford English Dictionary) and idiom reference guides provide additional insight into the usage and meanings of this phrase.
## Conclusion
The phrase "the pie" in English encompasses both literal and metaphorical meanings. While it literally refers to a type of baked dish, its idiomatic usage is widespread in discussions about resource allocation and competition. Understanding these different contexts enhances comprehension of how "the pie" is used in everyday language and specialized discussions.
---
This report is intended to provide a comprehensive overview of the meanings and uses of "the pie" in English. It is formatted to meet scholarly standards and aims to offer clear and accurate insights into the phrase's significance.

View file

@ -1,117 +0,0 @@
Translation of 'The Pie' Explanation
====================================
EXECUTIVE SUMMARY
-----------------
Executive Summary: Übersetzung der Erklärung von 'The Pie'
Diese Zusammenfassung bietet einen Überblick über den Bericht zur Übersetzung der Erklärung von 'The Pie' aus dem Englischen ins Deutsche. Der Bericht richtet sich an ein allgemeines Publikum und untersucht die verschiedenen Bedeutungen des Begriffs 'the pie'.
Der Bericht gliedert sich in drei Hauptthemen:
1. **Wörtliche Bedeutung von 'the pie':** Der Begriff 'the pie' wird im Englischen häufig als Synonym für einen Kuchen oder eine Torte verwendet. Diese Bedeutung ist in vielen Kulturen bekannt und wird in der deutschen Übersetzung als 'der Kuchen' oder 'die Torte' wiedergegeben.
2. **Metaphorische Bedeutung von 'the pie':** Im übertragenen Sinne wird 'the pie' oft verwendet, um einen Anteil an einem Ganzen zu beschreiben, insbesondere in wirtschaftlichen oder finanziellen Kontexten. In der deutschen Sprache wird dies häufig mit dem Ausdruck 'das Stück vom Kuchen' übersetzt, um den Anteil an Ressourcen oder Gewinnen zu verdeutlichen.
3. **Idiomatismen im Zusammenhang mit 'the pie':** Der Bericht beleuchtet auch idiomatische Ausdrücke, die 'the pie' beinhalten, wie z.B. "to have a finger in every pie", was im Deutschen als "überall seine Finger im Spiel haben" übersetzt wird. Solche Redewendungen sind kulturell spezifisch und erfordern eine sorgfältige Übersetzung, um die beabsichtigte Bedeutung zu bewahren.
Schlussfolgerungen und Empfehlungen des Berichts betonen die Bedeutung einer präzisen und kontextsensitiven Übersetzung, um Missverständnisse zu vermeiden und die kulturellen Nuancen beizubehalten. Es wird empfohlen, bei der Übersetzung von idiomatischen Ausdrücken auf etablierte deutsche Redewendungen zurückzugreifen, um die Verständlichkeit und Relevanz für das Zielpublikum zu gewährleisten.
Dieser Bericht bietet wertvolle Einblicke für alle, die sich mit der Übersetzung und Interpretation von Sprachbildern beschäftigen, und unterstreicht die Herausforderungen und Möglichkeiten, die mit der Übertragung von Bedeutungen zwischen verschiedenen Sprachen verbunden sind.
Einführung in den Bericht: Übersetzung der Erklärung von 'The Pie'
Zweck und Umfang des Dokuments:
Dieser Bericht zielt darauf ab, die Erklärung des Begriffs 'The Pie' aus dem Englischen ins Deutsche zu übersetzen und dabei sowohl die wörtliche als auch die metaphorische Bedeutung zu beleuchten. Der Bericht richtet sich an ein allgemeines Publikum und bietet eine umfassende Analyse der verschiedenen Bedeutungen und Verwendungen des Begriffs 'The Pie'.
Hintergrundinformationen:
Der Begriff 'The Pie' ist in der englischen Sprache weit verbreitet und wird in verschiedenen Kontexten verwendet. Neben seiner wörtlichen Bedeutung als Gebäckstück hat 'The Pie' auch metaphorische und idiomatische Bedeutungen, die in der Alltagssprache häufig vorkommen. Diese unterschiedlichen Bedeutungen können in der Übersetzung Herausforderungen darstellen, die in diesem Bericht adressiert werden.
Inhalt des Dokuments:
Der Bericht gliedert sich in drei Hauptabschnitte. Zunächst wird die wörtliche Bedeutung von 'The Pie' untersucht, gefolgt von einer Analyse der metaphorischen Bedeutungen, die der Begriff in verschiedenen kulturellen und sozialen Kontexten annehmen kann. Abschließend werden idiomatische Ausdrücke, die 'The Pie' beinhalten, vorgestellt und ihre Übersetzungen ins Deutsche diskutiert.
Ton und Stil:
Der Bericht ist formell gehalten, um die Professionalität und Genauigkeit der Übersetzungen zu gewährleisten. Gleichzeitig wird darauf geachtet, dass die Informationen für ein breites Publikum zugänglich und verständlich sind. Ziel ist es, den Leser sowohl zu informieren als auch zu interessieren, indem die Vielschichtigkeit des Begriffs 'The Pie' auf anschauliche Weise dargestellt wird.
Introduction to 'The Pie'
-------------------------
Title: Introduction to 'The Pie'
In diesem Abschnitt des Berichts "Übersetzung der Erklärung von 'The Pie'" wird eine umfassende Einführung in die Bedeutung des Begriffs "The Pie" gegeben. Diese Einführung gliedert sich in zwei Hauptunterabschnitte: die wörtliche und die metaphorische Bedeutung. Ziel ist es, ein tiefes Verständnis für die verschiedenen Kontexte zu vermitteln, in denen dieser Begriff verwendet wird.
Literal Meaning
Wörtlich betrachtet bezieht sich "The Pie" auf ein gebackenes Gericht, das aus einer Teighülle besteht, die mit süßen oder herzhaften Zutaten gefüllt ist. Diese kulinarische Definition ist in vielen Kulturen verbreitet und variiert je nach regionalen Vorlieben und Traditionen. Ein klassisches Beispiel ist der Apfelkuchen, der in vielen westlichen Ländern als Dessert beliebt ist. Ebenso gibt es herzhafte Varianten wie den Fleischkuchen, der oft als Hauptgericht serviert wird. Die Vielfalt der Füllungen und die Kunst der Zubereitung machen "The Pie" zu einem vielseitigen und geschätzten Bestandteil der internationalen Küche.
Metaphorical Meaning
Metaphorisch gesehen steht "The Pie" für eine begrenzte Ressource oder Gelegenheit, die unter verschiedenen Parteien aufgeteilt werden kann. Diese Bedeutung wird häufig in wirtschaftlichen und sozialen Kontexten verwendet, um die Verteilung von Wohlstand, Macht oder anderen wertvollen Ressourcen zu beschreiben. Ein gängiger Ausdruck ist "ein Stück vom Kuchen abbekommen", was bedeutet, einen Anteil an etwas Wertvollem zu erhalten. Ein weiteres Beispiel ist der Ausdruck "den Kuchen vergrößern", der darauf abzielt, die Gesamtmenge der verfügbaren Ressourcen zu erhöhen, sodass alle Beteiligten profitieren können. Diese metaphorische Verwendung von "The Pie" verdeutlicht die Dynamik von Wettbewerb und Kooperation in verschiedenen Lebensbereichen.
Zusammenfassend bietet die Einführung in "The Pie" sowohl eine detaillierte Betrachtung der wörtlichen als auch der metaphorischen Bedeutung. Diese duale Perspektive ermöglicht es, die Vielschichtigkeit des Begriffs zu verstehen und seine Anwendung in unterschiedlichen Kontexten zu erkennen.
Literal Meaning of 'The Pie'
----------------------------
Title: Wörtliche Bedeutung von 'The Pie'
Einleitung:
In diesem Abschnitt wird die wörtliche Bedeutung des Begriffs "The Pie" untersucht. Der Fokus liegt auf der Definition eines Kuchens und den gängigen Arten von Kuchen, die in verschiedenen Kulturen und Küchen zu finden sind.
Definition eines Kuchens:
Ein Kuchen ist ein gebackenes Gericht, das aus einer Teigkruste besteht, die mit süßen oder herzhaften Zutaten gefüllt ist. Die Kruste kann aus verschiedenen Arten von Teig bestehen, wie Mürbeteig, Blätterteig oder Hefeteig. Die Füllungen variieren je nach Rezept und können Früchte, Fleisch, Gemüse, Käse oder eine Kombination dieser Zutaten enthalten. Der Kuchen wird in der Regel in einem Ofen gebacken, bis die Kruste goldbraun und die Füllung durchgegart ist.
Gängige Arten von Kuchen:
1. **Süße Kuchen**:
- **Apfelkuchen**: Ein klassischer süßer Kuchen, der mit einer Füllung aus geschnittenen Äpfeln, Zucker und Gewürzen wie Zimt gefüllt ist.
- **Schokoladenkuchen**: Ein reichhaltiger Kuchen, oft mit einer Füllung aus Schokolade oder Schokoladencreme.
- **Zitronen-Baiser-Kuchen**: Ein Kuchen mit einer Zitronencremefüllung, bedeckt mit einer leichten Baiserhaube.
2. **Herzhafte Kuchen**:
- **Quiche**: Ein französischer herzhafter Kuchen, der aus einer Eier-Sahne-Füllung besteht, oft mit Speck, Käse und Gemüse.
- **Fleischpastete**: Ein herzhafter Kuchen, gefüllt mit gewürztem Fleisch, oft Rind oder Lamm, und Gemüse.
- **Gemüsekuchen**: Ein Kuchen, der mit einer Mischung aus verschiedenen Gemüsesorten gefüllt ist, manchmal mit Käse oder einer Sahnesoße.
Schlussfolgerung:
Die wörtliche Bedeutung von "The Pie" umfasst eine Vielzahl von Gerichten, die in der kulinarischen Welt weit verbreitet sind. Ob süß oder herzhaft, Kuchen bieten eine Vielfalt an Geschmacksrichtungen und Texturen, die sie zu einem beliebten Gericht in vielen Kulturen machen.
Metaphorical Meaning of 'The Pie'
---------------------------------
Title: Metaphorische Bedeutung von 'The Pie'
In diesem Abschnitt des Berichts "Translation of 'The Pie' Explanation" wird die metaphorische Bedeutung des Begriffs 'The Pie' untersucht. Der Begriff 'The Pie' wird häufig in idiomatischen Ausdrücken verwendet, um Konzepte wie Ressourcenverteilung und Wachstum zu veranschaulichen. Diese Ausdrücke sind tief in der englischen Sprache verwurzelt und haben auch in der deutschen Sprache ihre Entsprechungen gefunden. Im Folgenden werden die wichtigsten idiomatischen Ausdrücke im Detail erläutert.
**Idiomatic Expressions**
1. **Ein Stück vom Kuchen (A piece of the pie)**
- Erklärung: Der Ausdruck "ein Stück vom Kuchen" bezieht sich auf das Erlangen eines Anteils an etwas Wertvollem oder Begehrenswertem. In wirtschaftlichen oder geschäftlichen Kontexten bedeutet dies oft, dass eine Person oder Gruppe einen Teil der verfügbaren Ressourcen oder Gewinne beansprucht.
- Beispiel: In einem Unternehmen könnte ein Mitarbeiter, der eine Gehaltserhöhung fordert, als jemand angesehen werden, der "ein größeres Stück vom Kuchen" möchte.
2. **Den Kuchen vergrößern (Growing the pie)**
- Erklärung: Der Ausdruck "den Kuchen vergrößern" beschreibt die Idee, das Gesamtvolumen der verfügbaren Ressourcen oder Möglichkeiten zu erhöhen, anstatt sich nur auf die Verteilung der bestehenden Ressourcen zu konzentrieren. Dies ist ein Konzept, das oft in wirtschaftlichen Diskussionen über Wachstum und Expansion verwendet wird.
- Beispiel: Eine Regierung könnte durch Investitionen in Bildung und Infrastruktur versuchen, "den Kuchen zu vergrößern", um langfristig mehr Wohlstand für alle Bürger zu schaffen.
3. **Den Kuchen schneiden (Cutting the pie)**
- Erklärung: "Den Kuchen schneiden" bezieht sich auf die Art und Weise, wie vorhandene Ressourcen oder Möglichkeiten unter verschiedenen Parteien aufgeteilt werden. Dies kann sowohl in positiven als auch in negativen Kontexten verwendet werden, je nachdem, wie gerecht oder ungerecht die Verteilung wahrgenommen wird.
- Beispiel: Bei der Budgetplanung einer Organisation wird oft diskutiert, wie "der Kuchen geschnitten" werden sollte, um die verschiedenen Abteilungen angemessen zu finanzieren.
Diese idiomatischen Ausdrücke veranschaulichen, wie der Begriff 'The Pie' über seine wörtliche Bedeutung hinausgeht und in verschiedenen Kontexten verwendet wird, um komplexe wirtschaftliche und soziale Konzepte zu verdeutlichen. Sie sind ein integraler Bestandteil der Sprache und bieten wertvolle Einblicke in die Denkweise über Ressourcen und deren Verteilung.
CONCLUSION
----------
Abschlussbericht: Übersetzung der Erklärung von 'The Pie'
In diesem Bericht haben wir die verschiedenen Bedeutungen des Begriffs 'the pie' untersucht und seine Übersetzung ins Deutsche analysiert. Zunächst wurde der wörtliche Sinn von 'the pie' betrachtet, der sich auf ein gebackenes Gericht bezieht, das in vielen Kulturen beliebt ist. Diese grundlegende Bedeutung bildet die Grundlage für ein tieferes Verständnis der metaphorischen und idiomatischen Verwendungen des Begriffs.
Im weiteren Verlauf des Berichts wurde die metaphorische Bedeutung von 'the pie' erörtert, die oft im wirtschaftlichen oder sozialen Kontext verwendet wird, um Ressourcen oder Chancen zu symbolisieren, die unter verschiedenen Parteien aufgeteilt werden können. Diese Metapher ist besonders relevant in Diskussionen über Gerechtigkeit und Verteilung.
Darüber hinaus wurden idiomatische Ausdrücke, die 'the pie' beinhalten, analysiert. Diese Redewendungen, wie zum Beispiel "to have a finger in every pie", verdeutlichen die Vielseitigkeit und den kulturellen Reichtum, den solche Ausdrücke in der englischen Sprache bieten.
Zusammenfassend lässt sich sagen, dass die Übersetzung von 'the pie' ins Deutsche nicht nur eine sprachliche, sondern auch eine kulturelle Herausforderung darstellt. Es ist wichtig, sowohl die wörtliche als auch die übertragene Bedeutung zu berücksichtigen, um eine präzise und kontextgerechte Übersetzung zu gewährleisten.
Als Empfehlung für zukünftige Arbeiten sollte eine detaillierte Untersuchung der kulturellen Konnotationen und der Verwendung von 'the pie' in verschiedenen Kontexten erfolgen, um ein noch tieferes Verständnis zu erlangen. Zudem könnte die Erstellung eines Glossars mit häufig verwendeten idiomatischen Ausdrücken und deren Übersetzungen hilfreich sein.
Dieser Bericht unterstreicht die Bedeutung der Sprach- und Kulturkompetenz bei der Übersetzung und bietet wertvolle Einblicke für Übersetzer und Sprachinteressierte.

View file

@ -1,38 +0,0 @@
# NOTE(review): presumably populated/rewritten by the execution harness before
# the script runs — the trailing marker comment looks like a harness contract,
# so this line must remain exactly as-is. TODO confirm against the runner.
inputFiles = [] # DO NOT CHANGE THIS LINE
def is_prime(n):
    """Return True when ``n`` is prime, via 6k +/- 1 trial division.

    Numbers <= 1 are never prime; 2 and 3 are handled up front, after
    which only candidates of the form 6k-1 and 6k+1 need checking.
    """
    if n <= 3:
        # Covers negatives, 0 and 1 (not prime) as well as 2 and 3 (prime).
        return n > 1
    if n % 2 == 0 or n % 3 == 0:
        return False
    candidate = 5
    while candidate * candidate <= n:
        # candidate and candidate + 2 walk the 6k-1 / 6k+1 lattice.
        if n % candidate == 0 or n % (candidate + 2) == 0:
            return False
        candidate += 6
    return True
def generate_primes(count):
    """Return a list of the first ``count`` prime numbers in ascending order."""
    collected = []
    candidate = 1
    while len(collected) < count:
        candidate += 1
        if is_prime(candidate):
            collected.append(candidate)
    return collected
import json

# Build the first 99 primes and emit them on stdout in the sandbox's
# file-manifest JSON shape: {filename: {content, base64Encoded, contentType}}.
primes = generate_primes(99)
prime_numbers_content = "\n".join(str(p) for p in primes)

result = {
    "prime_numbers.txt": {
        "content": prime_numbers_content,
        "base64Encoded": False,
        "contentType": "text/plain",
    }
}
print(json.dumps(result))

View file

@ -1,19 +0,0 @@
[
{
"attempt": 1,
"code": "inputFiles = [] # DO NOT CHANGE THIS LINE\n\ndef is_prime(n):\n if n <= 1:\n return False\n if n <= 3:\n return True\n if n % 2 == 0 or n % 3 == 0:\n return False\n i = 5\n while i * i <= n:\n if n % i == 0 or n % (i + 2) == 0:\n return False\n i += 6\n return True\n\ndef generate_primes(count):\n primes = []\n num = 2\n while len(primes) < count:\n if is_prime(num):\n primes.append(num)\n num += 1\n return primes\n\nprimes = generate_primes(99)\nprime_numbers_content = \"\\n\".join(map(str, primes))\n\nresult = {\n \"prime_numbers.txt\": {\n \"content\": prime_numbers_content,\n \"base64Encoded\": False,\n \"contentType\": \"text/plain\"\n }\n}\n\nimport json\nprint(json.dumps(result))",
"result": {
"success": true,
"output": "{\"prime_numbers.txt\": {\"content\": \"2\\n3\\n5\\n7\\n11\\n13\\n17\\n19\\n23\\n29\\n31\\n37\\n41\\n43\\n47\\n53\\n59\\n61\\n67\\n71\\n73\\n79\\n83\\n89\\n97\\n101\\n103\\n107\\n109\\n113\\n127\\n131\\n137\\n139\\n149\\n151\\n157\\n163\\n167\\n173\\n179\\n181\\n191\\n193\\n197\\n199\\n211\\n223\\n227\\n229\\n233\\n239\\n241\\n251\\n257\\n263\\n269\\n271\\n277\\n281\\n283\\n293\\n307\\n311\\n313\\n317\\n331\\n337\\n347\\n349\\n353\\n359\\n367\\n373\\n379\\n383\\n389\\n397\\n401\\n409\\n419\\n421\\n431\\n433\\n439\\n443\\n449\\n457\\n461\\n463\\n467\\n479\\n487\\n491\\n499\\n503\\n509\\n521\\n523\", \"base64Encoded\": false, \"contentType\": \"text/plain\"}}\n",
"error": "",
"result": {
"prime_numbers.txt": {
"content": "2\n3\n5\n7\n11\n13\n17\n19\n23\n29\n31\n37\n41\n43\n47\n53\n59\n61\n67\n71\n73\n79\n83\n89\n97\n101\n103\n107\n109\n113\n127\n131\n137\n139\n149\n151\n157\n163\n167\n173\n179\n181\n191\n193\n197\n199\n211\n223\n227\n229\n233\n239\n241\n251\n257\n263\n269\n271\n277\n281\n283\n293\n307\n311\n313\n317\n331\n337\n347\n349\n353\n359\n367\n373\n379\n383\n389\n397\n401\n409\n419\n421\n431\n433\n439\n443\n449\n457\n461\n463\n467\n479\n487\n491\n499\n503\n509\n521\n523",
"base64Encoded": false,
"contentType": "text/plain"
}
},
"exitCode": 0
}
}
]

View file

@ -1,99 +0,0 @@
2
3
5
7
11
13
17
19
23
29
31
37
41
43
47
53
59
61
67
71
73
79
83
89
97
101
103
107
109
113
127
131
137
139
149
151
157
163
167
173
179
181
191
193
197
199
211
223
227
229
233
239
241
251
257
263
269
271
277
281
283
293
307
311
313
317
331
337
347
349
353
359
367
373
379
383
389
397
401
409
419
421
431
433
439
443
449
457
461
463
467
479
487
491
499
503
509
521
523

View file

@ -1,38 +0,0 @@
# NOTE(review): presumably injected/overwritten by the execution harness — the
# marker comment reads like a harness contract; leave this line untouched.
inputFiles = [] # DO NOT CHANGE THIS LINE
def is_prime(n):
    """Primality test using trial division over 6k +/- 1 candidates only."""
    if n <= 3:
        return n > 1  # negatives, 0, 1 -> False; 2, 3 -> True
    if n % 2 == 0 or n % 3 == 0:
        return False
    divisor = 5
    while divisor * divisor <= n:
        if n % divisor == 0 or n % (divisor + 2) == 0:
            return False
        divisor += 6  # step to the next 6k-1 / 6k+1 pair
    return True
def generate_primes(limit):
    """Return the first ``limit`` primes, found by scanning integers upward."""
    found = []
    value = 2
    while len(found) < limit:
        if is_prime(value):
            found.append(value)
        value += 1
    return found
import json

# Emit the first 1000 primes, one per line, wrapped in the sandbox's
# JSON file-manifest structure and printed to stdout.
primes = generate_primes(1000)
prime_numbers_content = "\n".join(str(p) for p in primes)

result = {
    "prime_numbers.txt": {
        "content": prime_numbers_content,
        "base64Encoded": False,
        "contentType": "text/plain",
    }
}
print(json.dumps(result))

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large Load diff

View file

@ -1,38 +0,0 @@
# NOTE(review): presumably replaced by the execution harness with real input
# files at runtime — keep this marker line byte-identical. TODO confirm.
inputFiles = [] # DO NOT CHANGE THIS LINE
def is_prime(n):
    """Return True if ``n`` is a prime number.

    Uses the classic optimization of checking divisibility by 2 and 3
    first, then only by numbers of the form 6k - 1 and 6k + 1.
    """
    if n <= 3:
        return n > 1
    if n % 2 == 0 or n % 3 == 0:
        return False
    k = 5
    while k * k <= n:
        if n % k == 0 or n % (k + 2) == 0:
            return False
        k += 6
    return True
def generate_first_n_primes(n):
    """Return a list holding the first ``n`` primes in increasing order."""
    primes = []
    current = 1
    while len(primes) < n:
        current += 1
        if is_prime(current):
            primes.append(current)
    return primes
import json

# Produce the first 77 primes and print them to stdout in the sandbox's
# file-manifest JSON format (newline-separated values in one text file).
first_77_primes = generate_first_n_primes(77)
primes_content = "\n".join(str(p) for p in first_77_primes)

result = {
    "first_77_primes.txt": {
        "content": primes_content,
        "base64Encoded": False,
        "contentType": "text/plain",
    }
}
print(json.dumps(result))

View file

@ -1,19 +0,0 @@
[
{
"attempt": 1,
"code": "inputFiles = [] # DO NOT CHANGE THIS LINE\n\ndef is_prime(n):\n if n <= 1:\n return False\n if n <= 3:\n return True\n if n % 2 == 0 or n % 3 == 0:\n return False\n i = 5\n while i * i <= n:\n if n % i == 0 or n % (i + 2) == 0:\n return False\n i += 6\n return True\n\ndef generate_first_n_primes(n):\n primes = []\n num = 2\n while len(primes) < n:\n if is_prime(num):\n primes.append(num)\n num += 1\n return primes\n\nfirst_77_primes = generate_first_n_primes(77)\nprimes_content = \"\\n\".join(map(str, first_77_primes))\n\nresult = {\n \"first_77_primes.txt\": {\n \"content\": primes_content,\n \"base64Encoded\": False,\n \"contentType\": \"text/plain\"\n }\n}\n\nimport json\nprint(json.dumps(result))",
"result": {
"success": true,
"output": "{\"first_77_primes.txt\": {\"content\": \"2\\n3\\n5\\n7\\n11\\n13\\n17\\n19\\n23\\n29\\n31\\n37\\n41\\n43\\n47\\n53\\n59\\n61\\n67\\n71\\n73\\n79\\n83\\n89\\n97\\n101\\n103\\n107\\n109\\n113\\n127\\n131\\n137\\n139\\n149\\n151\\n157\\n163\\n167\\n173\\n179\\n181\\n191\\n193\\n197\\n199\\n211\\n223\\n227\\n229\\n233\\n239\\n241\\n251\\n257\\n263\\n269\\n271\\n277\\n281\\n283\\n293\\n307\\n311\\n313\\n317\\n331\\n337\\n347\\n349\\n353\\n359\\n367\\n373\\n379\\n383\\n389\", \"base64Encoded\": false, \"contentType\": \"text/plain\"}}\n",
"error": "",
"result": {
"first_77_primes.txt": {
"content": "2\n3\n5\n7\n11\n13\n17\n19\n23\n29\n31\n37\n41\n43\n47\n53\n59\n61\n67\n71\n73\n79\n83\n89\n97\n101\n103\n107\n109\n113\n127\n131\n137\n139\n149\n151\n157\n163\n167\n173\n179\n181\n191\n193\n197\n199\n211\n223\n227\n229\n233\n239\n241\n251\n257\n263\n269\n271\n277\n281\n283\n293\n307\n311\n313\n317\n331\n337\n347\n349\n353\n359\n367\n373\n379\n383\n389",
"base64Encoded": false,
"contentType": "text/plain"
}
},
"exitCode": 0
}
}
]