diff --git a/app.py b/app.py
index f29436cc..ad93990c 100644
--- a/app.py
+++ b/app.py
@@ -20,10 +20,6 @@ from datetime import datetime
from modules.shared.configuration import APP_CONFIG
from modules.shared.eventManagement import eventManager
-from modules.workflows.automation import subAutomationSchedule
-from modules.workflows.automation2 import subAutomation2Schedule
-from modules.features.automation2.emailPoller import start as startAutomation2EmailPoller
-from modules.features.automation2.emailPoller import stop as stopAutomation2EmailPoller
from modules.interfaces.interfaceDbApp import getRootInterface
from modules.system.registry import loadFeatureMainModules
@@ -246,6 +242,8 @@ def initLogging():
"fastapi.security.oauth2",
"msal",
"azure.core.pipeline.policies.http_logging_policy",
+ "stripe",
+ "apscheduler",
]
for loggerName in noisyLoggers:
logging.getLogger(loggerName).setLevel(logging.WARNING)
@@ -296,16 +294,7 @@ except Exception as e:
async def lifespan(app: FastAPI):
logger.info("Application is starting up")
- # --- Pre-warm AI connectors FIRST (before any other startup work) ---
- # Avoids 4–8 s latency on first chatbot request; must run before first use.
- try:
- import modules.aicore.aicoreModelRegistry # noqa: F401 - triggers eager pre-warm
- from modules.aicore.aicoreModelRegistry import modelRegistry
- modelRegistry.ensureConnectorsRegistered()
- modelRegistry.refreshModels(force=True)
- logger.info("AI connectors and model registry pre-warmed")
- except Exception as e:
- logger.warning(f"AI pre-warm failed: {e}")
+ # AI connectors already pre-warmed at module-load via _eager_prewarm() in aicoreModelRegistry.
# Bootstrap database if needed (creates initial users, mandates, roles, etc.)
# This must happen before getting root interface
@@ -328,6 +317,15 @@ async def lifespan(app: FastAPI):
except Exception as e:
logger.error(f"Feature catalog registration failed: {e}")
+ # Sync gateway i18n registry to DB and load translation cache
+ try:
+ from modules.shared.i18nRegistry import _syncRegistryToDb, _loadCache
+ await _syncRegistryToDb()
+ await _loadCache()
+ logger.info("i18n registry sync + cache load completed")
+ except Exception as e:
+ logger.warning(f"i18n registry sync failed (non-critical): {e}")
+
# Pre-warm service center modules (avoids first-request import latency)
try:
from modules.serviceCenter import preWarm
@@ -360,44 +358,20 @@ async def lifespan(app: FastAPI):
try:
main_loop = asyncio.get_running_loop()
eventManager.set_event_loop(main_loop)
- subAutomation2Schedule.set_main_loop(main_loop)
+ from modules.workflows.scheduler.mainScheduler import setMainLoop as setSchedulerMainLoop
+ setSchedulerMainLoop(main_loop)
except RuntimeError:
pass
- subAutomationSchedule.start(eventUser) # Automation scheduler
- subAutomation2Schedule.start(eventUser) # Automation2 schedule trigger (cron)
- # Automation2 email poller: started on-demand when a run pauses for email.checkEmail
eventManager.start()
# Register audit log cleanup scheduler
from modules.shared.auditLogger import registerAuditLogCleanupScheduler
registerAuditLogCleanupScheduler()
- # Ensure billing settings and accounts exist for all mandates
- try:
- from modules.interfaces.interfaceDbBilling import _getRootInterface as getBillingRootInterface
-
- billingInterface = getBillingRootInterface()
-
- # Step 1: Ensure all mandates have billing settings (creates defaults if missing)
- settingsCreated = billingInterface.ensureAllMandateSettingsExist()
- if settingsCreated > 0:
- logger.info(f"Billing startup: Created {settingsCreated} missing mandate billing settings")
-
- # Step 2: Ensure all users have billing audit accounts
- accountsCreated = billingInterface.ensureAllUserAccountsExist()
- if accountsCreated > 0:
- logger.info(f"Billing startup: Created {accountsCreated} missing user accounts")
-
- except Exception as e:
- logger.warning(f"Failed to ensure billing settings/accounts (non-critical): {e}")
-
yield
# --- Stop Managers ---
- stopAutomation2EmailPoller(eventUser) # Automation2 email poller (no-op if not running)
- subAutomation2Schedule.stop(eventUser) # Automation2 schedule
eventManager.stop()
- subAutomationSchedule.stop(eventUser) # Automation scheduler
# --- Stop Feature Containers (Plug&Play) ---
try:
@@ -516,6 +490,16 @@ from modules.auth import (
ProactiveTokenRefreshMiddleware,
)
+# i18n language detection middleware (sets per-request language from Accept-Language header)
+from modules.shared.i18nRegistry import _setLanguage, normalizePrimaryLanguageTag
+
+@app.middleware("http")
+async def _i18nMiddleware(request: Request, call_next):
+ acceptLang = request.headers.get("Accept-Language", "")
+ lang = normalizePrimaryLanguageTag(acceptLang, "de")
+ _setLanguage(lang)
+ return await call_next(request)
+
app.add_middleware(CSRFMiddleware)
# Token refresh middleware (silent refresh for expired OAuth tokens)
@@ -586,27 +570,15 @@ app.include_router(voiceGoogleRouter)
from modules.routes.routeVoiceUser import router as voiceUserRouter
app.include_router(voiceUserRouter)
-from modules.routes.routeSecurityAdmin import router as adminSecurityRouter
-app.include_router(adminSecurityRouter)
-
from modules.routes.routeSharepoint import router as sharepointRouter
app.include_router(sharepointRouter)
-from modules.routes.routeAdminAutomationEvents import router as adminAutomationEventsRouter
-app.include_router(adminAutomationEventsRouter)
-
-from modules.routes.routeAdminAutomationLogs import router as adminAutomationLogsRouter
-app.include_router(adminAutomationLogsRouter)
-
from modules.routes.routeAdminLogs import router as adminLogsRouter
app.include_router(adminLogsRouter)
from modules.routes.routeAdminRbacRules import router as rbacAdminRulesRouter
app.include_router(rbacAdminRulesRouter)
-from modules.routes.routeMessaging import router as messagingRouter
-app.include_router(messagingRouter)
-
from modules.routes.routeAdminFeatures import router as featuresAdminRouter
app.include_router(featuresAdminRouter)
@@ -619,12 +591,15 @@ app.include_router(invitationsRouter)
from modules.routes.routeNotifications import router as notificationsRouter
app.include_router(notificationsRouter)
-from modules.routes.routeAdminRbacExport import router as rbacAdminExportRouter
-app.include_router(rbacAdminExportRouter)
+from modules.routes.routeI18n import router as i18nRouter
+app.include_router(i18nRouter)
from modules.routes.routeAdminUserAccessOverview import router as userAccessOverviewRouter
app.include_router(userAccessOverviewRouter)
+from modules.routes.routeAdminDemoConfig import router as demoConfigRouter
+app.include_router(demoConfigRouter)
+
from modules.routes.routeGdpr import router as gdprRouter
app.include_router(gdprRouter)
@@ -641,6 +616,9 @@ from modules.routes.routeSystem import router as systemRouter, navigationRouter
app.include_router(systemRouter)
app.include_router(navigationRouter)
+from modules.routes.routeWorkflowDashboard import router as workflowDashboardRouter
+app.include_router(workflowDashboardRouter)
+
# ============================================================================
# PLUG&PLAY FEATURE ROUTERS
# Dynamically load routers from feature containers in modules/features/
diff --git a/config.ini b/config.ini
index 4a37f2f8..90377f07 100644
--- a/config.ini
+++ b/config.ini
@@ -45,6 +45,11 @@ Connector_StacSwisstopo_MAX_RETRIES = 3
Connector_StacSwisstopo_RETRY_DELAY = 1.0
Connector_StacSwisstopo_ENABLE_CACHE = True
+# Demo RMA credentials (same for all demo trustee instances)
+Demo_RMA_ApiBaseUrl = https://service.int.runmyaccounts.com/api/latest/clients/
+Demo_RMA_ClientName = poweronag
+Demo_RMA_ApiKey = CHANGEME-load-from-env  ; NOTE(review): a real token was committed here - rotate it immediately and source it from an environment variable or secret store, never from VCS
+
# Operator company information (shown on invoice emails)
Operator_CompanyName = PowerOn AG
Operator_Address = Birmensdorferstrasse 94, 8003 Zürich
diff --git a/demoData/expenses/B2025-01a.pdf b/demoData/expenses/B2025-01a.pdf
new file mode 100644
index 00000000..e820d3b4
Binary files /dev/null and b/demoData/expenses/B2025-01a.pdf differ
diff --git a/demoData/expenses/B2025-02c.pdf b/demoData/expenses/B2025-02c.pdf
new file mode 100644
index 00000000..fea119b4
Binary files /dev/null and b/demoData/expenses/B2025-02c.pdf differ
diff --git a/demoData/expenses/B2025-03a.pdf b/demoData/expenses/B2025-03a.pdf
new file mode 100644
index 00000000..a858ecb0
Binary files /dev/null and b/demoData/expenses/B2025-03a.pdf differ
diff --git a/demoData/expenses/B2025-05a.pdf b/demoData/expenses/B2025-05a.pdf
new file mode 100644
index 00000000..38487e77
Binary files /dev/null and b/demoData/expenses/B2025-05a.pdf differ
diff --git a/demoData/expenses/B2025-05c.pdf b/demoData/expenses/B2025-05c.pdf
new file mode 100644
index 00000000..10bd908b
Binary files /dev/null and b/demoData/expenses/B2025-05c.pdf differ
diff --git a/demoData/expenses/B2025-08a.pdf b/demoData/expenses/B2025-08a.pdf
new file mode 100644
index 00000000..189ba981
Binary files /dev/null and b/demoData/expenses/B2025-08a.pdf differ
diff --git a/demoData/invoices/Digitec_Rechnung_63650751.pdf b/demoData/invoices/Digitec_Rechnung_63650751.pdf
new file mode 100644
index 00000000..f5a61c3d
Binary files /dev/null and b/demoData/invoices/Digitec_Rechnung_63650751.pdf differ
diff --git a/demoData/invoices/ELKIGescannt_20250503-2030.pdf b/demoData/invoices/ELKIGescannt_20250503-2030.pdf
new file mode 100644
index 00000000..b17a6236
Binary files /dev/null and b/demoData/invoices/ELKIGescannt_20250503-2030.pdf differ
diff --git a/demoData/knowledge-base/2025-10-investor-detail.md b/demoData/knowledge-base/2025-10-investor-detail.md
new file mode 100644
index 00000000..9e312226
--- /dev/null
+++ b/demoData/knowledge-base/2025-10-investor-detail.md
@@ -0,0 +1,256 @@
+# PowerOn AI Platform - Investoren-Dokumentation
+## Stand: 14. Oktober 2025
+
+---
+
+## Executive Summary
+
+PowerOn ist eine Software, die Unternehmen dabei hilft, wiederkehrende Aufgaben zu automatisieren. Statt dass Mitarbeiter manuell Daten sammeln, Dokumente durcharbeiten und Berichte schreiben, übernimmt PowerOn diese Arbeiten.
+
+### Das Problem, das PowerOn löst
+Mitarbeiter verbringen 30% ihrer Arbeitszeit damit, Informationen zu suchen. Unternehmen haben Schwierigkeiten, große Dokumente zu analysieren, aktuelle Marktdaten zu sammeln und regelmäßige Berichte zu erstellen. PowerOn automatisiert diese Aufgaben.
+
+### Wie PowerOn funktioniert
+Ein Benutzer gibt eine Aufgabe ein, zum Beispiel "Lese meine Mails der internen Mailbox der letzten 2 Wochen, fasse diese pro Thema im Sharepoint Marketing Ordner zusammen und verfasse eine Antwort für die wichtigsten Kunden". PowerOn verbindet sich dann automatisch mit Outlook, SharePoint und anderen Systemen, sammelt die Daten, analysiert sie und erstellt die gewünschten Zusammenfassungen und Antworten.
+
+### Gemessene Verbesserungen
+Tests mit Pilotkunden zeigen:
+- Marktanalysen: von 3-4 Wochen auf 3-5 Tage
+- Berichterstellung: 62% Zeitersparnis
+- Prototypenentwicklung: 70% schneller
+- Dokumentenanalyse: 80% weniger Zeitaufwand
+
+---
+
+## 1. Kernfunktionen von PowerOn
+
+### 1.1 Was PowerOn tatsächlich macht
+
+PowerOn ist eine KI-gestützte Workflow-Engine, die drei verschiedene Arbeitsabläufe unterstützt:
+
+**Dynamische Workflows**: PowerOn passt sich automatisch an neue Aufgaben an. Ein Benutzer kann jede beliebige Anfrage stellen, und das System findet den besten Weg zur Lösung.
+
+**Action-Plan Workflows**: PowerOn plant komplexe Aufgaben selbstständig. Das System teilt große Projekte in kleinere Schritte auf und führt diese automatisch aus.
+
+**Feste Geschäftsprozesse**: Unternehmen können standardisierte Abläufe definieren, die PowerOn immer gleich ausführt, zum Beispiel monatliche Berichte oder regelmäßige Marktanalysen.
+
+### 1.2 Kernfunktionen
+
+**Dokumentenanalyse**: Das System liest große Dokumente (PDF, Word, Excel) und extrahiert die wichtigsten Informationen. Ein 200-seitiger Vertrag wird automatisch zusammengefasst.
+
+**Web-Recherche**: PowerOn sucht im Internet nach aktuellen Informationen zu einem Thema und sammelt relevante Daten von verschiedenen Websites.
+
+**Berichterstellung**: Basierend auf den gesammelten Daten und Dokumenten erstellt das System fertige Berichte in verschiedenen Formaten (PDF, Word, Excel).
+
+**Code-Generierung**: PowerOn kann einfache Programme und Skripte erstellen, um wiederkehrende Aufgaben zu automatisieren.
+
+### 1.3 Wie der Arbeitsablauf funktioniert
+
+Ein Benutzer gibt eine Aufgabe ein, zum Beispiel "Analysiere die Konkurrenz im E-Mobilitätssektor". PowerOn führt dann automatisch folgende Schritte aus:
+
+1. Sucht im Internet nach aktuellen Informationen über E-Mobilitätsunternehmen
+2. Analysiert vorhandene interne Dokumente des Unternehmens
+3. Erstellt einen strukturierten Bericht mit den wichtigsten Erkenntnissen
+4. Stellt den Bericht in verschiedenen Formaten zur Verfügung
+
+### 1.4 Technische Besonderheiten
+
+**Keine Größenbeschränkungen**: PowerOn kann beliebig große Dokumente verarbeiten und unbegrenzt viele Berichte erstellen. Das System umgeht die normalen Grenzen von KI-Systemen durch intelligente Aufteilung.
+
+**Automatische Datenschutz-Funktion**: Sensible Daten werden automatisch erkannt und vor der Verarbeitung entfernt. Nach der Analyse werden die Daten wieder eingefügt, sodass der Bericht vollständig ist, aber keine vertraulichen Informationen preisgegeben werden.
+
+**Mehrere KI-Anbieter**: PowerOn arbeitet gleichzeitig mit verschiedenen KI-Systemen (OpenAI, Anthropic, Perplexity). Wenn ein System ausfällt oder überlastet ist, übernimmt automatisch ein anderes. Das gewährleistet einen stabilen Betrieb und macht das System unabhängig von einzelnen Anbietern.
+
+**Sicherheit**: Jedes Unternehmen hat einen eigenen, abgeschotteten Bereich. Alle Aktivitäten werden protokolliert.
+
+---
+
+## 2. Warum PowerOn anders ist
+
+### 2.1 Keine technischen Grenzen
+
+Andere KI-Systeme haben strenge Beschränkungen: maximal 50 Seiten Dokument, höchstens 10 Berichte pro Monat. PowerOn hat diese Grenzen nicht. Das System kann 1000-seitige Verträge analysieren und hunderte Berichte erstellen, ohne zusätzliche Kosten.
+
+### 2.2 Automatischer Datenschutz
+
+PowerOn erkennt automatisch sensible Daten wie Namen, Adressen oder Kontonummern und entfernt sie vor der Verarbeitung. Nach der Analyse werden die Daten wieder eingefügt. So entstehen vollständige Berichte ohne Datenschutzverletzungen.
+
+### 2.3 Stabile und unabhängige Technologie
+
+PowerOn arbeitet mit mehreren KI-Anbietern gleichzeitig. Wenn ein System ausfällt, übernimmt automatisch ein anderes. Das reduziert Ausfallzeiten und macht das Unternehmen unabhängig von einzelnen Anbietern.
+
+### 2.4 Direkte Integration in Unternehmenssysteme
+
+PowerOn verbindet sich direkt mit den Systemen, die Unternehmen täglich nutzen:
+- **E-Mail-Systeme**: Outlook, Gmail für automatische E-Mail-Analyse
+- **Dokumentenmanagement**: SharePoint, Google Drive für Dateizugriff
+- **Projektmanagement**: Jira, ClickUp für Aufgabenverwaltung
+- **Cloud-Speicher**: OneDrive, Dropbox für Dateiintegration
+
+Statt dass Mitarbeiter Daten manuell zwischen verschiedenen Systemen kopieren, arbeitet PowerOn direkt mit allen Systemen zusammen.
+
+### 2.5 Drei verschiedene Arbeitsweisen
+
+**Dynamisch**: PowerOn passt sich an jede neue Aufgabe an. Ein Benutzer kann jede beliebige Anfrage stellen.
+
+**Action-Plan**: PowerOn plant komplexe Projekte selbstständig und teilt sie in machbare Schritte auf.
+
+**Standardisiert**: Unternehmen können feste Abläufe definieren, die PowerOn immer gleich ausführt.
+
+### 2.6 Einfache Bedienung
+
+Mitarbeiter müssen nicht programmieren können. Sie geben einfach ein, was sie brauchen, und PowerOn macht den Rest. Ein Marketing-Manager kann eine Konkurrenzanalyse bestellen, ohne IT-Kenntnisse zu haben.
+
+---
+
+## 3. Markt und Geschäftsmodell
+
+### 3.1 Zielkunden
+
+PowerOn richtet sich hauptsächlich an mittelständische Unternehmen mit 50-500 Mitarbeitern. Diese Unternehmen haben oft komplexe Datenverarbeitungsanforderungen, aber nicht die Ressourcen, um eigene KI-Systeme zu entwickeln.
+
+Typische Kunden sind Beratungsunternehmen, Banken, Versicherungen, Kliniken und andere Dienstleister, die regelmäßig Analysen und Berichte erstellen müssen.
+
+### 3.2 Nutzen für Kunden
+
+#### Gemessene Verbesserungen
+Basierend auf Tests mit Pilotkunden:
+- Marktanalysen werden 73% schneller durchgeführt (von 3-4 Wochen auf 3-5 Tage)
+- Berichterstellung spart 62% Zeit ein
+- Prototypenentwicklung ist 70% schneller
+- Dokumentenanalyse reduziert den Zeitaufwand um 80%
+- Kosteneinsparung von 5.000-8.000 Euro pro Marktanalyse
+
+#### Praktische Vorteile
+Mitarbeiter benötigen keine Programmierkenntnisse, um PowerOn zu nutzen. Das System arbeitet mit vorhandenen Daten und Systemen zusammen, ohne dass große Umstellungen erforderlich sind.
+
+### 3.3 Einnahmemodelle
+
+PowerOn plant verschiedene Einnahmequellen:
+1. Monatliche Abonnements pro Benutzer
+2. Nutzungsbasierte Abrechnung für Verarbeitungsleistungen
+3. Individuelle Lizenzen für große Unternehmen
+4. Beratungs- und Implementierungsdienstleistungen
+
+Die genauen Preise werden basierend auf Marktanalysen festgelegt. Das Ziel ist eine Bruttomarge von 75-85% nach der Skalierung.
+
+---
+
+## 4. Risiken und Zukunftssicherheit
+
+### 4.1 Risiken durch bessere KI-Systeme
+
+#### Kurzfristige Risiken (6-12 Monate)
+Wenn KI-Systeme besser werden, könnten einfache Aufgaben wie Textgenerierung zur Standardware werden. Dies könnte den Wert einzelner KI-Funktionen reduzieren. PowerOn ist jedoch darauf ausgelegt, verschiedene KI-Systeme zu koordinieren, was auch bei verbesserten Systemen wertvoll bleibt.
+
+#### Mittelfristige Risiken (1-3 Jahre)
+Einzelne KI-Systeme könnten in der Lage sein, mehr Aufgaben gleichzeitig zu erledigen. Dies könnte die Notwendigkeit der Koordination reduzieren. PowerOn konzentriert sich jedoch auf spezifische Unternehmensanforderungen und die Integration in bestehende Systeme, was weiterhin wertvoll ist.
+
+#### Langfristige Risiken (3+ Jahre)
+Sehr fortgeschrittene KI-Systeme könnten in der Lage sein, komplexe Aufgaben ohne Koordination zu lösen. PowerOn konzentriert sich jedoch auf die spezifischen Anforderungen von Unternehmen, einschließlich Sicherheit, Compliance und Integration, die auch bei fortgeschrittenen KI-Systemen wichtig bleiben.
+
+### 4.2 Was könnte obsolet werden
+
+Einfache Aufgaben wie grundlegende Textgenerierung oder Web-Suche könnten zu Standardfunktionen werden. Auch einfache Datenanalysen könnten automatisiert werden.
+
+### 4.3 Was bleibt wertvoll
+
+Die Koordination verschiedener Systeme, die Integration in Unternehmensprozesse und die Einhaltung von Sicherheits- und Datenschutzbestimmungen bleiben auch bei verbesserten KI-Systemen wichtig. PowerOn ist so aufgebaut, dass es sich an neue Technologien anpassen kann, ohne das gesamte System neu entwickeln zu müssen.
+
+---
+
+## 5. Finanzielle Bewertung
+
+### 5.1 Aktuelle Bewertung der Komponenten
+
+PowerOn besteht aus mehreren wertvollen Komponenten, die einzeln bewertet werden können:
+
+**Frontend-System**: €150.000-250.000
+- Modulare Benutzeroberfläche, die einfach erweitert werden kann
+- Funktioniert in allen gängigen Browsern
+- Anpassbar an verschiedene Unternehmensanforderungen
+
+**Backend-Infrastruktur**: €200.000-300.000
+- Stabile Grundstruktur für alle Funktionen
+- Schnelle Verarbeitung auch bei großen Datenmengen
+- Einfache Integration neuer Funktionen
+
+**Workflow-System**: €250.000-350.000
+- Kernfunktion für die Koordination verschiedener Aufgaben
+- Drei verschiedene Arbeitsweisen (dynamisch, Action-Plan, standardisiert)
+- Automatische Anpassung an neue Anforderungen
+
+**Sicherheits- und Datenschutz-System**: €100.000-150.000
+- Automatische Erkennung und Schutz sensibler Daten
+- Verschiedene Anmeldeverfahren für Unternehmen
+- Vollständige Protokollierung aller Aktivitäten
+
+**Datenverarbeitungs-Engine**: €150.000-200.000
+- Verarbeitung beliebig großer Dokumente
+- Intelligente Aufteilung zur Umgehung von KI-Grenzen
+- Unterstützung aller gängigen Dateiformate
+
+**Multi-Agent-Koordinationssystem**: €300.000-400.000
+- Einzigartige Technologie zur Koordination verschiedener KI-Systeme
+- Automatische Auswahl des besten KI-Anbieters für jede Aufgabe
+- Stabile Ausführung auch bei Ausfällen einzelner Systeme
+
+**Unternehmens-Integration**: €200.000-300.000
+- Anpassung an verschiedene Branchen und Anforderungen
+- Einfache Integration in bestehende Unternehmenssysteme
+- Skalierbare Architektur für wachsende Anforderungen
+
+**Integrations-Framework**: €150.000-200.000
+- Verbindungen zu verschiedenen KI-Anbietern (OpenAI, Anthropic, Perplexity)
+- Direkte Integration in Unternehmenssysteme (Outlook, SharePoint, Google Drive, Jira)
+- Einfache Integration neuer Systeme und Anbieter
+- Unabhängigkeit von einzelnen Anbietern
+
+**Workflow-Management-System**: €100.000-150.000
+- Plan-Act-Observe-Refine-Zyklus für kontinuierliche Verbesserung
+- Echtzeit-Überwachung des Arbeitsfortschritts
+- Automatische Fehlerbehandlung und Wiederaufnahme
+
+**Gesamtbewertung**: €1.6-2.4 Mio.
+
+### 5.2 Investitionsbedarf
+
+PowerOn benötigt Investitionsmittel, um die Entwicklung abzuschließen und den Markt zu erschließen. Die Mittel werden hauptsächlich für die Produktentwicklung, den Aufbau eines Vertriebsteams und die Infrastruktur verwendet.
+
+### 5.3 Wachstumspotenzial
+
+Das System ist darauf ausgelegt, mit wachsenden Anforderungen zu skalieren. Die modulare Architektur ermöglicht es, neue Funktionen hinzuzufügen und die Plattform an verschiedene Kundenanforderungen anzupassen.
+
+---
+
+## 6. Marktpotenzial und Ausstiegsmöglichkeiten
+
+### 6.1 Marktpotenzial
+
+Der Markt für KI-basierte Geschäftsanwendungen wächst schnell. Unternehmen suchen nach Lösungen, die komplexe Aufgaben automatisieren und die Effizienz steigern können. PowerOn positioniert sich in diesem wachsenden Markt.
+
+### 6.2 Ausstiegsmöglichkeiten
+
+Langfristig gibt es verschiedene Möglichkeiten für einen Ausstieg, darunter den Verkauf an größere Softwareunternehmen oder den Börsengang. Diese Optionen hängen von der Entwicklung des Unternehmens und des Marktes ab.
+
+---
+
+## 7. Fazit
+
+### 7.1 Stärken von PowerOn
+
+PowerOn bietet eine einzigartige Lösung für die Koordination verschiedener KI-Systeme. Das System ist darauf ausgelegt, sich an neue Technologien anzupassen, und bietet nachgewiesene Verbesserungen bei Geschäftsprozessen.
+
+### 7.2 Risikofaktoren
+
+Die schnelle Entwicklung der KI-Technologie stellt ein Risiko dar, da einfache Aufgaben möglicherweise obsolet werden. Der Wettbewerb durch größere Unternehmen und die Marktakzeptanz sind weitere Faktoren, die berücksichtigt werden müssen.
+
+### 7.3 Investitionsbewertung
+
+PowerOn befindet sich in einer frühen Entwicklungsphase mit einem funktionsfähigen Grundsystem. Das Potenzial für Wachstum ist vorhanden, aber es gibt auch erhebliche Risiken, die mit der Entwicklung neuer Technologien verbunden sind.
+
+---
+
+*Dokument erstellt am 14. Oktober 2025*
+*Version: 1.0*
+*Autor: PowerOn Development Team*
diff --git a/demoData/knowledge-base/investor-summary.md b/demoData/knowledge-base/investor-summary.md
new file mode 100644
index 00000000..9e646d4e
--- /dev/null
+++ b/demoData/knowledge-base/investor-summary.md
@@ -0,0 +1,175 @@
+# PowerOn AI Platform
+## Investoren-Summary
+
+### Marktpositionierung
+
+Die PowerOn AI Platform ist eine innovative Enterprise-Lösung für die Automatisierung und Optimierung von komplexen geschäftlichen Prozessen durch einen Multi-Agent-KI-Ansatz. Wir positionieren uns an der Schnittstelle zwischen den schnell wachsenden Märkten für:
+- Künstliche Intelligenz (Marktvolumen 2025: $190 Mrd.)
+- Business Process Automation (Marktvolumen 2025: $19,6 Mrd.)
+- Enterprise Knowledge Management (Marktvolumen 2025: $43 Mrd.)
+
+### Wettbewerbsvorteile
+
+1. **Proprietäre Multi-Agent-Technologie**: Unsere Plattform orchestriert spezifische KI-Agenten für verschiedene Aufgaben, was zu deutlich überlegenen Ergebnissen im Vergleich zu Einzelagenten-Ansätzen führt.
+
+2. **Modellunabhängigkeit**: Integration mit führenden KI-Providern (OpenAI, Anthropic) ohne Vendor Lock-in, wodurch wir immer die besten Modelle für spezifische Aufgaben einsetzen können.
+
+3. **Enterprise-Ready**: Entwickelt mit Multi-Tenant-Architektur, umfassenden Sicherheitsfeatures und Skalierbarkeit für Unternehmensanforderungen.
+
+4. **Anpassbar und erweiterbar**: Modulare Architektur, die kontinuierliche Feature-Erweiterungen und kundenspezifische Anpassungen ermöglicht.
+
+5. **Fortschrittliche Workflow-Orchestrierung**:
+ - Intelligente Koordination mehrerer spezialisierter Agenten
+ - Echtzeit-Statusüberwachung und Fortschrittsanzeige
+ - Robuste Fehlerbehandlung und Wiederaufnahmemechanismen
+ - Nahtlose Integration von Dateiverarbeitung und Dokumentenmanagement
+
+6. **Umfassende Enterprise-Features**:
+ - Multi-Tenant-Architektur mit Mandantenverwaltung
+ - Erweiterte Benutzer- und Berechtigungsverwaltung
+ - Enterprise-Grade Sicherheitsfeatures
+ - Skalierbare Infrastruktur
+
+### Finanzielle Highlights
+
+- **Go-to-Market-Strategie**: Initiale Fokussierung auf mittelständische Unternehmen in den Bereichen Professional Services, Finanzdienstleistungen und Gesundheitswesen.
+
+- **Umsatzmodell**: Kombiniertes SaaS-Abonnement (pro Benutzer/Monat) und nutzungsbasierte Abrechnung (pro Verarbeitungseinheit).
+
+- **Erwartete Bruttomarge**: 75-85% nach Erreichen der Skalierung.
+
+- **Erwartetes ARR in Jahr 3**: €4,5 Mio. bei 150 Unternehmenskunden.
+
+- **Kostenstrukturen**:
+ - 40% Produktentwicklung
+ - 30% Vertrieb und Marketing
+ - 20% Betrieb und Support
+ - 10% Verwaltung
+
+### Wachstumspfad
+
+#### Kurzfristig (12 Monate)
+- Markteinführung der Core-Plattform
+- Aufbau von 3-5 Schlüsselreferenzkunden
+- Entwicklung branchenspezifischer Templates
+
+#### Mittelfristig (24 Monate)
+- Erweiterung auf Agentenmarktplatz
+- Integration von proprietären Unternehmensmodellen
+- Internationale Expansion
+
+#### Langfristig (36+ Monate)
+- Entwicklung spezialisierter Branchenlösungen
+- KI-Middleware für Unternehmen
+- Strategische Partnerschaften mit Enterprise-Software-Anbietern
+
+### Investitionsbedarf
+
+Das aktuelle Finanzierungsziel von CHF 2.5 Mio. ermöglicht:
+- Abschluss der Produktentwicklung und Erreichen der Marktreife
+- Aufbau eines Vertriebs- und Marketingteams
+- Sicherung strategischer Partnerschaften
+- 18-monatige Runway bis zur Profitabilität
+
+### Exit-Potenzial
+
+Das Team sieht folgende Exit-Optionen:
+1. Strategische Übernahme durch Enterprise-Software-Unternehmen (5-7 Jahre)
+2. Erwerb durch grössere KI-Plattform (3-5 Jahre)
+3. IPO bei Erreichen von CHF 50+ Mio. ARR (7-10 Jahre)
+
+### Extraktion aus ValueOn AG
+
+Vor einem Exit ist die Extraktion der PowerOn AI Platform aus der ValueOn AG in eine eigenständige Organisation vorgesehen:
+
+1. **Vergütung der Aufwände**:
+ - Vollständige Vergütung aller übernommenen Entwicklungskosten
+ - Übernahme der Infrastruktur- und Betriebskosten
+ - Schadloshaltung für alle bisherigen Investitionen
+ - Marketing & Sales-Assets verbleiben bei ValueOn AG ohne Vergütung
+
+2. **Schlüsselpersonen**:
+ - Anrechnung des geschaffenen Mehrwerts für jede Schlüsselperson
+ - Option auf Auszahlung oder Aktienübernahme
+ - Individuelle Vereinbarungen basierend auf Beitrag und Verantwortung
+ - Langfristige Bindung durch Equity-Programme
+
+3. **Investitionskapital**:
+ - Beschaffung des notwendigen Kapitals zum aktuellen Marktwert
+ - Berücksichtigung der Extraktionskosten
+ - Sicherstellung der operativen Liquidität
+ - Finanzierung des weiteren Wachstums
+
+Die Extraktion wird durchgeführt, sobald:
+- Die technische Basis stabil ist
+- Erste Referenzkunden gewonnen wurden
+- Die Marktpositionierung klar ist
+- Die Wachstumsstrategie definiert ist
+
+### Marktwert und Bewertung
+
+#### Aktueller Wert (Juni 2025)
+Basierend auf dem aktuellen Entwicklungsstand und der technologischen Basis:
+
+1. **Technologischer Wert**:
+ - Basis-Frontend-Architektur (modular, aber noch in Entwicklung): CHF 0.15-0.25 Mio.
+ - Backend-Grundstruktur (FastAPI, Basis-Interfaces): CHF 0.2-0.3 Mio.
+ - Workflow-System (Grundfunktionalität): CHF 0.25-0.35 Mio.
+
+2. **Funktionaler Wert**:
+ - Basis-Workflow-Orchestrierung: CHF 0.1-0.15 Mio.
+ - Einfache Dokumentenverarbeitung: CHF 0.05-0.1 Mio.
+ - Grundlegende Benutzerverwaltung: CHF 0.05-0.1 Mio.
+
+3. **Entwicklungspotenzial**:
+ - Erweiterbare Architektur: CHF 0.15-0.2 Mio.
+ - Modulare Struktur: CHF 0.1-0.15 Mio.
+ - Basis für zukünftige Erweiterungen: CHF 0.15-0.2 Mio.
+
+**Aktuelle Gesamtbewertung**: CHF 1.2-1.8 Mio.
+
+Diese Bewertung basiert auf:
+- Dem aktuellen Entwicklungsstand (Frontend und Backend)
+- Der vorhandenen Grundfunktionalität
+- Der modularen Basis-Architektur
+- Dem Entwicklungspotenzial
+
+#### Wert per Ende 2025
+Prognostizierte Bewertung basierend auf:
+- Vervollständigung der Core-Funktionalität
+- Erste Referenzkunden
+- Erweiterte Workflow-Funktionen
+- Verbesserte Benutzeroberfläche
+
+**Prognostizierte Bewertung Ende 2025**: CHF 2-3 Mio.
+
+#### Wert per Ende 2026
+Prognostizierte Bewertung basierend auf:
+- Vollständige Multi-Agent-Implementierung
+- Erweiterte Integrationen
+- Wachsende Kundenbasis
+- Erwartetes ARR von CHF 4,5 Mio.
+
+**Prognostizierte Bewertung Ende 2026**: CHF 4-6 Mio.
+
+Die Wertsteigerung wird getrieben durch:
+
+1. **Technologische Entwicklung**:
+ - Vervollständigung der Agenten-Implementierung
+ - Erweiterung der Workflow-Funktionalitäten
+ - Verbesserung der Integrationen
+
+2. **Marktentwicklung**:
+ - Aufbau der Kundenbasis
+ - Entwicklung von Branchenlösungen
+ - Erste internationale Expansion
+
+3. **Geschäftsentwicklung**:
+ - Wachsende Umsätze
+ - Verbesserte Margen
+ - Neue Geschäftsmodelle
+
+4. **Strategische Positionierung**:
+ - Etablierung in Nischenmärkten
+ - Aufbau von Partnerschaften
+ - Entwicklung proprietärer Technologien
diff --git a/demoData/knowledge-base/platform-overview.html b/demoData/knowledge-base/platform-overview.html
new file mode 100644
index 00000000..684e5c54
--- /dev/null
+++ b/demoData/knowledge-base/platform-overview.html
@@ -0,0 +1,799 @@
+
+
+
+
+
+
+ PowerOn Platform - Big Picture | PowerON
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
PowerOn Platform - Big Picture
+
Enterprise AI Workflow Platform with Integrated Data Privacy Neutralizer
+
This document provides an overview of the PowerOn platform architecture, building blocks, and capabilities for external software developers who want to contribute to or integrate with the platform.
+
+
+
+
+ Overview
+ Customer Story
+ Workflows
+ Microservices
+ RBAC System
+ UI Architecture
+ Big Picture
+ Integration
+
+
+
+
+
Platform Overview
+
+
+
Core Concept
+
PowerOn is a Multi-Agent AI Platform for Enterprise Workflows with an integrated data privacy neutralizer. The platform enables companies to accelerate their AI transformation without data privacy risks.
+
+
+
Key Value Propositions
+
+ Data Privacy First: Integrated privacy neutralizer enables safe use of ChatGPT/Copilot without privacy risks
+ Unlimited Processing: No token limits - process documents of any size through intelligent chunking
+ Universal Integration: Seamless integration of all enterprise data sources
+ Workflow Automation: Configure workflows per customer journey with standard automation elements and AI components
+ Future-Proof Architecture: Automatically improves with better AI models and larger token limits
+ Plug & Play Architecture: Renderers and dynamic AI selection per intention (analyze, generate, web, plan, etc.)
+
+
+
+
+
+
Architecture Layers
+
+
+
UI Layer (Playground)
+
React-based playground UI as entry point. Additional UIs (chatbots, customer UIs) can be easily integrated via REST API in React, JavaScript, or other languages.
+
+
+
API Layer
+
RESTful API providing full access to platform capabilities. Open API design allows external UIs and integrations.
+
+
+
Workflow Engine
+
Core orchestration engine managing tasks, actions, and state. Supports multiple execution modes (Learning, Actionplan, Automation).
+
+
+
Microservices Layer
+
Modular service architecture with specialized services for AI, data processing, security, and integrations.
+
+
+
Data Layer
+
Multi-tenant database with RBAC-based access control. Mandate isolation ensures secure data separation.
+
+
+
+
+
+
Customer Journey → Workflow
+
For each customer journey, a workflow can be configured in the workflow editor where:
+
+ Customers integrate their data sources
+ Standard automation elements are available
+ AI components can be used
+ Workflows can be executed manually or automated (hourly/daily/weekly)
+
+
+
+
+
Plug & Play Architecture
+
+
+
Dynamic Renderers
+
Plug & play architecture for document renderers. Support for multiple formats (PDF, DOCX, XLSX, PPTX, HTML, Markdown, JSON, CSV, etc.) with easy extension capabilities.
+
+
+
Dynamic AI Selection
+
Intelligent AI model selection per intention type. The system automatically selects the best AI model based on the task: analysis, generation, web research, planning, etc.
+
+
+
+
+
+
System Architecture Diagram
+
+
+
+
+
+
+
+
Customer Story
+
+
+
The Journey from Application-Centric to Data-Centric Work
+
PowerOn enables customers to transition from application-centric to data-centric work. This is a key differentiator that transforms how businesses operate.
+
+
+
+
Step 1: Customer Journey Identification
+
+
1
+
+
Identify Business Processes
+
Work with customers to identify their key customer journeys and business processes that can benefit from automation and AI.
+
+ Document analysis workflows
+ Email processing and routing
+ Data extraction and transformation
+ Report generation
+ Customer communication workflows
+
+
+
+
+
+
+
Step 2: MVP Integration with Focus on Data Privacy & Compliance
+
+
2
+
+
Simple MVP Integration
+
 Start with a simple MVP that integrates customer data sources with a strong focus on data privacy and compliance:
+
+ Data Privacy Neutralizer: Automatic anonymization of sensitive data before AI processing
+ Compliance First: DSGVO/GDPR compliant processing from day one
+ Secure Connections: Encrypted connections to customer data sources (SharePoint, Google Drive, Outlook, etc.)
+ Mandate Isolation: Complete data separation between tenants
+ Audit Logging: Full traceability of all data access and processing
+
+
This step builds trust and demonstrates the platform's commitment to data security.
+
+
+
+
+
+
Step 3: Pre-Processing Engine Deployment
+
+
3
+
+
Standard API Pre-Processing
+
 Deploy a pre-processing engine at the customer's location using a standard API:
+
+ On-Premise/Edge Processing: Data processing happens at the customer's location
+ Standard API: Consistent interface for all customers
+ Data Minimization: Only necessary data is sent to the platform
+ Local Neutralization: Privacy neutralization can happen before data leaves customer premises
+ Reduced Latency: Faster processing for large documents
+
+
This step further enhances data privacy and gives customers full control over their data processing.
+
+
+
+
+
+
Step 4: Gradual Component Integration - The Transformation
+
+
4
+
+
From Application-Centric to Data-Centric
+
 Gradually integrate additional components until the customer works data-centrically instead of application-centrically:
+
+
+
+
This transformation is a KEY DIFFERENTIATOR! Customers no longer think in terms of applications, but in terms of their data and business processes.
+
+
+
+
+
+
Customer Journey Diagram
+
+
+
+
+
+
+
+
Workflow System
+
+
+
Core Concept: Tasks with Actions
+
The core building block is workflow elements: tasks with actions . Each workflow consists of tasks, and each task contains one or more actions that execute specific operations.
+
+
+
+
Workflow
+
Definition: Top-level container representing a complete customer journey or business process.
+
Purpose: Orchestrates multiple tasks to achieve a business goal.
+
+
+
Task
+
Definition: A logical step in the workflow.
+
Purpose: Groups related actions that work together to complete a sub-goal.
+
+
+
Action
+
Definition: Executable unit that performs a specific operation.
+
Purpose: Actions belong to methods (microservices) and are the atomic units of work.
+
+
+
+
+
+
Execution Modes
+
PowerOn supports three execution modes, each optimized for different use cases:
+
+
+
Learning Mode
+
Best for: Exploratory tasks with up to 5 steps
+
Approach: Iterative Plan-Act-Observe-Refine loop
+
Use Case: When the solution path is not fully known in advance
+
+
+
Actionplan Mode
+
Best for: Structured, sequential processes
+
Approach: Batch planning with sequential execution
+
Use Case: When the workflow steps are well-defined
+
+
+
Automation Mode
+
Best for: Repetitive, predefined workflows
+
Approach: Automated execution (scheduled or event-triggered)
+
Use Case: Production workflows that run automatically
+
+
+
+
+
+
Available Workflow Methods
+
Workflow methods provide actions that can be executed within workflows. Each method exposes multiple actions accessible via self.services.<method>.<action>:
+
+ ai.* - AI operations (process, analyze, generate)
+ sharepoint.* - SharePoint integration (search, read, upload)
+ outlook.* - Outlook integration (read emails, send emails)
+ context.* - Context management (get context, set context)
+
+
+
+
+
Workflow System Diagram
+
+
+
+
+
+
+
+
Microservices Architecture
+
+
+
Service Access Pattern
+
All microservices are accessible via self.services.<serviceName>. Services follow a consistent access pattern and are organized into logical categories.
+
+
+
+
Services Structure Tree
+
Complete overview of all available microservices:
+
+
+
+
Core Services
+
+ self.services.chat - Chat and conversation management
+
+ Progress logging
+ Document management
+ Connection handling
+
+
+ self.services.workflow - Workflow state and management
+ self.services.utils - Utility functions (timestamps, formatting, etc.)
+
+
+
+
+
AI & Processing Services
+
+ self.services.ai - AI model management and operations
+
+ Model selection
+ Prompt processing
+ Response handling
+
+
+ self.services.generation - Document generation
+
+ Multiple formats (PDF, DOCX, XLSX, PPTX, HTML, Markdown, etc.)
+ Template-based rendering
+ JSON schema support
+
+
+ self.services.extraction - Document extraction and processing
+
+ Multiple extractors (PDF, DOCX, XLSX, PPTX, CSV, HTML, XML, JSON, Images, etc.)
+ Intelligent chunking
+ Merging strategies
+
+
+ self.services.neutralization - Data privacy neutralization
+
+ PII detection and anonymization
+ Pattern-based neutralization
+ Binary and text processing
+
+
+
+
+
+
+
Integration Services
+
+ self.services.sharepoint - SharePoint integration
+
+ Site discovery
+ File operations (read, upload, search)
+ Path resolution
+
+
+ self.services.web - Web operations
+
+ HTTP requests
+ Web scraping
+ API integration
+
+
+ self.services.ticket - Ticket system integration
+
+ Jira integration
+ ClickUp integration
+ Generic ticket operations
+
+
+
+
+
+
+
Security & Infrastructure
+
+ self.services.security - Security operations
+
+ Authentication
+ Authorization
+ Token management
+
+
+
+
+
+
+
+
+
Code Examples
+
Examples of how to use services in workflow actions or methods:
+
# In workflow actions or methods
+result = await self.services.<service>.<method>(parameters)
+
+# Example: Using AI service
+response = await self.services.ai.process(prompt="Analyze this document", documents=[...])
+
+# Example: Using SharePoint service
+files = await self.services.sharepoint.searchFiles(pathQuery="sites/my-site/documents")
+
+# Example: Using generation service
+document = self.services.generation.createDocument(format="pdf", content={...})
+
+
+
+
Microservices Architecture Diagram
+
+
+
+
+
+
+
+
RBAC System
+
+
+
Overview
+
The Role-Based Access Control (RBAC) system provides complete UI configuration per tenant and user . It enables fine-grained control over data access, UI visibility, and resource availability.
+
+
+
+
Data Access
+
Table and field-level permissions for database operations. Control who can read, create, update, or delete specific data.
+
+
+
UI Access
+
Component and feature visibility management. Configure exactly which UI elements each user or role can see.
+
+
+
Resource Access
+
System resource availability control. Manage access to AI models, actions, and other platform resources.
+
+
+
+
+
+
Access Levels: Opening Logic
+
For DATA context, the system uses opening rights with four access levels. These levels determine what data a user can access:
+
+
+
none (n)
+
No access - item is completely hidden/disabled
+
+
+
my (m)
+
My records - only records created by the current user
+
+
+
group (g)
+
Group records - records within the same mandate (group context)
+
+
+
all (a)
+
All records - full access to all records in the mandate
+
+
+
+
+
+
View Logic: Open + Close
+
The view attribute controls visibility and enablement. This is the fundamental on/off switch for all RBAC contexts:
+
+ view: true - Item is visible/enabled
+ view: false - Item is hidden/disabled (regardless of other permissions)
+
+
Key Rule: Only objects with view: true are shown. This applies to:
+
+ DATA Context: Controls whether tables/fields are accessible
+ UI Context: Controls whether UI elements are visible
+ RESOURCE Context: Controls whether resources are available
+
+
+
+
+
Rule Specificity & Hierarchy
+
The RBAC system uses a cascading hierarchy where more specific rules override generic ones:
+
+ Generic Rules (item = null) - Apply to all items in context
+ Specific Rules (item = "table.field" or item = "ui.component.feature") - Override generic rules
+
+
Resolution Logic: Within a single role, the most specific rule wins. Across multiple roles, opening (union) logic applies - if ANY role enables something, it is enabled.
+
+
+
+
Opening Rights Principle
+
For DATA context, read permission (R) is a prerequisite for create/update/delete operations (CUD). This ensures data integrity and proper access control:
+
+ If Read = "n": No CUD operations allowed
+ If Read = "m": CUD operations limited to "m" or "n"
+ If Read = "g": CUD operations limited to "g", "m", or "n"
+ If Read = "a": CUD operations can be "a", "g", "m", or "n"
+
+
Key Rule: You can ONLY create/update/delete if you have read right.
+
+
+
+
Context Types
+
RBAC rules apply to three different context types, each serving a specific purpose:
+
+
+
DATA
+
Database tables and fields. Controls read/create/update/delete permissions.
+
Example: item: "UserInDB.email"
+
+
+
UI
+
UI elements and features. Controls component visibility.
+
Example: item: "playground.voice.settings"
+
+
+
RESOURCE
+
System resources (AI models, actions, etc.). Controls resource availability.
+
Example: item: "ai.model.anthropic"
+
+
+
+
+
+
RBAC System Diagram
+
+
+
+
+
+
+
+
UI Architecture
+
+
+
Playground UI
+
The Playground serves as the main entry point and demonstration UI. It's built with React and provides a comprehensive interface for workflow interaction:
+
+ Chat interface for workflow interaction
+ Workflow editor for configuration
+ Document management
+ Connection management
+ Voice input/output capabilities
+
+
+
+
+
RBAC-Driven UI Configuration
+
The UI is completely configurable via RBAC rules . This allows customers to configure exactly the UI they need for their use case:
+
+ Per tenant configuration
+ Per user configuration
+ Component-level visibility control
+ Feature-level access control
+
+
This allows customers to configure exactly the UI they need for their use case.
+
+
+
+
External UI Integration
+
Additional UIs can be easily integrated via the REST API. All UI components communicate with the platform through the standardized REST API, ensuring consistent behavior and security:
+
+ Chatbots: Build custom chatbots using the workflow API
+ Customer UIs: Create customer-specific interfaces in React, JavaScript, or other languages
+ Mobile Apps: Integrate via REST API from mobile applications
+ Third-Party Tools: Connect existing tools via webhooks and API
+
+
All UI components communicate with the platform through the standardized REST API, ensuring consistent behavior and security.
+
+
+
+
Available UI Components
+
The platform provides reusable UI components that can be configured via RBAC:
+
+ Chat interface
+ Document viewer/editor
+ Workflow editor
+ Connection manager
+ Settings panels
+ Dashboard widgets
+
+
+
+
+
UI Architecture Diagram
+
+
+
+
+
+
+
+
Big Picture & Future Vision
+
+
+
Vendor-Independent Platform
+
+
AI Model Independence
+
PowerOn is designed as a vendor-independent platform regarding AI models:
+
+ Support for multiple AI providers (OpenAI, Anthropic, Google, Azure, etc.)
+ Dynamic model selection based on task requirements
+ Easy addition of new AI providers
+ No vendor lock-in - customers can switch providers seamlessly
+
+
+
+
+
Connector Independence
+
Universal connector architecture supporting all major platforms:
+
+ Microsoft: SharePoint, Outlook, Teams, OneDrive, Azure
+ Google: Drive, Gmail, Workspace, Cloud
+ Amazon: AWS services, S3, etc.
+ Other: Jira, Slack, Salesforce, and many more
+
+
Customers are not locked into a single vendor ecosystem.
+
+
+
+
+
Graphical Workflow Modeling
+
+
Visual Customer Journey Design
+
Future capability to graphically model workflows for customer journeys:
+
+ Drag-and-drop workflow editor
+ Visual representation of customer journeys
+ Easy workflow modification without coding
+ Template library for common workflows
+ Workflow versioning and testing
+
+
This makes workflow creation accessible to business users, not just developers.
+
+
+
+
+
MCP Integration in Customer Copilot
+
+
Microsoft Copilot Plugin Architecture
+
Integration of PowerOn actions as MCP (Model Context Protocol) plugins in the customer's Copilot:
+
+ Native Copilot Integration: PowerOn workflows accessible directly from Microsoft Copilot
+ Action Library: All PowerOn actions available as Copilot plugins
+ Seamless Experience: Customers use PowerOn capabilities without leaving Copilot
+ Enterprise Workflows: Complex workflows triggered from simple Copilot conversations
+ Data Privacy: All PowerOn privacy features work seamlessly in Copilot context
+
+
This enables customers to leverage PowerOn's powerful workflow capabilities directly from their familiar Copilot interface.
+
+
+
+
+
Platform Evolution
+
+
+
Today
+
+ REST API-based workflows
+ Playground UI
+ Multiple AI providers
+ Standard connectors
+
+
+
+
Near Future
+
+ Graphical workflow editor
+ MCP Copilot integration
+ Enhanced pre-processing
+ Advanced AI selection
+
+
+
+
Future
+
+ AI-powered workflow generation
+ Multi-platform Copilot support
+ Edge computing expansion
+ Federated learning
+
+
+
+
+
+
+
Big Picture & Future Vision Diagram
+
+
+
+
+
+
+
+
Integration Guide
+
+
+
REST API
+
The platform exposes a comprehensive REST API for all operations. This API serves as the primary integration point for external developers:
+
+ Workflow API: Create, execute, and manage workflows
+ Document API: Upload, download, and process documents
+ Connection API: Manage external connections (SharePoint, Outlook, etc.)
+ RBAC API: Manage roles and permissions
+ Options API: Dynamic options for UI components
+
+
+
+
+
Building Blocks for Developers
+
Developers can extend the platform by creating custom components in these areas:
+
+
+
Workflow Methods
+
Create custom workflow methods by extending MethodBase and registering actions.
+
+
+
Services
+
Extend the services layer by creating new service modules following the existing pattern.
+
+
+
Connectors
+
Build connectors for external systems (databases, APIs, services) using the connector interface.
+
+
+
UI Components
+
Create React components that integrate with the REST API and respect RBAC rules.
+
+
+
+
+
+
Development Workflow
+
Follow these steps to get started with platform development:
+
+ Understand the Architecture: Review this document and codebase structure
+ Set Up Development Environment: Clone repository and configure local environment
+ Choose Integration Point: Decide whether to extend workflows, services, or UI
+ Follow Patterns: Use existing code as reference for consistent implementation
+ Test with RBAC: Ensure your changes respect RBAC rules
+ Document: Update documentation for your changes
+
+
+
+
+
Key Integration Points
+
Main directories where developers can add new functionality:
+
+ gateway/modules/workflows/methods/ - Add new workflow methods
+ gateway/modules/services/ - Add new microservices
+ gateway/modules/connectors/ - Add new connectors
+ gateway/modules/routes/ - Add new API endpoints
+ gateway/modules/features/ - Add new features
+
+
+
+
+
+
+
+
+
+
diff --git a/demoData/knowledge-base/referenzen.html b/demoData/knowledge-base/referenzen.html
new file mode 100644
index 00000000..0e02d023
--- /dev/null
+++ b/demoData/knowledge-base/referenzen.html
@@ -0,0 +1,880 @@
+
+
+
+
+
+ PowerOn Kunden und Nutzereferenzen
+
+
+
+
+
+
+
+
+
Leistungsbausteine
+
+
+
Impact Sessions
+
+ Orientierung für Entscheiderinnen und Entscheider
+ Klärung Nutzen, Risiken, nächste Schritte
+
+
+
+
+
Deep Dives & Academy-Module
+
+ Hands-on Training mit echten Business Cases
+ Transfer in konkrete Arbeitsabläufe
+
+
+
+
+
Workshops / Prototyping
+
+ Definition von Use Cases und KPI
+ Rapid Prototyping bis funktionsfähiges MVP
+
+
+
+
+
Transformation Labs
+
+ Begleitung bis Umsetzung und Go-Live
+ Skalierung und Betrieb
+
+
+
+
+
+
+
Referenz-Use-Cases (ohne Kundendaten)
+
+
+
Prozessautomatisierung und KPI-Produkt
+
+
Kontext
+
Hoher manueller Aufwand und intransparente Kosten in Spesen und Controlling bremsen das Tagesgeschäft
+
+
Ziel
+
Operative Kosten senken und Steuerungsfähigkeit erhöhen durch standardisierte, schnellere Freigaben
+
+
Lösung
+
End‑to‑End Workflow in PowerOn mit automatischer Belegerfassung, Prüfung und KPI‑Auswertung
+
+
Ergebnis
+
Kürzere Durchlaufzeiten und jederzeit transparente Kennzahlen
+
+
+
Prozessablauf:
+
+
Beleg
+
→
+
Erfassen
+
→
+
Validieren
+
→
+
Genehmigen
+
→
+
Buchen
+
→
+
KPI‑Dashboard
+
+
+
+
+
+
Enterprise-Features skalieren für bestehende Lösung
+
+
Kontext
+
Wachsende Nutzerzahlen und steigende Anforderungen gefährden die wahrgenommene Servicequalität
+
+
Ziel
+
Verlässliche Skalierbarkeit sicherstellen und Kundenzufriedenheit schützen
+
+
Lösung
+
Rollen- und Berechtigungskonzept erweitern, Performance optimieren und Betriebsprozesse festigen
+
+
Ergebnis
+
Hohe Stabilität, schnellere Antwortzeiten und sicherer Betrieb
+
+
+
Prozessablauf:
+
+
Users
+
→
+
Auth/Rollen
+
→
+
Services
+
→
+
Queue/Jobs
+
→
+
Monitoring
+
→
+
SLO/SLA
+
+
+
+
+
+
Management-Alignment und Entscheidvorbereitung
+
+
Kontext
+
Strategische Weichenstellung für KI erfordert breite Abstützung und klare Investitionssicht
+
+
Ziel
+
Entscheidungssicherheit auf GL‑Ebene schaffen und Investitionen fokussieren
+
+
Lösung
+
Kompakte Impact‑Session mit Variantenvergleich und klarer Roadmap
+
+
Ergebnis
+
Verbindliche Entscheide zu Scope, Budget und Zeitplan
+
+
+
Prozessablauf:
+
+
Ausgangslage
+
→
+
Optionen
+
→
+
Kosten/Nutzen
+
→
+
Roadmap
+
→
+
Entscheid (GL)
+
+
+
+
+
+
Tech‑Workshops zu Multi‑Agent‑Architektur
+
+
Kontext
+
Unterschiedliche Vorgehensweisen und Standards verlangsamen Delivery und erschweren Skalierung
+
+
Ziel
+
Gemeinsame Spielregeln schaffen, um Time‑to‑Value zu verkürzen und konsistente Qualität sicherzustellen
+
+
Lösung
+
Klare Architekturprinzipien, verbindliche Standards und kollaborative Working Agreements
+
+
Ergebnis
+
Einheitliche Regeln, eindeutige Verantwortlichkeiten und eine belastbare Sprint‑Roadmap
+
+
+
Prozessablauf:
+
+
Pain Points
+
→
+
Prinzipien
+
→
+
Standards
+
→
+
Working Agreements
+
→
+
Sprint‑Roadmap
+
+
+
+
+
+
Data & Analytics Demo / Reporting
+
+
Kontext
+
Entscheidungen werden mit Bauchgefühl statt mit einheitlichen Zahlen getroffen
+
+
Ziel
+
Entscheidungen im Fachbereich konsequent datenbasiert treffen
+
+
Lösung
+
Schlanke Datenaufbereitung mit PowerOn‑Pipelines und Visualisierung im BI‑Tool
+
+
Ergebnis
+
Entscheidungsreife KPIs auf einen Blick
+
+
+
Prozessablauf:
+
+
Datenquellen
+
→
+
Bereinigen/Joinen
+
→
+
KPIs berechnen
+
→
+
Dashboard (BI)
+
+
+
+
+
+
Code‑Modernisierung und Analyse
+
+
Kontext
+
Veraltete Codebasis bremst Releases, erhöht Betriebsrisiken und erschwert neue Features
+
+
Ziel
+
Risiken in Legacy‑Code reduzieren und Zukunftsfähigkeit herstellen
+
+
Lösung
+
Systematische Code‑Analyse mit klaren Migrationspfaden und schnellen Verbesserungen
+
+
Ergebnis
+
Priorisierte Massnahmen mit messbarem Risikoabbau
+
+
+
Prozessablauf:
+
+
Systeme
+
→
+
Code‑Analyse
+
→
+
Risiken bewerten
+
→
+
Migrationspfade
+
→
+
Quick Wins
+
→
+
Stabiler Release
+
+
+
+
+
+
+
Typische Resultate
+
+
+
Effizienzsteigerung
+
+ 30–70% Zeiteinsparung in Zielprozessen (je nach Ausgangslage)
+ Schnellere Entscheide dank standardisierten Artefakten und Dashboards
+
+
+
+
+
Risikoreduktion
+
+ Reduzierte Betriebsrisiken durch klare Architektur- und Qualitätsstandards
+ Höhere Akzeptanz durch Einbindung von Stakeholdern früh im Prozess
+
+
+
+
+
+
+
Vorgehen (Kurz)
+
+
+
1. Discovery
+
+ Ziele, Ist‑Prozess, Datenlage
+
+
+
+
+
2. Prototyp
+
+ Schlanker End‑to‑End‑Flow mit messbarem Nutzen
+
+
+
+
+
3. Skalierung
+
+ Security, Performance, Betrieb
+
+
+
+
+
4. Transition
+
+ Übergabe oder Betrieb durch PowerOn‑Team
+
+
+
+
+
+
+
Gemeinsamer Start
+
+
+
Vorbereitung
+
+ Use‑Case shortlist definieren
+ 2‑h Impact‑Session terminieren
+
+
+
+
+
Umsetzung
+
+ MVP‑Scope und Erfolgskriterien festlegen
+ Sprint‑Planung starten
+
+
+
+
+
+
+
+
+
+
diff --git a/demoData/neutralizer/_generateTenantDossierPdf.py b/demoData/neutralizer/_generateTenantDossierPdf.py
new file mode 100644
index 00000000..2d4f5a02
--- /dev/null
+++ b/demoData/neutralizer/_generateTenantDossierPdf.py
@@ -0,0 +1,57 @@
+"""Generate tenant-dossier.pdf for neutralization demo. Run: python _generateTenantDossierPdf.py
+
+Uses ReportLab so the PDF opens reliably in all viewers (stdlib-only PDFs are fragile).
+"""
+from pathlib import Path
+
+from reportlab.lib.pagesizes import A4
+from reportlab.pdfgen import canvas
+
+
+def _main():
+    outPath = Path(__file__).resolve().parent / "tenant-dossier.pdf"  # write next to this script so the demo folder stays self-contained
+    c = canvas.Canvas(str(outPath), pagesize=A4)
+    _, h = A4  # page height in points (width is unused)
+    margin = 72  # 1 inch margin, in points
+    y = h - margin  # current text baseline; content flows top-down
+    c.setFont("Helvetica-Bold", 13)
+    c.drawString(margin, y, "Tenant dossier (demo) - confidential")
+    y -= 22
+    c.setFont("Helvetica", 11)
+    lines = [
+        "Fictional demo data for neutralization testing.",
+        "",
+        "Tenant name: Hans Muster",
+        "Date of birth: 14.03.1982",
+        "Nationality: Swiss",
+        "",
+        "Residential address:",
+        "Bahnhofstrasse 1",
+        "8001 Zurich",
+        "Switzerland",
+        "",
+        "Email: hans.muster@example-mail.demo",
+        "Phone: +41 79 123 45 67",
+        "",
+        "Lease reference: LE-2024-88421",
+        "Monthly rent: CHF 2450.00",
+        "Deposit held: CHF 7350.00",
+        "",
+        "Employer: Demo Consulting AG, Limmatquai 78, 8001 Zurich",
+        "",
+        "Notes: Tenant requested balcony repair (ticket REQ-992).",
+    ]
+    lineHeight = 14  # leading (line spacing) in points
+    for line in lines:
+        if y < margin and line:  # page-break only before non-empty lines, so a trailing blank never opens an empty page
+            c.showPage()
+            c.setFont("Helvetica", 11)  # font state resets on a new page; re-apply before drawing
+            y = h - margin
+        c.drawString(margin, y, line)
+        y -= lineHeight
+    c.save()
+    print(f"Wrote {outPath}")
+
+
+if __name__ == "__main__":
+    _main()
diff --git a/demoData/neutralizer/tenant-dossier.pdf b/demoData/neutralizer/tenant-dossier.pdf
new file mode 100644
index 00000000..a6429f1b
--- /dev/null
+++ b/demoData/neutralizer/tenant-dossier.pdf
@@ -0,0 +1,74 @@
+%PDF-1.3
+% ReportLab Generated PDF document (opensource)
+1 0 obj
+<<
+/F1 2 0 R /F2 3 0 R
+>>
+endobj
+2 0 obj
+<<
+/BaseFont /Helvetica /Encoding /WinAnsiEncoding /Name /F1 /Subtype /Type1 /Type /Font
+>>
+endobj
+3 0 obj
+<<
+/BaseFont /Helvetica-Bold /Encoding /WinAnsiEncoding /Name /F2 /Subtype /Type1 /Type /Font
+>>
+endobj
+4 0 obj
+<<
+/Contents 8 0 R /MediaBox [ 0 0 595.2756 841.8898 ] /Parent 7 0 R /Resources <<
+/Font 1 0 R /ProcSet [ /PDF /Text /ImageB /ImageC /ImageI ]
+>> /Rotate 0 /Trans <<
+
+>>
+ /Type /Page
+>>
+endobj
+5 0 obj
+<<
+/PageMode /UseNone /Pages 7 0 R /Type /Catalog
+>>
+endobj
+6 0 obj
+<<
+/Author (anonymous) /CreationDate (D:20260413002929+02'00') /Creator (anonymous) /Keywords () /ModDate (D:20260413002929+02'00') /Producer (ReportLab PDF Library - \(opensource\))
+ /Subject (unspecified) /Title (untitled) /Trapped /False
+>>
+endobj
+7 0 obj
+<<
+/Count 1 /Kids [ 4 0 R ] /Type /Pages
+>>
+endobj
+8 0 obj
+<<
+/Filter [ /ASCII85Decode /FlateDecode ] /Length 654
+>>
+stream
+Gasam9lnc;&A@g>lnO(2=RrpscmGHAZie8p-5Y3=@t?5.P!"j*HK;Fi@]13b1HoLNhXc)p>lp^JaPgD8!#HB_>8&+nWYS,F`)(;Y@)>/R[H=Dq4)8esgGpgXQD3IM$H$"2L[$s#Dk8hf2E>G=!I\)qcAifY?5kL#lX:umL)C2t<$6-:MY6mu9k?#W%2[oR^VsI+.!d4gq#g2k1Vj8HiJIpNf:t7&r:FE<6naroO=f7-A\)mh3K+#;jO=Q5$Z^pXYXcahlq@-EPABR+A_HCPde%4"G)Q2m;h-`b6ENmFFmS1/_)fuc50.gZ!l8E@]BR[V=I5)R1mE7:'u=chT!!'f^Xe@:2KoYE13Fj#R5slPDniWfK\L\endstream
+endobj
+xref
+0 9
+0000000000 65535 f
+0000000061 00000 n
+0000000102 00000 n
+0000000209 00000 n
+0000000321 00000 n
+0000000524 00000 n
+0000000592 00000 n
+0000000853 00000 n
+0000000912 00000 n
+trailer
+<<
+/ID
+[]
+% ReportLab generated PDF document -- digest (opensource)
+
+/Info 6 0 R
+/Root 5 0 R
+/Size 9
+>>
+startxref
+1656
+%%EOF
diff --git a/demoData/trustee/Budget2026a.xlsx b/demoData/trustee/Budget2026a.xlsx
new file mode 100644
index 00000000..13ee3547
Binary files /dev/null and b/demoData/trustee/Budget2026a.xlsx differ
diff --git a/env_dev.env b/env_dev.env
index 30ffd079..9c13506f 100644
--- a/env_dev.env
+++ b/env_dev.env
@@ -4,7 +4,7 @@
APP_ENV_TYPE = dev
APP_ENV_LABEL = Development Instance Patrick
APP_API_URL = http://localhost:8000
-APP_KEY_SYSVAR = D:/Athi/Local/Web/poweron/local/key.txt
+APP_KEY_SYSVAR = D:/Athi/Local/Web/poweron/local/notes/key.txt
APP_INIT_PASS_ADMIN_SECRET = DEV_ENC:Z0FBQUFBQm8xSUpEeFFtRGtQeVUtcjlrU3dab1ZxUm9WSks0MlJVYUtERFlqUElHemZrOGNENk1tcmJNX3Vxc01UMDhlNU40VzZZRVBpUGNmT3podzZrOGhOeEJIUEt4eVlSWG5UYXA3d09DVXlLT21Kb1JYSUU9
APP_INIT_PASS_EVENT_SECRET = DEV_ENC:Z0FBQUFBQm8xSUpERzZjNm56WGVBdjJTeG5Udjd6OGQwUVotYXUzQjJ1YVNyVXVBa3NZVml3ODU0MVNkZjhWWmJwNUFkc19BcHlHMTU1Q3BRcHU0cDBoZkFlR2l6UEZQU3d2U3MtMDh5UDZteGFoQ0EyMUE1ckE9
diff --git a/modules/aicore/aicorePluginAnthropic.py b/modules/aicore/aicorePluginAnthropic.py
index 81a2175e..12cfcbe7 100644
--- a/modules/aicore/aicorePluginAnthropic.py
+++ b/modules/aicore/aicorePluginAnthropic.py
@@ -71,6 +71,7 @@ class AiAnthropic(BaseConnectorAi):
(OperationTypeEnum.DATA_GENERATE, 9),
(OperationTypeEnum.DATA_EXTRACT, 8),
(OperationTypeEnum.AGENT, 9),
+ (OperationTypeEnum.DATA_QUERY, 9),
),
version="claude-sonnet-4-5-20250929",
calculatepriceCHF=lambda processingTime, bytesSent, bytesReceived: (bytesSent / 4 / 1000) * 0.003 + (bytesReceived / 4 / 1000) * 0.015
@@ -97,6 +98,7 @@ class AiAnthropic(BaseConnectorAi):
(OperationTypeEnum.DATA_GENERATE, 8),
(OperationTypeEnum.DATA_EXTRACT, 7),
(OperationTypeEnum.AGENT, 7),
+ (OperationTypeEnum.DATA_QUERY, 10),
),
version="claude-haiku-4-5-20251001",
calculatepriceCHF=lambda processingTime, bytesSent, bytesReceived: (bytesSent / 4 / 1000) * 0.001 + (bytesReceived / 4 / 1000) * 0.005
@@ -123,6 +125,7 @@ class AiAnthropic(BaseConnectorAi):
(OperationTypeEnum.DATA_GENERATE, 10),
(OperationTypeEnum.DATA_EXTRACT, 9),
(OperationTypeEnum.AGENT, 10),
+ (OperationTypeEnum.DATA_QUERY, 3),
),
version="claude-opus-4-6",
calculatepriceCHF=lambda processingTime, bytesSent, bytesReceived: (bytesSent / 4 / 1000) * 0.005 + (bytesReceived / 4 / 1000) * 0.025
diff --git a/modules/aicore/aicorePluginMistral.py b/modules/aicore/aicorePluginMistral.py
index 885addcf..d2ad0694 100644
--- a/modules/aicore/aicorePluginMistral.py
+++ b/modules/aicore/aicorePluginMistral.py
@@ -67,6 +67,7 @@ class AiMistral(BaseConnectorAi):
(OperationTypeEnum.DATA_GENERATE, 9),
(OperationTypeEnum.DATA_EXTRACT, 8),
(OperationTypeEnum.AGENT, 8),
+ (OperationTypeEnum.DATA_QUERY, 7),
),
version="mistral-large-latest",
calculatepriceCHF=lambda processingTime, bytesSent, bytesReceived: (bytesSent / 4 / 1000) * 0.0005 + (bytesReceived / 4 / 1000) * 0.0015
@@ -93,6 +94,7 @@ class AiMistral(BaseConnectorAi):
(OperationTypeEnum.DATA_GENERATE, 8),
(OperationTypeEnum.DATA_EXTRACT, 7),
(OperationTypeEnum.AGENT, 6),
+ (OperationTypeEnum.DATA_QUERY, 9),
),
version="mistral-small-latest",
calculatepriceCHF=lambda processingTime, bytesSent, bytesReceived: (bytesSent / 4 / 1000) * 0.00006 + (bytesReceived / 4 / 1000) * 0.00018
diff --git a/modules/aicore/aicorePluginOpenai.py b/modules/aicore/aicorePluginOpenai.py
index 3b9f2c5f..ae5a02b3 100644
--- a/modules/aicore/aicorePluginOpenai.py
+++ b/modules/aicore/aicorePluginOpenai.py
@@ -68,6 +68,7 @@ class AiOpenai(BaseConnectorAi):
(OperationTypeEnum.DATA_GENERATE, 10),
(OperationTypeEnum.DATA_EXTRACT, 7),
(OperationTypeEnum.AGENT, 9),
+ (OperationTypeEnum.DATA_QUERY, 8),
),
version="gpt-4o",
calculatepriceCHF=lambda processingTime, bytesSent, bytesReceived: (bytesSent / 4 / 1000) * 0.0025 + (bytesReceived / 4 / 1000) * 0.01
@@ -95,6 +96,7 @@ class AiOpenai(BaseConnectorAi):
(OperationTypeEnum.DATA_GENERATE, 9),
(OperationTypeEnum.DATA_EXTRACT, 7),
(OperationTypeEnum.AGENT, 8),
+ (OperationTypeEnum.DATA_QUERY, 10),
),
version="gpt-4o-mini",
calculatepriceCHF=lambda processingTime, bytesSent, bytesReceived: (bytesSent / 4 / 1000) * 0.00015 + (bytesReceived / 4 / 1000) * 0.0006
diff --git a/modules/connectors/connectorDbPostgre.py b/modules/connectors/connectorDbPostgre.py
index 2c7eeab3..e92d7b6f 100644
--- a/modules/connectors/connectorDbPostgre.py
+++ b/modules/connectors/connectorDbPostgre.py
@@ -946,13 +946,14 @@ class DatabaseConnector:
if recordFilter:
for field, value in recordFilter.items():
if value is None:
- # Use IS NULL for None values (= NULL is always false in SQL)
where_conditions.append(f'"{field}" IS NULL')
+ elif isinstance(value, list):
+ where_conditions.append(f'"{field}" = ANY(%s)')
+ where_values.append(value)
else:
where_conditions.append(f'"{field}" = %s')
where_values.append(value)
- # Build the query
if where_conditions:
where_clause = " WHERE " + " AND ".join(where_conditions)
else:
@@ -1040,7 +1041,7 @@ class DatabaseConnector:
colType = fields.get(key, "TEXT")
logger.debug(f"_buildPaginationClauses: filter key='{key}' val={val!r} type(val)={type(val).__name__} colType={colType}")
if val is None:
- where_parts.append(f'"{key}" IS NULL')
+ where_parts.append(f'("{key}" IS NULL OR "{key}" = \'\')')
continue
if isinstance(val, dict):
op = val.get("operator", "equals")
@@ -1113,13 +1114,15 @@ class DatabaseConnector:
orderParts: List[str] = []
if pagination and pagination.sort:
for sf in pagination.sort:
- if sf.field in validColumns:
- direction = "DESC" if sf.direction.lower() == "desc" else "ASC"
- colType = fields.get(sf.field, "TEXT")
+ sfField = sf.get("field") if isinstance(sf, dict) else getattr(sf, "field", None)
+ sfDir = sf.get("direction", "asc") if isinstance(sf, dict) else getattr(sf, "direction", "asc")
+ if sfField and sfField in validColumns:
+ direction = "DESC" if str(sfDir).lower() == "desc" else "ASC"
+ colType = fields.get(sfField, "TEXT")
if colType == "BOOLEAN":
- orderParts.append(f'COALESCE("{sf.field}", FALSE) {direction}')
+ orderParts.append(f'COALESCE("{sfField}", FALSE) {direction}')
else:
- orderParts.append(f'"{sf.field}" {direction} NULLS LAST')
+ orderParts.append(f'"{sfField}" {direction} NULLS LAST')
if not orderParts:
orderParts.append('"id"')
order_clause = " ORDER BY " + ", ".join(orderParts)
diff --git a/modules/connectors/connectorVoiceGoogle.py b/modules/connectors/connectorVoiceGoogle.py
index 0dbb46a5..aebede8a 100644
--- a/modules/connectors/connectorVoiceGoogle.py
+++ b/modules/connectors/connectorVoiceGoogle.py
@@ -18,9 +18,13 @@ from modules.shared.configuration import APP_CONFIG
logger = logging.getLogger(__name__)
-# Gemini-TTS speaker IDs from voices.list use short names (e.g. "Kore") and require model_name + prompt.
+# Gemini-TTS speaker IDs from voices.list use short names (e.g. "Kore") and require
+# SynthesisInput.prompt + VoiceSelectionParams.model_name (google-cloud-texttospeech >= 2.24.0).
_GEMINI_TTS_DEFAULT_MODEL = "gemini-2.5-flash-tts"
_GEMINI_TTS_NEUTRAL_PROMPT = "Say the following"
+_GEMINI_TTS_MIN_CLIENT_HINT = (
+ "Gemini-TTS requires google-cloud-texttospeech>=2.24.0 (SynthesisInput.prompt, VoiceSelectionParams.model_name)."
+)
class ConnectorGoogleSpeech:
@@ -940,7 +944,9 @@ class ConnectorGoogleSpeech:
logger.info(f"Using TTS voice: {selectedVoice} for language: {languageCode}")
- if self._isGeminiTtsSpeakerVoiceName(selectedVoice):
+ isGeminiVoice = self._isGeminiTtsSpeakerVoiceName(selectedVoice)
+
+ if isGeminiVoice:
synthesisInput = texttospeech.SynthesisInput(
text=text,
prompt=_GEMINI_TTS_NEUTRAL_PROMPT,
@@ -958,19 +964,17 @@ class ConnectorGoogleSpeech:
name=selectedVoice,
ssml_gender=texttospeech.SsmlVoiceGender.NEUTRAL,
)
-
- # Select the type of audio file to return
+
audioConfig = texttospeech.AudioConfig(
audio_encoding=texttospeech.AudioEncoding.MP3
)
-
- # Perform the text-to-speech request
+
response = self.tts_client.synthesize_speech(
input=synthesisInput,
voice=voice,
audio_config=audioConfig
)
-
+
# Return the audio content
return {
"success": True,
@@ -982,9 +986,14 @@ class ConnectorGoogleSpeech:
except Exception as e:
logger.error(f"Text-to-Speech error: {e}")
+ detail = str(e)
+ extra = ""
+ low = detail.lower()
+ if "prompt" in low or "model_name" in low or "unknown field" in low:
+ extra = f" {_GEMINI_TTS_MIN_CLIENT_HINT}"
return {
"success": False,
- "error": f"Text-to-Speech failed: {str(e)}"
+ "error": f"Text-to-Speech failed: {detail}{extra}",
}
def _getDefaultVoice(self, languageCode: str) -> str:
diff --git a/modules/datamodels/datamodelAi.py b/modules/datamodels/datamodelAi.py
index 662eded2..a581a7e8 100644
--- a/modules/datamodels/datamodelAi.py
+++ b/modules/datamodels/datamodelAi.py
@@ -32,6 +32,7 @@ class OperationTypeEnum(str, Enum):
# Agent Operations
AGENT = "agent" # Agent loop: reasoning + tool use
+ DATA_QUERY = "dataQuery" # Data query sub-agent: fast model, schema-aware
# Embedding Operations
EMBEDDING = "embedding" # Text → vector conversion for semantic search
diff --git a/modules/datamodels/datamodelAudit.py b/modules/datamodels/datamodelAudit.py
index 76c9ecfb..f95b213d 100644
--- a/modules/datamodels/datamodelAudit.py
+++ b/modules/datamodels/datamodelAudit.py
@@ -20,7 +20,7 @@ from enum import Enum
import uuid
from modules.shared.timeUtils import getUtcTimestamp
-from modules.shared.attributeUtils import registerModelLabels
+from modules.shared.i18nRegistry import i18nModel
class AuditCategory(str, Enum):
@@ -82,6 +82,7 @@ class AuditAction(str, Enum):
CONFIG_CHANGE = "config_change"
+@i18nModel("Audit-Log-Eintrag")
class AuditLogEntry(BaseModel):
"""
Audit log entry for database storage.
@@ -92,117 +93,94 @@ class AuditLogEntry(BaseModel):
id: str = Field(
default_factory=lambda: str(uuid.uuid4()),
description="Unique identifier for the audit entry",
- json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False}
+ json_schema_extra={"label": "ID", "frontend_type": "text", "frontend_readonly": True, "frontend_required": False}
)
-
+
# Timestamp
timestamp: float = Field(
default_factory=getUtcTimestamp,
description="UTC timestamp when the event occurred",
- json_schema_extra={"frontend_type": "datetime", "frontend_readonly": True, "frontend_required": True}
+ json_schema_extra={"label": "Zeitstempel", "frontend_type": "datetime", "frontend_readonly": True, "frontend_required": True}
)
-
+
# Actor identification
userId: str = Field(
description="ID of the user who performed the action (or 'system' for system events)",
- json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": True}
+ json_schema_extra={"label": "Benutzer-ID", "frontend_type": "text", "frontend_readonly": True, "frontend_required": True}
)
-
+
username: Optional[str] = Field(
default=None,
description="Username at the time of the event (for historical reference)",
- json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False}
+ json_schema_extra={"label": "Benutzername", "frontend_type": "text", "frontend_readonly": True, "frontend_required": False}
)
-
+
# Context
mandateId: Optional[str] = Field(
default=None,
description="Mandate context (if applicable)",
- json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False}
+ json_schema_extra={"label": "Mandanten-ID", "frontend_type": "text", "frontend_readonly": True, "frontend_required": False}
)
-
+
featureInstanceId: Optional[str] = Field(
default=None,
description="Feature instance context (if applicable)",
- json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False}
+ json_schema_extra={"label": "Feature-Instanz-ID", "frontend_type": "text", "frontend_readonly": True, "frontend_required": False}
)
-
+
# Event classification
category: str = Field(
description="Event category (access, key, data, security, gdpr, permission, system)",
- json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": True}
+ json_schema_extra={"label": "Kategorie", "frontend_type": "text", "frontend_readonly": True, "frontend_required": True}
)
-
+
action: str = Field(
description="Specific action performed",
- json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": True}
+ json_schema_extra={"label": "Aktion", "frontend_type": "text", "frontend_readonly": True, "frontend_required": True}
)
-
+
# Event details
resourceType: Optional[str] = Field(
default=None,
description="Type of resource affected (e.g., 'User', 'ChatWorkflow', 'TrusteeContract')",
- json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False}
+ json_schema_extra={"label": "Ressourcentyp", "frontend_type": "text", "frontend_readonly": True, "frontend_required": False}
)
-
+
resourceId: Optional[str] = Field(
default=None,
description="ID of the affected resource",
- json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False}
+ json_schema_extra={"label": "Ressourcen-ID", "frontend_type": "text", "frontend_readonly": True, "frontend_required": False}
)
-
+
details: Optional[str] = Field(
default=None,
description="Additional details about the event",
- json_schema_extra={"frontend_type": "textarea", "frontend_readonly": True, "frontend_required": False}
+ json_schema_extra={"label": "Details", "frontend_type": "textarea", "frontend_readonly": True, "frontend_required": False}
)
-
+
# Request metadata
ipAddress: Optional[str] = Field(
default=None,
description="IP address of the client",
- json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False}
+ json_schema_extra={"label": "IP-Adresse", "frontend_type": "text", "frontend_readonly": True, "frontend_required": False}
)
-
+
userAgent: Optional[str] = Field(
default=None,
description="User agent string from the request",
- json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False}
+ json_schema_extra={"label": "User-Agent", "frontend_type": "text", "frontend_readonly": True, "frontend_required": False}
)
-
+
# Outcome
success: bool = Field(
default=True,
description="Whether the action was successful",
- json_schema_extra={"frontend_type": "checkbox", "frontend_readonly": True, "frontend_required": True}
+ json_schema_extra={"label": "Erfolgreich", "frontend_type": "checkbox", "frontend_readonly": True, "frontend_required": True}
)
-
+
errorMessage: Optional[str] = Field(
default=None,
description="Error message if the action failed",
- json_schema_extra={"frontend_type": "textarea", "frontend_readonly": True, "frontend_required": False}
+ json_schema_extra={"label": "Fehlermeldung", "frontend_type": "textarea", "frontend_readonly": True, "frontend_required": False}
)
-
-# Register labels for internationalization
-registerModelLabels(
- "AuditLogEntry",
- {"en": "Audit Log Entry", "de": "Audit-Log-Eintrag", "fr": "Entrée du journal d'audit"},
- {
- "id": {"en": "ID", "de": "ID", "fr": "ID"},
- "timestamp": {"en": "Timestamp", "de": "Zeitstempel", "fr": "Horodatage"},
- "userId": {"en": "User ID", "de": "Benutzer-ID", "fr": "ID utilisateur"},
- "username": {"en": "Username", "de": "Benutzername", "fr": "Nom d'utilisateur"},
- "mandateId": {"en": "Mandate ID", "de": "Mandanten-ID", "fr": "ID du mandat"},
- "featureInstanceId": {"en": "Feature Instance ID", "de": "Feature-Instanz-ID", "fr": "ID de l'instance"},
- "category": {"en": "Category", "de": "Kategorie", "fr": "Catégorie"},
- "action": {"en": "Action", "de": "Aktion", "fr": "Action"},
- "resourceType": {"en": "Resource Type", "de": "Ressourcentyp", "fr": "Type de ressource"},
- "resourceId": {"en": "Resource ID", "de": "Ressourcen-ID", "fr": "ID de ressource"},
- "details": {"en": "Details", "de": "Details", "fr": "Détails"},
- "ipAddress": {"en": "IP Address", "de": "IP-Adresse", "fr": "Adresse IP"},
- "userAgent": {"en": "User Agent", "de": "User-Agent", "fr": "Agent utilisateur"},
- "success": {"en": "Success", "de": "Erfolgreich", "fr": "Succès"},
- "errorMessage": {"en": "Error Message", "de": "Fehlermeldung", "fr": "Message d'erreur"},
- },
-)
diff --git a/modules/datamodels/datamodelBase.py b/modules/datamodels/datamodelBase.py
index 862f177b..854be75e 100644
--- a/modules/datamodels/datamodelBase.py
+++ b/modules/datamodels/datamodelBase.py
@@ -6,14 +6,17 @@ from typing import Optional
from pydantic import BaseModel, Field
-from modules.shared.attributeUtils import registerModelLabels
+from modules.shared.i18nRegistry import i18nModel
+@i18nModel("Basisdatensatz")
class PowerOnModel(BaseModel):
+ """Basis-Datenmodell mit System-Audit-Feldern fuer alle DB-Tabellen."""
sysCreatedAt: Optional[float] = Field(
default=None,
description="Record creation timestamp (UTC, set by system)",
json_schema_extra={
+ "label": "Erstellt am",
"frontend_type": "timestamp",
"frontend_readonly": True,
"frontend_required": False,
@@ -25,6 +28,7 @@ class PowerOnModel(BaseModel):
default=None,
description="User ID who created this record (set by system)",
json_schema_extra={
+ "label": "Erstellt von",
"frontend_type": "text",
"frontend_readonly": True,
"frontend_required": False,
@@ -36,6 +40,7 @@ class PowerOnModel(BaseModel):
default=None,
description="Record last modification timestamp (UTC, set by system)",
json_schema_extra={
+ "label": "Geaendert am",
"frontend_type": "timestamp",
"frontend_readonly": True,
"frontend_required": False,
@@ -47,6 +52,7 @@ class PowerOnModel(BaseModel):
default=None,
description="User ID who last modified this record (set by system)",
json_schema_extra={
+ "label": "Geaendert von",
"frontend_type": "text",
"frontend_readonly": True,
"frontend_required": False,
@@ -54,15 +60,3 @@ class PowerOnModel(BaseModel):
"system": True,
},
)
-
-
-registerModelLabels(
- "PowerOnModel",
- {"en": "Base Record", "de": "Basisdatensatz"},
- {
- "sysCreatedAt": {"en": "Created At", "de": "Erstellt am", "fr": "Cree le"},
- "sysCreatedBy": {"en": "Created By", "de": "Erstellt von", "fr": "Cree par"},
- "sysModifiedAt": {"en": "Modified At", "de": "Geaendert am", "fr": "Modifie le"},
- "sysModifiedBy": {"en": "Modified By", "de": "Geaendert von", "fr": "Modifie par"},
- },
-)
diff --git a/modules/datamodels/datamodelBilling.py b/modules/datamodels/datamodelBilling.py
index ccf1f4a1..fb1a1061 100644
--- a/modules/datamodels/datamodelBilling.py
+++ b/modules/datamodels/datamodelBilling.py
@@ -7,7 +7,7 @@ from enum import Enum
from datetime import date, datetime, timezone
from pydantic import BaseModel, Field
from modules.datamodels.datamodelBase import PowerOnModel
-from modules.shared.attributeUtils import registerModelLabels
+from modules.shared.i18nRegistry import i18nModel
import uuid
# End-customer price for storage above plan-included volume (CHF per GB per month).
@@ -38,203 +38,170 @@ class PeriodTypeEnum(str, Enum):
YEAR = "YEAR"
+@i18nModel("Abrechnungskonto")
class BillingAccount(PowerOnModel):
"""Billing account for mandate or user-mandate combination."""
id: str = Field(
- default_factory=lambda: str(uuid.uuid4()), description="Primary key"
+ default_factory=lambda: str(uuid.uuid4()),
+ description="Primary key",
+ json_schema_extra={"label": "ID"},
)
- mandateId: str = Field(..., description="Foreign key to Mandate")
- userId: Optional[str] = Field(None, description="Foreign key to User (None = mandate pool account, set = user audit account)")
- balance: float = Field(default=0.0, description="Current balance in CHF")
- warningThreshold: float = Field(default=0.0, description="Warning threshold in CHF")
- lastWarningAt: Optional[datetime] = Field(None, description="Last warning sent timestamp")
- enabled: bool = Field(default=True, description="Account is active")
-
-
-registerModelLabels(
- "BillingAccount",
- {"en": "Billing Account", "de": "Abrechnungskonto"},
- {
- "id": {"en": "ID", "de": "ID"},
- "mandateId": {"en": "Mandate ID", "de": "Mandanten-ID"},
- "userId": {"en": "User ID", "de": "Benutzer-ID"},
- "balance": {"en": "Balance (CHF)", "de": "Guthaben (CHF)"},
- "warningThreshold": {"en": "Warning Threshold (CHF)", "de": "Warnschwelle (CHF)"},
- "lastWarningAt": {"en": "Last Warning", "de": "Letzte Warnung"},
- "enabled": {"en": "Enabled", "de": "Aktiv"},
- },
-)
+ mandateId: str = Field(..., description="Foreign key to Mandate", json_schema_extra={"label": "Mandanten-ID"})
+ userId: Optional[str] = Field(
+ None,
+ description="Foreign key to User (None = mandate pool account, set = user audit account)",
+ json_schema_extra={"label": "Benutzer-ID"},
+ )
+ balance: float = Field(default=0.0, description="Current balance in CHF", json_schema_extra={"label": "Guthaben (CHF)"})
+ warningThreshold: float = Field(
+ default=0.0,
+ description="Warning threshold in CHF",
+ json_schema_extra={"label": "Warnschwelle (CHF)"},
+ )
+ lastWarningAt: Optional[datetime] = Field(
+ None,
+ description="Last warning sent timestamp",
+ json_schema_extra={"label": "Letzte Warnung"},
+ )
+ enabled: bool = Field(default=True, description="Account is active", json_schema_extra={"label": "Aktiv"})
+@i18nModel("Transaktion")
class BillingTransaction(PowerOnModel):
"""Single billing transaction (credit, debit, adjustment)."""
id: str = Field(
- default_factory=lambda: str(uuid.uuid4()), description="Primary key"
+ default_factory=lambda: str(uuid.uuid4()),
+ description="Primary key",
+ json_schema_extra={"label": "ID"},
)
- accountId: str = Field(..., description="Foreign key to BillingAccount")
- transactionType: TransactionTypeEnum = Field(..., description="Transaction type")
- amount: float = Field(..., description="Amount in CHF (always positive)")
- description: str = Field(..., description="Transaction description")
-
+ accountId: str = Field(..., description="Foreign key to BillingAccount", json_schema_extra={"label": "Konto-ID"})
+ transactionType: TransactionTypeEnum = Field(..., description="Transaction type", json_schema_extra={"label": "Typ"})
+ amount: float = Field(..., description="Amount in CHF (always positive)", json_schema_extra={"label": "Betrag (CHF)"})
+ description: str = Field(..., description="Transaction description", json_schema_extra={"label": "Beschreibung"})
+
# Reference to source
- referenceType: Optional[ReferenceTypeEnum] = Field(None, description="Reference type")
- referenceId: Optional[str] = Field(None, description="Reference ID")
-
+ referenceType: Optional[ReferenceTypeEnum] = Field(None, description="Reference type", json_schema_extra={"label": "Referenztyp"})
+ referenceId: Optional[str] = Field(None, description="Reference ID", json_schema_extra={"label": "Referenz-ID"})
+
# Context for workflow transactions
- workflowId: Optional[str] = Field(None, description="Workflow ID (for WORKFLOW transactions)")
- featureInstanceId: Optional[str] = Field(None, description="Feature instance ID")
- featureCode: Optional[str] = Field(None, description="Feature code (e.g., automation)")
- aicoreProvider: Optional[str] = Field(None, description="AICore provider (anthropic, openai, etc.)")
- aicoreModel: Optional[str] = Field(None, description="AICore model name (e.g., claude-4-sonnet, gpt-4o)")
- createdByUserId: Optional[str] = Field(None, description="User who created/caused this transaction")
-
+ workflowId: Optional[str] = Field(None, description="Workflow ID (for WORKFLOW transactions)", json_schema_extra={"label": "Workflow-ID"})
+ featureInstanceId: Optional[str] = Field(None, description="Feature instance ID", json_schema_extra={"label": "Feature-Instanz-ID"})
+ featureCode: Optional[str] = Field(None, description="Feature code (e.g., automation)", json_schema_extra={"label": "Feature-Code"})
+ aicoreProvider: Optional[str] = Field(None, description="AICore provider (anthropic, openai, etc.)", json_schema_extra={"label": "AI-Anbieter"})
+ aicoreModel: Optional[str] = Field(None, description="AICore model name (e.g., claude-4-sonnet, gpt-4o)", json_schema_extra={"label": "AI-Modell"})
+ createdByUserId: Optional[str] = Field(None, description="User who created/caused this transaction", json_schema_extra={"label": "Erstellt von Benutzer"})
+
# AI call metadata (for per-call analytics)
- processingTime: Optional[float] = Field(None, description="Processing time in seconds")
- bytesSent: Optional[int] = Field(None, description="Bytes sent to AI model")
- bytesReceived: Optional[int] = Field(None, description="Bytes received from AI model")
- errorCount: Optional[int] = Field(None, description="Number of errors in this call")
-
-
-registerModelLabels(
- "BillingTransaction",
- {"en": "Billing Transaction", "de": "Transaktion"},
- {
- "id": {"en": "ID", "de": "ID"},
- "accountId": {"en": "Account ID", "de": "Konto-ID"},
- "transactionType": {"en": "Type", "de": "Typ"},
- "amount": {"en": "Amount (CHF)", "de": "Betrag (CHF)"},
- "description": {"en": "Description", "de": "Beschreibung"},
- "referenceType": {"en": "Reference Type", "de": "Referenztyp"},
- "referenceId": {"en": "Reference ID", "de": "Referenz-ID"},
- "workflowId": {"en": "Workflow ID", "de": "Workflow-ID"},
- "featureInstanceId": {"en": "Feature Instance ID", "de": "Feature-Instanz-ID"},
- "featureCode": {"en": "Feature Code", "de": "Feature-Code"},
- "aicoreProvider": {"en": "AI Provider", "de": "AI-Anbieter"},
- "aicoreModel": {"en": "AI Model", "de": "AI-Modell"},
- "createdByUserId": {"en": "Created By User", "de": "Erstellt von Benutzer"},
- },
-)
+ processingTime: Optional[float] = Field(None, description="Processing time in seconds", json_schema_extra={"label": "Verarbeitungszeit (s)"})
+ bytesSent: Optional[int] = Field(None, description="Bytes sent to AI model", json_schema_extra={"label": "Gesendete Bytes"})
+ bytesReceived: Optional[int] = Field(None, description="Bytes received from AI model", json_schema_extra={"label": "Empfangene Bytes"})
+ errorCount: Optional[int] = Field(None, description="Number of errors in this call", json_schema_extra={"label": "Fehleranzahl"})
+@i18nModel("Abrechnungseinstellungen")
class BillingSettings(BaseModel):
"""Billing settings per mandate. Only PREPAY_MANDATE model."""
id: str = Field(
- default_factory=lambda: str(uuid.uuid4()), description="Primary key"
+ default_factory=lambda: str(uuid.uuid4()),
+ description="Primary key",
+ json_schema_extra={"label": "ID"},
+ )
+ mandateId: str = Field(..., description="Foreign key to Mandate (UNIQUE)", json_schema_extra={"label": "Mandanten-ID"})
+
+ warningThresholdPercent: float = Field(
+ default=10.0,
+ description="Warning threshold as percentage",
+ json_schema_extra={"label": "Warnschwelle (%)"},
)
- mandateId: str = Field(..., description="Foreign key to Mandate (UNIQUE)")
- warningThresholdPercent: float = Field(default=10.0, description="Warning threshold as percentage")
-
# Stripe
- stripeCustomerId: Optional[str] = Field(None, description="Stripe Customer ID (cus_xxx) — one per mandate")
+ stripeCustomerId: Optional[str] = Field(
+ None,
+ description="Stripe Customer ID (cus_xxx) — one per mandate",
+ json_schema_extra={"label": "Stripe-Kunden-ID"},
+ )
# Auto-Recharge for AI budget
- autoRechargeEnabled: bool = Field(default=False, description="Auto-buy AI budget when low")
- rechargeAmountCHF: float = Field(default=10.0, description="Amount per auto-recharge (CHF, prepaid via Stripe)")
- rechargeMaxPerMonth: int = Field(default=3, description="Max auto-recharges per month")
- rechargesThisMonth: int = Field(default=0, description="Counter: auto-recharges used this month")
- monthResetAt: Optional[datetime] = Field(None, description="When rechargesThisMonth was last reset")
+ autoRechargeEnabled: bool = Field(default=False, description="Auto-buy AI budget when low", json_schema_extra={"label": "Auto-Nachladung"})
+ rechargeAmountCHF: float = Field(
+ default=10.0,
+ description="Amount per auto-recharge (CHF, prepaid via Stripe)",
+ json_schema_extra={"label": "Nachladebetrag (CHF)"},
+ )
+ rechargeMaxPerMonth: int = Field(default=3, description="Max auto-recharges per month", json_schema_extra={"label": "Max. Nachladungen/Monat"})
+ rechargesThisMonth: int = Field(default=0, description="Counter: auto-recharges used this month", json_schema_extra={"label": "Nachladungen diesen Monat"})
+ monthResetAt: Optional[datetime] = Field(None, description="When rechargesThisMonth was last reset", json_schema_extra={"label": "Monats-Reset"})
# Notifications
notifyEmails: List[str] = Field(
default_factory=list,
description="Email addresses for billing alerts (pool exhausted, warnings, etc.)",
+ json_schema_extra={"label": "E-Mails fuer Billing-Alerts (Inhaber/Admin)"},
)
- notifyOnWarning: bool = Field(default=True, description="Send email when warning threshold is reached")
+ notifyOnWarning: bool = Field(default=True, description="Send email when warning threshold is reached", json_schema_extra={"label": "Bei Warnung benachrichtigen"})
# Storage overage (high-watermark within subscription period; resets on new period)
storageHighWatermarkMB: float = Field(
- default=0.0, description="Peak indexed data volume MB this billing period"
+ default=0.0,
+ description="Peak indexed data volume MB this billing period",
+ json_schema_extra={"label": "Speicher-Peak (MB)"},
)
storagePeriodStartAt: Optional[datetime] = Field(
- None, description="Subscription billing period start used for storage reset"
+ None,
+ description="Subscription billing period start used for storage reset",
+ json_schema_extra={"label": "Speicher-Periodenbeginn"},
)
storageBilledUpToMB: float = Field(
default=0.0,
description="Overage MB already debited this period (above plan-included volume)",
+ json_schema_extra={"label": "Speicher abgerechneter Überhang (MB)"},
)
-registerModelLabels(
- "BillingSettings",
- {"en": "Billing Settings", "de": "Abrechnungseinstellungen"},
- {
- "id": {"en": "ID", "de": "ID"},
- "mandateId": {"en": "Mandate ID", "de": "Mandanten-ID"},
- "warningThresholdPercent": {"en": "Warning Threshold (%)", "de": "Warnschwelle (%)"},
- "stripeCustomerId": {"en": "Stripe Customer ID", "de": "Stripe-Kunden-ID"},
- "autoRechargeEnabled": {"en": "Auto-Recharge", "de": "Auto-Nachladung"},
- "rechargeAmountCHF": {"en": "Recharge Amount (CHF)", "de": "Nachladebetrag (CHF)"},
- "rechargeMaxPerMonth": {"en": "Max Recharges/Month", "de": "Max. Nachladungen/Monat"},
- "notifyEmails": {
- "en": "Billing notification emails (owner / admin)",
- "de": "E-Mails fuer Billing-Alerts (Inhaber/Admin)",
- },
- "notifyOnWarning": {"en": "Notify on Warning", "de": "Bei Warnung benachrichtigen"},
- "storageHighWatermarkMB": {"en": "Storage peak (MB)", "de": "Speicher-Peak (MB)"},
- "storagePeriodStartAt": {"en": "Storage period start", "de": "Speicher-Periodenbeginn"},
- "storageBilledUpToMB": {
- "en": "Storage billed overage (MB)",
- "de": "Speicher abgerechneter Überhang (MB)",
- },
- },
-)
-
-
class StripeWebhookEvent(BaseModel):
"""Stores processed Stripe webhook event IDs for idempotency."""
id: str = Field(
- default_factory=lambda: str(uuid.uuid4()), description="Primary key"
+ default_factory=lambda: str(uuid.uuid4()),
+ description="Primary key",
)
event_id: str = Field(..., description="Stripe event ID (evt_xxx)")
processed_at: datetime = Field(
default_factory=lambda: datetime.now(timezone.utc),
- description="When the event was processed"
+ description="When the event was processed",
)
+@i18nModel("Nutzungsstatistik")
class UsageStatistics(BaseModel):
"""Aggregated usage statistics for quick retrieval."""
id: str = Field(
- default_factory=lambda: str(uuid.uuid4()), description="Primary key"
+ default_factory=lambda: str(uuid.uuid4()),
+ description="Primary key",
+ json_schema_extra={"label": "ID"},
)
- accountId: str = Field(..., description="Foreign key to BillingAccount")
- periodType: PeriodTypeEnum = Field(..., description="Period type")
- periodStart: date = Field(..., description="Period start date")
-
+ accountId: str = Field(..., description="Foreign key to BillingAccount", json_schema_extra={"label": "Konto-ID"})
+ periodType: PeriodTypeEnum = Field(..., description="Period type", json_schema_extra={"label": "Periodentyp"})
+ periodStart: date = Field(..., description="Period start date", json_schema_extra={"label": "Periodenbeginn"})
+
# Aggregated values
- totalCostCHF: float = Field(default=0.0, description="Total cost in CHF")
- transactionCount: int = Field(default=0, description="Number of transactions")
-
+ totalCostCHF: float = Field(default=0.0, description="Total cost in CHF", json_schema_extra={"label": "Gesamtkosten (CHF)"})
+ transactionCount: int = Field(default=0, description="Number of transactions", json_schema_extra={"label": "Anzahl Transaktionen"})
+
# Breakdown by provider
costByProvider: Dict[str, float] = Field(
- default_factory=dict,
- description="Cost breakdown by provider (e.g., {'anthropic': 12.50, 'openai': 8.30})"
+ default_factory=dict,
+ description="Cost breakdown by provider (e.g., {'anthropic': 12.50, 'openai': 8.30})",
+ json_schema_extra={"label": "Kosten nach Anbieter"},
)
-
+
# Breakdown by feature
costByFeature: Dict[str, float] = Field(
default_factory=dict,
- description="Cost breakdown by feature (e.g., {'automation': 5.80, 'workspace': 3.20})"
+ description="Cost breakdown by feature (e.g., {'automation': 5.80, 'workspace': 3.20})",
+ json_schema_extra={"label": "Kosten nach Feature"},
)
-registerModelLabels(
- "UsageStatistics",
- {"en": "Usage Statistics", "de": "Nutzungsstatistik"},
- {
- "id": {"en": "ID", "de": "ID"},
- "accountId": {"en": "Account ID", "de": "Konto-ID"},
- "periodType": {"en": "Period Type", "de": "Periodentyp"},
- "periodStart": {"en": "Period Start", "de": "Periodenbeginn"},
- "totalCostCHF": {"en": "Total Cost (CHF)", "de": "Gesamtkosten (CHF)"},
- "transactionCount": {"en": "Transaction Count", "de": "Anzahl Transaktionen"},
- "costByProvider": {"en": "Cost by Provider", "de": "Kosten nach Anbieter"},
- "costByFeature": {"en": "Cost by Feature", "de": "Kosten nach Feature"},
- },
-)
-
-
# ============================================================================
# Response Models for API
# ============================================================================
@@ -277,4 +244,3 @@ class BillingCheckResult(BaseModel):
subscriptionUiPath: Optional[str] = None
userAction: Optional[str] = None
-
diff --git a/modules/datamodels/datamodelChat.py b/modules/datamodels/datamodelChat.py
index 7154e57e..80b4455d 100644
--- a/modules/datamodels/datamodelChat.py
+++ b/modules/datamodels/datamodelChat.py
@@ -6,298 +6,135 @@ from typing import List, Dict, Any, Optional
from enum import Enum
from pydantic import BaseModel, Field
from modules.datamodels.datamodelBase import PowerOnModel
-from modules.shared.attributeUtils import registerModelLabels
+from modules.shared.i18nRegistry import i18nModel
from modules.shared.timeUtils import getUtcTimestamp
import uuid
-
+@i18nModel("Chat-Protokoll")
class ChatLog(PowerOnModel):
"""Log entries for chat workflows. User-owned, no mandate context."""
- id: str = Field(
- default_factory=lambda: str(uuid.uuid4()), description="Primary key"
- )
- workflowId: str = Field(description="Foreign key to workflow")
- message: str = Field(description="Log message")
- type: str = Field(description="Log type (info, warning, error, etc.)")
- timestamp: float = Field(
- default_factory=getUtcTimestamp,
- description="When the log entry was created (UTC timestamp in seconds)",
- )
- status: Optional[str] = Field(None, description="Status of the log entry")
- progress: Optional[float] = Field(
- None, description="Progress indicator (0.0 to 1.0)"
- )
- performance: Optional[Dict[str, Any]] = Field(
- None, description="Performance metrics"
- )
- parentId: Optional[str] = Field(
- None, description="Parent operation ID (operationId of parent operation) for hierarchical display"
- )
- operationId: Optional[str] = Field(
- None, description="Operation ID to group related log entries"
- )
- roundNumber: Optional[int] = Field(None, description="Round number in workflow")
- taskNumber: Optional[int] = Field(None, description="Task number within round")
- actionNumber: Optional[int] = Field(None, description="Action number within task")
-
-
-registerModelLabels(
- "ChatLog",
- {"en": "Chat Log", "fr": "Journal de chat"},
- {
- "id": {"en": "ID", "fr": "ID"},
- "workflowId": {"en": "Workflow ID", "fr": "ID du flux de travail"},
- "message": {"en": "Message", "fr": "Message"},
- "type": {"en": "Type", "fr": "Type"},
- "timestamp": {"en": "Timestamp", "fr": "Horodatage"},
- "status": {"en": "Status", "fr": "Statut"},
- "progress": {"en": "Progress", "fr": "Progression"},
- "performance": {"en": "Performance", "fr": "Performance"},
- },
-)
-
+ id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Primary key", json_schema_extra={"label": "ID"})
+ workflowId: str = Field(description="Foreign key to workflow", json_schema_extra={"label": "Workflow-ID"})
+ message: str = Field(description="Log message", json_schema_extra={"label": "Nachricht"})
+ type: str = Field(description="Log type (info, warning, error, etc.)", json_schema_extra={"label": "Typ"})
+ timestamp: float = Field(default_factory=getUtcTimestamp,
+ description="When the log entry was created (UTC timestamp in seconds)", json_schema_extra={"label": "Zeitstempel"})
+ status: Optional[str] = Field(None, description="Status of the log entry", json_schema_extra={"label": "Status"})
+ progress: Optional[float] = Field(None, description="Progress indicator (0.0 to 1.0)", json_schema_extra={"label": "Fortschritt"})
+ performance: Optional[Dict[str, Any]] = Field(None, description="Performance metrics", json_schema_extra={"label": "Leistung"})
+ parentId: Optional[str] = Field(None, description="Parent operation ID (operationId of parent operation) for hierarchical display", json_schema_extra={"label": "Übergeordnete ID"})
+ operationId: Optional[str] = Field(None, description="Operation ID to group related log entries", json_schema_extra={"label": "Vorgangs-ID"})
+ roundNumber: Optional[int] = Field(None, description="Round number in workflow", json_schema_extra={"label": "Rundennummer"})
+ taskNumber: Optional[int] = Field(None, description="Task number within round", json_schema_extra={"label": "Aufgabennummer"})
+ actionNumber: Optional[int] = Field(None, description="Action number within task", json_schema_extra={"label": "Aktionsnummer"})
+@i18nModel("Chat-Dokument")
class ChatDocument(PowerOnModel):
"""Documents attached to chat messages. User-owned, no mandate context."""
- id: str = Field(
- default_factory=lambda: str(uuid.uuid4()), description="Primary key"
- )
- messageId: str = Field(description="Foreign key to message")
- fileId: str = Field(description="Foreign key to file")
- fileName: str = Field(description="Name of the file")
- fileSize: int = Field(description="Size of the file")
- mimeType: str = Field(description="MIME type of the file")
- roundNumber: Optional[int] = Field(None, description="Round number in workflow")
- taskNumber: Optional[int] = Field(None, description="Task number within round")
- actionNumber: Optional[int] = Field(None, description="Action number within task")
- actionId: Optional[str] = Field(
- None, description="ID of the action that created this document"
- )
-
-
-registerModelLabels(
- "ChatDocument",
- {"en": "Chat Document", "fr": "Document de chat"},
- {
- "id": {"en": "ID", "fr": "ID"},
- "messageId": {"en": "Message ID", "fr": "ID du message"},
- "fileId": {"en": "File ID", "fr": "ID du fichier"},
- "fileName": {"en": "File Name", "fr": "Nom du fichier"},
- "fileSize": {"en": "File Size", "fr": "Taille du fichier"},
- "mimeType": {"en": "MIME Type", "fr": "Type MIME"},
- "roundNumber": {"en": "Round Number", "fr": "Numéro de tour"},
- "taskNumber": {"en": "Task Number", "fr": "Numéro de tâche"},
- "actionNumber": {"en": "Action Number", "fr": "Numéro d'action"},
- "actionId": {"en": "Action ID", "fr": "ID de l'action"},
- },
-)
-
+ id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Primary key", json_schema_extra={"label": "ID"})
+ messageId: str = Field(description="Foreign key to message", json_schema_extra={"label": "Nachrichten-ID"})
+ fileId: str = Field(description="Foreign key to file", json_schema_extra={"label": "Datei-ID"})
+ fileName: str = Field(description="Name of the file", json_schema_extra={"label": "Dateiname"})
+ fileSize: int = Field(description="Size of the file", json_schema_extra={"label": "Dateigröße"})
+ mimeType: str = Field(description="MIME type of the file", json_schema_extra={"label": "MIME-Typ"})
+ roundNumber: Optional[int] = Field(None, description="Round number in workflow", json_schema_extra={"label": "Rundennummer"})
+ taskNumber: Optional[int] = Field(None, description="Task number within round", json_schema_extra={"label": "Aufgabennummer"})
+ actionNumber: Optional[int] = Field(None, description="Action number within task", json_schema_extra={"label": "Aktionsnummer"})
+ actionId: Optional[str] = Field(None, description="ID of the action that created this document", json_schema_extra={"label": "Aktions-ID"})
+@i18nModel("Inhalts-Metadaten")
class ContentMetadata(BaseModel):
- size: int = Field(description="Content size in bytes")
- pages: Optional[int] = Field(
- None, description="Number of pages for multi-page content"
- )
- error: Optional[str] = Field(None, description="Processing error if any")
- width: Optional[int] = Field(None, description="Width in pixels for images/videos")
- height: Optional[int] = Field(
- None, description="Height in pixels for images/videos"
- )
- colorMode: Optional[str] = Field(None, description="Color mode")
- fps: Optional[float] = Field(None, description="Frames per second for videos")
- durationSec: Optional[float] = Field(
- None, description="Duration in seconds for media"
- )
- mimeType: str = Field(description="MIME type of the content")
- base64Encoded: bool = Field(description="Whether the data is base64 encoded")
-
-
-registerModelLabels(
- "ContentMetadata",
- {"en": "Content Metadata", "fr": "Métadonnées du contenu"},
- {
- "size": {"en": "Size", "fr": "Taille"},
- "pages": {"en": "Pages", "fr": "Pages"},
- "error": {"en": "Error", "fr": "Erreur"},
- "width": {"en": "Width", "fr": "Largeur"},
- "height": {"en": "Height", "fr": "Hauteur"},
- "colorMode": {"en": "Color Mode", "fr": "Mode de couleur"},
- "fps": {"en": "FPS", "fr": "IPS"},
- "durationSec": {"en": "Duration", "fr": "Durée"},
- "mimeType": {"en": "MIME Type", "fr": "Type MIME"},
- "base64Encoded": {"en": "Base64 Encoded", "fr": "Encodé en Base64"},
- },
-)
-
+ size: int = Field(description="Content size in bytes", json_schema_extra={"label": "Größe"})
+ pages: Optional[int] = Field(None, description="Number of pages for multi-page content", json_schema_extra={"label": "Seiten"})
+ error: Optional[str] = Field(None, description="Processing error if any", json_schema_extra={"label": "Fehler"})
+ width: Optional[int] = Field(None, description="Width in pixels for images/videos", json_schema_extra={"label": "Breite"})
+ height: Optional[int] = Field(None, description="Height in pixels for images/videos", json_schema_extra={"label": "Höhe"})
+ colorMode: Optional[str] = Field(None, description="Color mode", json_schema_extra={"label": "Farbmodus"})
+ fps: Optional[float] = Field(None, description="Frames per second for videos", json_schema_extra={"label": "FPS"})
+ durationSec: Optional[float] = Field(None, description="Duration in seconds for media", json_schema_extra={"label": "Dauer"})
+ mimeType: str = Field(description="MIME type of the content", json_schema_extra={"label": "MIME-Typ"})
+ base64Encoded: bool = Field(description="Whether the data is base64 encoded", json_schema_extra={"label": "Base64-kodiert"})
+@i18nModel("Inhaltselement")
class ContentItem(BaseModel):
- label: str = Field(description="Content label")
- data: str = Field(description="Extracted text content")
- metadata: ContentMetadata = Field(description="Content metadata")
-
-
-registerModelLabels(
- "ContentItem",
- {"en": "Content Item", "fr": "Élément de contenu"},
- {
- "label": {"en": "Label", "fr": "Étiquette"},
- "data": {"en": "Data", "fr": "Données"},
- "metadata": {"en": "Metadata", "fr": "Métadonnées"},
- },
-)
-
+ label: str = Field(description="Content label", json_schema_extra={"label": "Bezeichnung"})
+ data: str = Field(description="Extracted text content", json_schema_extra={"label": "Daten"})
+ metadata: ContentMetadata = Field(description="Content metadata", json_schema_extra={"label": "Metadaten"})
+@i18nModel("Extrahierter Inhalt")
class ChatContentExtracted(BaseModel):
- id: str = Field(description="Reference to source ChatDocument")
- contents: List[ContentItem] = Field(
- default_factory=list, description="List of content items"
- )
-
-
-registerModelLabels(
- "ChatContentExtracted",
- {"en": "Extracted Content", "fr": "Contenu extrait"},
- {
- "id": {"en": "Object ID", "fr": "ID de l'objet"},
- "contents": {"en": "Contents", "fr": "Contenus"},
- },
-)
-
+ id: str = Field(description="Reference to source ChatDocument", json_schema_extra={"label": "Objekt-ID"})
+ contents: List[ContentItem] = Field(default_factory=list, description="List of content items", json_schema_extra={"label": "Inhalte"})
+@i18nModel("Chat-Nachricht")
class ChatMessage(PowerOnModel):
"""Messages in chat workflows. User-owned, no mandate context."""
- id: str = Field(
- default_factory=lambda: str(uuid.uuid4()), description="Primary key"
- )
- workflowId: str = Field(description="Foreign key to workflow")
- parentMessageId: Optional[str] = Field(
- None, description="Parent message ID for threading"
- )
- documents: List[ChatDocument] = Field(
- default_factory=list, description="Associated documents"
- )
- documentsLabel: Optional[str] = Field(
- None, description="Label for the set of documents"
- )
- message: Optional[str] = Field(None, description="Message content")
- summary: Optional[str] = Field(
- None, description="Short summary of this message for planning/history"
- )
- role: str = Field(description="Role of the message sender")
- status: str = Field(description="Status of the message (first, step, last)")
- sequenceNr: Optional[int] = Field(
- default=0,
- description="Sequence number of the message (set automatically)"
- )
- publishedAt: Optional[float] = Field(
- default=None,
- description="When the message was published (UTC timestamp in seconds)",
- )
- success: Optional[bool] = Field(
- None, description="Whether the message processing was successful"
- )
- actionId: Optional[str] = Field(
- None, description="ID of the action that produced this message"
- )
- actionMethod: Optional[str] = Field(
- None, description="Method of the action that produced this message"
- )
- actionName: Optional[str] = Field(
- None, description="Name of the action that produced this message"
- )
- roundNumber: Optional[int] = Field(None, description="Round number in workflow")
- taskNumber: Optional[int] = Field(None, description="Task number within round")
- actionNumber: Optional[int] = Field(None, description="Action number within task")
- taskProgress: Optional[str] = Field(
- None, description="Task progress status: pending, running, success, fail, retry"
- )
- actionProgress: Optional[str] = Field(
- None, description="Action progress status: pending, running, success, fail"
- )
-
-
-registerModelLabels(
- "ChatMessage",
- {"en": "Chat Message", "fr": "Message de chat"},
- {
- "id": {"en": "ID", "fr": "ID"},
- "workflowId": {"en": "Workflow ID", "fr": "ID du flux de travail"},
- "parentMessageId": {"en": "Parent Message ID", "fr": "ID du message parent"},
- "documents": {"en": "Documents", "fr": "Documents"},
- "documentsLabel": {"en": "Documents Label", "fr": "Label des documents"},
- "message": {"en": "Message", "fr": "Message"},
- "summary": {"en": "Summary", "fr": "Résumé"},
- "role": {"en": "Role", "fr": "Rôle"},
- "status": {"en": "Status", "fr": "Statut"},
- "sequenceNr": {"en": "Sequence Number", "fr": "Numéro de séquence"},
- "publishedAt": {"en": "Published At", "fr": "Publié le"},
- "success": {"en": "Success", "fr": "Succès"},
- "actionId": {"en": "Action ID", "fr": "ID de l'action"},
- "actionMethod": {"en": "Action Method", "fr": "Méthode de l'action"},
- "actionName": {"en": "Action Name", "fr": "Nom de l'action"},
- "roundNumber": {"en": "Round Number", "fr": "Numéro de tour"},
- "taskNumber": {"en": "Task Number", "fr": "Numéro de tâche"},
- "actionNumber": {"en": "Action Number", "fr": "Numéro d'action"},
- "taskProgress": {"en": "Task Progress", "fr": "Progression de la tâche"},
- "actionProgress": {"en": "Action Progress", "fr": "Progression de l'action"},
- },
-)
-
+ id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Primary key", json_schema_extra={"label": "ID"})
+ workflowId: str = Field(description="Foreign key to workflow", json_schema_extra={"label": "Workflow-ID"})
+ parentMessageId: Optional[str] = Field(None, description="Parent message ID for threading", json_schema_extra={"label": "Übergeordnete Nachrichten-ID"})
+ documents: List[ChatDocument] = Field(default_factory=list, description="Associated documents", json_schema_extra={"label": "Dokumente"})
+ documentsLabel: Optional[str] = Field(None, description="Label for the set of documents", json_schema_extra={"label": "Dokumenten-Label"})
+ message: Optional[str] = Field(None, description="Message content", json_schema_extra={"label": "Nachricht"})
+ summary: Optional[str] = Field(None, description="Short summary of this message for planning/history", json_schema_extra={"label": "Zusammenfassung"})
+ role: str = Field(description="Role of the message sender", json_schema_extra={"label": "Rolle"})
+ status: str = Field(description="Status of the message (first, step, last)", json_schema_extra={"label": "Status"})
+ sequenceNr: Optional[int] = Field(default=0,
+ description="Sequence number of the message (set automatically)", json_schema_extra={"label": "Sequenznummer"})
+ publishedAt: Optional[float] = Field(default=None,
+ description="When the message was published (UTC timestamp in seconds)", json_schema_extra={"label": "Veröffentlicht am"})
+ success: Optional[bool] = Field(None, description="Whether the message processing was successful", json_schema_extra={"label": "Erfolg"})
+ actionId: Optional[str] = Field(None, description="ID of the action that produced this message", json_schema_extra={"label": "Aktions-ID"})
+ actionMethod: Optional[str] = Field(None, description="Method of the action that produced this message", json_schema_extra={"label": "Aktionsmethode"})
+ actionName: Optional[str] = Field(None, description="Name of the action that produced this message", json_schema_extra={"label": "Aktionsname"})
+ roundNumber: Optional[int] = Field(None, description="Round number in workflow", json_schema_extra={"label": "Rundennummer"})
+ taskNumber: Optional[int] = Field(None, description="Task number within round", json_schema_extra={"label": "Aufgabennummer"})
+ actionNumber: Optional[int] = Field(None, description="Action number within task", json_schema_extra={"label": "Aktionsnummer"})
+ taskProgress: Optional[str] = Field(None, description="Task progress status: pending, running, success, fail, retry", json_schema_extra={"label": "Aufgabenfortschritt"})
+ actionProgress: Optional[str] = Field(None, description="Action progress status: pending, running, success, fail", json_schema_extra={"label": "Aktionsfortschritt"})
class WorkflowModeEnum(str, Enum):
WORKFLOW_DYNAMIC = "Dynamic"
WORKFLOW_AUTOMATION = "Automation"
WORKFLOW_CHATBOT = "Chatbot"
-
-registerModelLabels(
- "WorkflowModeEnum",
- {"en": "Workflow Mode", "fr": "Mode de workflow"},
- {
- "WORKFLOW_DYNAMIC": {"en": "Dynamic", "fr": "Dynamique"},
- "WORKFLOW_AUTOMATION": {"en": "Automation", "fr": "Automatisation"},
- "WORKFLOW_CHATBOT": {"en": "Chatbot", "fr": "Chatbot"},
- },
-)
-
-
+@i18nModel("Chat-Workflow")
class ChatWorkflow(PowerOnModel):
"""Chat workflow container. User-owned, no mandate context."""
- id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Primary key", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False})
- featureInstanceId: Optional[str] = Field(None, description="Feature instance ID for multi-tenancy isolation", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False})
+ id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Primary key", json_schema_extra={"label": "ID", "frontend_type": "text", "frontend_readonly": True, "frontend_required": False})
+ featureInstanceId: Optional[str] = Field(None, description="Feature instance ID for multi-tenancy isolation", json_schema_extra={"label": "Feature-Instanz-ID", "frontend_type": "text", "frontend_readonly": True, "frontend_required": False})
status: str = Field(default="running", description="Current status of the workflow", json_schema_extra={"frontend_type": "select", "frontend_readonly": False, "frontend_required": False, "frontend_options": [
- {"value": "running", "label": {"en": "Running", "fr": "En cours"}},
- {"value": "completed", "label": {"en": "Completed", "fr": "Terminé"}},
- {"value": "stopped", "label": {"en": "Stopped", "fr": "Arrêté"}},
- {"value": "error", "label": {"en": "Error", "fr": "Erreur"}},
+ {"value": "running", "label": "Running"},
+ {"value": "completed", "label": "Completed"},
+ {"value": "stopped", "label": "Stopped"},
+ {"value": "error", "label": "Error"},
]})
- name: Optional[str] = Field(None, description="Name of the workflow", json_schema_extra={"frontend_type": "text", "frontend_readonly": False, "frontend_required": True})
- currentRound: int = Field(default=0, description="Current round number", json_schema_extra={"frontend_type": "integer", "frontend_readonly": True, "frontend_required": False})
- currentTask: int = Field(default=0, description="Current task number", json_schema_extra={"frontend_type": "integer", "frontend_readonly": True, "frontend_required": False})
- currentAction: int = Field(default=0, description="Current action number", json_schema_extra={"frontend_type": "integer", "frontend_readonly": True, "frontend_required": False})
- totalTasks: int = Field(default=0, description="Total number of tasks in the workflow", json_schema_extra={"frontend_type": "integer", "frontend_readonly": True, "frontend_required": False})
- totalActions: int = Field(default=0, description="Total number of actions in the workflow", json_schema_extra={"frontend_type": "integer", "frontend_readonly": True, "frontend_required": False})
- lastActivity: float = Field(default_factory=getUtcTimestamp, description="Timestamp of last activity (UTC timestamp in seconds)", json_schema_extra={"frontend_type": "timestamp", "frontend_readonly": True, "frontend_required": False})
- startedAt: float = Field(default_factory=getUtcTimestamp, description="When the workflow started (UTC timestamp in seconds)", json_schema_extra={"frontend_type": "timestamp", "frontend_readonly": True, "frontend_required": False})
- logs: List[ChatLog] = Field(default_factory=list, description="Workflow logs", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False})
- messages: List[ChatMessage] = Field(default_factory=list, description="Messages in the workflow", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False})
- tasks: list = Field(default_factory=list, description="List of tasks in the workflow", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False})
+ name: Optional[str] = Field(None, description="Name of the workflow", json_schema_extra={"label": "Name", "frontend_type": "text", "frontend_readonly": False, "frontend_required": True})
+ currentRound: int = Field(default=0, description="Current round number", json_schema_extra={"label": "Aktuelle Runde", "frontend_type": "integer", "frontend_readonly": True, "frontend_required": False})
+ currentTask: int = Field(default=0, description="Current task number", json_schema_extra={"label": "Aktuelle Aufgabe", "frontend_type": "integer", "frontend_readonly": True, "frontend_required": False})
+ currentAction: int = Field(default=0, description="Current action number", json_schema_extra={"label": "Aktuelle Aktion", "frontend_type": "integer", "frontend_readonly": True, "frontend_required": False})
+ totalTasks: int = Field(default=0, description="Total number of tasks in the workflow", json_schema_extra={"label": "Aufgaben gesamt", "frontend_type": "integer", "frontend_readonly": True, "frontend_required": False})
+ totalActions: int = Field(default=0, description="Total number of actions in the workflow", json_schema_extra={"label": "Aktionen gesamt", "frontend_type": "integer", "frontend_readonly": True, "frontend_required": False})
+ lastActivity: float = Field(default_factory=getUtcTimestamp, description="Timestamp of last activity (UTC timestamp in seconds)", json_schema_extra={"label": "Letzte Aktivität", "frontend_type": "timestamp", "frontend_readonly": True, "frontend_required": False})
+ startedAt: float = Field(default_factory=getUtcTimestamp, description="When the workflow started (UTC timestamp in seconds)", json_schema_extra={"label": "Gestartet am", "frontend_type": "timestamp", "frontend_readonly": True, "frontend_required": False})
+ logs: List[ChatLog] = Field(default_factory=list, description="Workflow logs", json_schema_extra={"label": "Protokolle", "frontend_type": "text", "frontend_readonly": True, "frontend_required": False})
+ messages: List[ChatMessage] = Field(default_factory=list, description="Messages in the workflow", json_schema_extra={"label": "Nachrichten", "frontend_type": "text", "frontend_readonly": True, "frontend_required": False})
+ tasks: list = Field(default_factory=list, description="List of tasks in the workflow", json_schema_extra={"label": "Aufgaben", "frontend_type": "text", "frontend_readonly": True, "frontend_required": False})
workflowMode: WorkflowModeEnum = Field(default=WorkflowModeEnum.WORKFLOW_DYNAMIC, description="Workflow mode selector", json_schema_extra={"frontend_type": "select", "frontend_readonly": False, "frontend_required": False, "frontend_options": [
{
"value": WorkflowModeEnum.WORKFLOW_DYNAMIC.value,
- "label": {"en": "Dynamic", "fr": "Dynamique"},
+ "label": "Dynamic",
},
{
"value": WorkflowModeEnum.WORKFLOW_AUTOMATION.value,
- "label": {"en": "Automation", "fr": "Automatisation"},
+ "label": "Automation",
},
{
"value": WorkflowModeEnum.WORKFLOW_CHATBOT.value,
- "label": {"en": "Chatbot", "fr": "Chatbot"},
+ "label": "Chatbot",
},
]})
- maxSteps: int = Field(default=10, description="Maximum number of iterations in dynamic mode", json_schema_extra={"frontend_type": "integer", "frontend_readonly": False, "frontend_required": False})
- expectedFormats: Optional[List[str]] = Field(None, description="List of expected file format extensions from user request (e.g., ['xlsx', 'pdf']). Extracted during intent analysis.", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False})
+ maxSteps: int = Field(default=10, description="Maximum number of iterations in dynamic mode", json_schema_extra={"label": "Max. Schritte", "frontend_type": "integer", "frontend_readonly": False, "frontend_required": False})
+ expectedFormats: Optional[List[str]] = Field(None, description="List of expected file format extensions from user request (e.g., ['xlsx', 'pdf']). Extracted during intent analysis.", json_schema_extra={"label": "Erwartete Formate", "frontend_type": "text", "frontend_readonly": True, "frontend_required": False})
# Helper methods for execution state management
def getRoundIndex(self) -> int:
@@ -327,80 +164,27 @@ class ChatWorkflow(PowerOnModel):
"""Increment action when executing new action in current task"""
self.currentAction += 1
-
-registerModelLabels(
- "ChatWorkflow",
- {"en": "Chat Workflow", "fr": "Flux de travail de chat"},
- {
- "id": {"en": "ID", "fr": "ID"},
- "featureInstanceId": {"en": "Feature Instance ID", "fr": "ID de l'instance de fonctionnalité"},
- "status": {"en": "Status", "fr": "Statut"},
- "name": {"en": "Name", "fr": "Nom"},
- "currentRound": {"en": "Current Round", "fr": "Tour actuel"},
- "currentTask": {"en": "Current Task", "fr": "Tâche actuelle"},
- "currentAction": {"en": "Current Action", "fr": "Action actuelle"},
- "totalTasks": {"en": "Total Tasks", "fr": "Total des tâches"},
- "totalActions": {"en": "Total Actions", "fr": "Total des actions"},
- "lastActivity": {"en": "Last Activity", "fr": "Dernière activité"},
- "startedAt": {"en": "Started At", "fr": "Démarré le"},
- "logs": {"en": "Logs", "fr": "Journaux"},
- "messages": {"en": "Messages", "fr": "Messages"},
- "stats": {"en": "Statistics", "fr": "Statistiques"},
- "tasks": {"en": "Tasks", "fr": "Tâches"},
- "workflowMode": {"en": "Workflow Mode", "fr": "Mode de workflow"},
- "maxSteps": {"en": "Max Steps", "fr": "Étapes max"},
- "expectedFormats": {"en": "Expected Formats", "fr": "Formats attendus"},
- },
-)
-
-
+@i18nModel("Benutzereingabe")
class UserInputRequest(BaseModel):
- prompt: str = Field(description="Prompt for the user")
- listFileId: List[str] = Field(default_factory=list, description="List of file IDs")
- userLanguage: str = Field(default="en", description="User's preferred language")
- workflowId: Optional[str] = Field(None, description="Optional ID of the workflow to continue")
- allowedProviders: Optional[List[str]] = Field(None, description="List of allowed AI providers (multiselect)")
-
-
-registerModelLabels(
- "UserInputRequest",
- {"en": "User Input Request", "fr": "Demande de saisie utilisateur"},
- {
- "prompt": {"en": "Prompt", "fr": "Invite"},
- "listFileId": {"en": "File IDs", "fr": "IDs des fichiers"},
- "userLanguage": {"en": "User Language", "fr": "Langue de l'utilisateur"},
- "preferredProvider": {"en": "Preferred Provider", "fr": "Fournisseur préféré"},
- },
-)
-
+ prompt: str = Field(description="Prompt for the user", json_schema_extra={"label": "Eingabeaufforderung"})
+ listFileId: List[str] = Field(default_factory=list, description="List of file IDs", json_schema_extra={"label": "Datei-IDs"})
+ userLanguage: str = Field(default="en", description="User's preferred language", json_schema_extra={"label": "Benutzersprache"})
+ workflowId: Optional[str] = Field(None, description="Optional ID of the workflow to continue", json_schema_extra={"label": "Workflow-ID"})
+ allowedProviders: Optional[List[str]] = Field(None, description="List of allowed AI providers (multiselect)", json_schema_extra={"label": "Erlaubte Anbieter"})
+@i18nModel("Aktions-Dokument")
class ActionDocument(BaseModel):
"""Clear document structure for action results"""
- documentName: str = Field(description="Name of the document")
- documentData: Any = Field(description="Content/data of the document")
- mimeType: str = Field(description="MIME type of the document")
- sourceJson: Optional[Dict[str, Any]] = Field(
- None,
- description="Source JSON structure (preserved when rendering to xlsx/docx/pdf)"
- )
- validationMetadata: Optional[Dict[str, Any]] = Field(
- None,
- description="Action-specific metadata for content validation (e.g., email recipients, attachments, SharePoint paths)"
- )
-
-
-registerModelLabels(
- "ActionDocument",
- {"en": "Action Document", "fr": "Document d'action"},
- {
- "documentName": {"en": "Document Name", "fr": "Nom du document"},
- "documentData": {"en": "Document Data", "fr": "Données du document"},
- "mimeType": {"en": "MIME Type", "fr": "Type MIME"},
- },
-)
-
+ documentName: str = Field(description="Name of the document", json_schema_extra={"label": "Dokumentname"})
+ documentData: Any = Field(description="Content/data of the document", json_schema_extra={"label": "Dokumentdaten"})
+ mimeType: str = Field(description="MIME type of the document", json_schema_extra={"label": "MIME-Typ"})
+ sourceJson: Optional[Dict[str, Any]] = Field(None,
+ description="Source JSON structure (preserved when rendering to xlsx/docx/pdf)", json_schema_extra={"label": "Quell-JSON"})
+ validationMetadata: Optional[Dict[str, Any]] = Field(None,
+ description="Action-specific metadata for content validation (e.g., email recipients, attachments, SharePoint paths)", json_schema_extra={"label": "Validierungs-Metadaten"})
+@i18nModel("Aktionsergebnis")
class ActionResult(BaseModel):
"""Clean action result with documents as primary output
@@ -409,15 +193,11 @@ class ActionResult(BaseModel):
from the action plan. This ensures consistent document routing throughout the workflow.
"""
- success: bool = Field(description="Whether execution succeeded")
- error: Optional[str] = Field(None, description="Error message if failed")
- documents: List[ActionDocument] = Field(
- default_factory=list, description="Document outputs"
- )
- resultLabel: Optional[str] = Field(
- None,
- description="Label for document routing (set by action handler, not by action methods)",
- )
+ success: bool = Field(description="Whether execution succeeded", json_schema_extra={"label": "Erfolg"})
+ error: Optional[str] = Field(None, description="Error message if failed", json_schema_extra={"label": "Fehler"})
+ documents: List[ActionDocument] = Field(default_factory=list, description="Document outputs", json_schema_extra={"label": "Dokumente"})
+ resultLabel: Optional[str] = Field(None,
+ description="Label for document routing (set by action handler, not by action methods)", json_schema_extra={"label": "Ergebnis-Label"})
@classmethod
def isSuccess(cls, documents: List[ActionDocument] = None) -> "ActionResult":
@@ -429,76 +209,32 @@ class ActionResult(BaseModel):
) -> "ActionResult":
return cls(success=False, documents=documents or [], error=error)
-
-registerModelLabels(
- "ActionResult",
- {"en": "Action Result", "fr": "Résultat de l'action"},
- {
- "success": {"en": "Success", "fr": "Succès"},
- "error": {"en": "Error", "fr": "Erreur"},
- "documents": {"en": "Documents", "fr": "Documents"},
- "resultLabel": {"en": "Result Label", "fr": "Étiquette du résultat"},
- },
-)
-
-
+@i18nModel("Aktionsauswahl")
class ActionSelection(BaseModel):
- method: str = Field(description="Method to execute (e.g., web, document, ai)")
- name: str = Field(
- description="Action name within the method (e.g., search, extract)"
- )
-
-
-registerModelLabels(
- "ActionSelection",
- {"en": "Action Selection", "fr": "Sélection d'action"},
- {
- "method": {"en": "Method", "fr": "Méthode"},
- "name": {"en": "Action Name", "fr": "Nom de l'action"},
- },
-)
-
+ method: str = Field(description="Method to execute (e.g., web, document, ai)", json_schema_extra={"label": "Methode"})
+ name: str = Field(description="Action name within the method (e.g., search, extract)", json_schema_extra={"label": "Aktionsname"})
+@i18nModel("Aktionsparameter")
class ActionParameters(BaseModel):
- parameters: Dict[str, Any] = Field(
- default_factory=dict, description="Parameters to execute the selected action"
- )
-
-
-registerModelLabels(
- "ActionParameters",
- {"en": "Action Parameters", "fr": "Paramètres d'action"},
- {
- "parameters": {"en": "Parameters", "fr": "Paramètres"},
- },
-)
-
+ parameters: Dict[str, Any] = Field(default_factory=dict, description="Parameters to execute the selected action", json_schema_extra={"label": "Parameter"})
+@i18nModel("Beobachtungs-Vorschau")
+@i18nModel("Beobachtung")
+@i18nModel("Beobachtungs-Vorschau")
+@i18nModel("Beobachtung")
class ObservationPreview(BaseModel):
- name: str = Field(description="Document name or URL label")
- mime: Optional[str] = Field(default=None, description="MIME type or kind (legacy field)")
- snippet: Optional[str] = Field(default=None, description="Short snippet or summary")
+ name: str = Field(description="Document name or URL label", json_schema_extra={"label": "Name"})
+ mime: Optional[str] = Field(default=None, description="MIME type or kind (legacy field)", json_schema_extra={"label": "MIME"})
+ snippet: Optional[str] = Field(default=None, description="Short snippet or summary", json_schema_extra={"label": "Ausschnitt"})
# Extended metadata fields
- mimeType: Optional[str] = Field(default=None, description="MIME type")
- size: Optional[str] = Field(default=None, description="File size")
- created: Optional[str] = Field(default=None, description="Creation timestamp")
- modified: Optional[str] = Field(default=None, description="Modification timestamp")
- typeGroup: Optional[str] = Field(default=None, description="Document type group")
- documentId: Optional[str] = Field(default=None, description="Document ID")
- reference: Optional[str] = Field(default=None, description="Document reference")
- contentSize: Optional[str] = Field(default=None, description="Content size indicator")
-
-
-registerModelLabels(
- "ObservationPreview",
- {"en": "Observation Preview", "fr": "Aperçu d'observation"},
- {
- "name": {"en": "Name", "fr": "Nom"},
- "mime": {"en": "MIME", "fr": "MIME"},
- "snippet": {"en": "Snippet", "fr": "Extrait"},
- },
-)
-
+ mimeType: Optional[str] = Field(default=None, description="MIME type", json_schema_extra={"label": "MIME-Typ"})
+ size: Optional[str] = Field(default=None, description="File size", json_schema_extra={"label": "Größe"})
+ created: Optional[str] = Field(default=None, description="Creation timestamp", json_schema_extra={"label": "Erstellt"})
+ modified: Optional[str] = Field(default=None, description="Modification timestamp", json_schema_extra={"label": "Geändert"})
+ typeGroup: Optional[str] = Field(default=None, description="Document type group", json_schema_extra={"label": "Typgruppe"})
+ documentId: Optional[str] = Field(default=None, description="Document ID", json_schema_extra={"label": "Dokument-ID"})
+ reference: Optional[str] = Field(default=None, description="Document reference", json_schema_extra={"label": "Referenz"})
+ contentSize: Optional[str] = Field(default=None, description="Content size indicator", json_schema_extra={"label": "Inhaltsgröße"})
+@i18nModel("Beobachtung")
class Observation(BaseModel):
success: bool = Field(description="Action execution success flag")
@@ -518,20 +254,6 @@ class Observation(BaseModel):
default=None, description="Content analysis results"
)
-
-registerModelLabels(
- "Observation",
- {"en": "Observation", "fr": "Observation"},
- {
- "success": {"en": "Success", "fr": "Succès"},
- "resultLabel": {"en": "Result Label", "fr": "Étiquette du résultat"},
- "documentsCount": {"en": "Documents Count", "fr": "Nombre de documents"},
- "previews": {"en": "Previews", "fr": "Aperçus"},
- "notes": {"en": "Notes", "fr": "Notes"},
- },
-)
-
-
class TaskStatus(str, Enum):
PENDING = "pending"
RUNNING = "running"
@@ -539,64 +261,27 @@ class TaskStatus(str, Enum):
FAILED = "failed"
CANCELLED = "cancelled"
-
-registerModelLabels(
- "TaskStatus",
- {"en": "Task Status", "fr": "Statut de la tâche"},
- {
- "PENDING": {"en": "Pending", "fr": "En attente"},
- "RUNNING": {"en": "Running", "fr": "En cours"},
- "COMPLETED": {"en": "Completed", "fr": "Terminé"},
- "FAILED": {"en": "Failed", "fr": "Échec"},
- "CANCELLED": {"en": "Cancelled", "fr": "Annulé"},
- },
-)
-
-
+@i18nModel("Dokumentaustausch")
class DocumentExchange(BaseModel):
- documentsLabel: str = Field(description="Label for the set of documents")
- documents: List[str] = Field(
- default_factory=list, description="List of document references"
- )
-
-
-registerModelLabels(
- "DocumentExchange",
- {"en": "Document Exchange", "fr": "Échange de documents"},
- {
- "documentsLabel": {"en": "Documents Label", "fr": "Label des documents"},
- "documents": {"en": "Documents", "fr": "Documents"},
- },
-)
-
+ documentsLabel: str = Field(description="Label for the set of documents", json_schema_extra={"label": "Dokumenten-Label"})
+ documents: List[str] = Field(default_factory=list, description="List of document references", json_schema_extra={"label": "Dokumente"})
+@i18nModel("Aufgaben-Aktion")
class ActionItem(BaseModel):
- id: str = Field(..., description="Action ID")
- execMethod: str = Field(..., description="Method to execute")
- execAction: str = Field(..., description="Action to perform")
- execParameters: Dict[str, Any] = Field(
- default_factory=dict, description="Action parameters"
- )
- execResultLabel: Optional[str] = Field(
- None, description="Label for the set of result documents"
- )
- expectedDocumentFormats: Optional[List[Dict[str, str]]] = Field(
- None, description="Expected document formats (optional)"
- )
- userMessage: Optional[str] = Field(
- None, description="User-friendly message in user's language"
- )
- status: TaskStatus = Field(default=TaskStatus.PENDING, description="Action status")
- error: Optional[str] = Field(None, description="Error message if action failed")
- retryCount: int = Field(default=0, description="Number of retries attempted")
- retryMax: int = Field(default=3, description="Maximum number of retries")
- processingTime: Optional[float] = Field(
- None, description="Processing time in seconds"
- )
- timestamp: float = Field(
- ..., description="When the action was executed (UTC timestamp in seconds)"
- )
- result: Optional[str] = Field(None, description="Result of the action")
+ id: str = Field(..., description="Action ID", json_schema_extra={"label": "Aktions-ID"})
+ execMethod: str = Field(..., description="Method to execute", json_schema_extra={"label": "Methode"})
+ execAction: str = Field(..., description="Action to perform", json_schema_extra={"label": "Aktion"})
+ execParameters: Dict[str, Any] = Field(default_factory=dict, description="Action parameters", json_schema_extra={"label": "Parameter"})
+ execResultLabel: Optional[str] = Field(None, description="Label for the set of result documents", json_schema_extra={"label": "Ergebnis-Label"})
+ expectedDocumentFormats: Optional[List[Dict[str, str]]] = Field(None, description="Expected document formats (optional)", json_schema_extra={"label": "Erwartete Dokumentformate"})
+ userMessage: Optional[str] = Field(None, description="User-friendly message in user's language", json_schema_extra={"label": "Benutzernachricht"})
+ status: TaskStatus = Field(default=TaskStatus.PENDING, description="Action status", json_schema_extra={"label": "Status"})
+ error: Optional[str] = Field(None, description="Error message if action failed", json_schema_extra={"label": "Fehler"})
+ retryCount: int = Field(default=0, description="Number of retries attempted", json_schema_extra={"label": "Wiederholungen"})
+ retryMax: int = Field(default=3, description="Maximum number of retries", json_schema_extra={"label": "Max. Wiederholungen"})
+ processingTime: Optional[float] = Field(None, description="Processing time in seconds", json_schema_extra={"label": "Bearbeitungszeit"})
+ timestamp: float = Field(..., description="When the action was executed (UTC timestamp in seconds)", json_schema_extra={"label": "Zeitstempel"})
+ result: Optional[str] = Field(None, description="Result of the action", json_schema_extra={"label": "Ergebnis"})
def setSuccess(self, result: str = None) -> None:
"""Set the action as successful with optional result"""
@@ -610,191 +295,59 @@ class ActionItem(BaseModel):
self.status = TaskStatus.FAILED
self.error = error_message
+@i18nModel("Chat-Aufgabenergebnis")
+class ChatTaskResult(BaseModel):
+ taskId: str = Field(..., description="Task ID", json_schema_extra={"label": "Aufgaben-ID"})
+ status: TaskStatus = Field(default=TaskStatus.PENDING, description="Task status", json_schema_extra={"label": "Status"})
+ success: bool = Field(..., description="Whether the task was successful", json_schema_extra={"label": "Erfolg"})
+ feedback: Optional[str] = Field(None, description="Task feedback message", json_schema_extra={"label": "Rückmeldung"})
+ error: Optional[str] = Field(None, description="Error message if task failed", json_schema_extra={"label": "Fehler"})
-registerModelLabels(
- "ActionItem",
- {"en": "Task Action", "fr": "Action de tâche"},
- {
- "id": {"en": "Action ID", "fr": "ID de l'action"},
- "execMethod": {"en": "Method", "fr": "Méthode"},
- "execAction": {"en": "Action", "fr": "Action"},
- "execParameters": {"en": "Parameters", "fr": "Paramètres"},
- "execResultLabel": {"en": "Result Label", "fr": "Label du résultat"},
- "expectedDocumentFormats": {
- "en": "Expected Document Formats",
- "fr": "Formats de documents attendus",
- },
- "userMessage": {"en": "User Message", "fr": "Message utilisateur"},
- "status": {"en": "Status", "fr": "Statut"},
- "error": {"en": "Error", "fr": "Erreur"},
- "retryCount": {"en": "Retry Count", "fr": "Nombre de tentatives"},
- "retryMax": {"en": "Max Retries", "fr": "Tentatives max"},
- "processingTime": {"en": "Processing Time", "fr": "Temps de traitement"},
- "timestamp": {"en": "Timestamp", "fr": "Horodatage"},
- "result": {"en": "Result", "fr": "Résultat"},
- },
-)
-
-
-class TaskResult(BaseModel):
- taskId: str = Field(..., description="Task ID")
- status: TaskStatus = Field(default=TaskStatus.PENDING, description="Task status")
- success: bool = Field(..., description="Whether the task was successful")
- feedback: Optional[str] = Field(None, description="Task feedback message")
- error: Optional[str] = Field(None, description="Error message if task failed")
-
-
-registerModelLabels(
- "TaskResult",
- {"en": "Task Result", "fr": "Résultat de tâche"},
- {
- "taskId": {"en": "Task ID", "fr": "ID de la tâche"},
- "status": {"en": "Status", "fr": "Statut"},
- "success": {"en": "Success", "fr": "Succès"},
- "feedback": {"en": "Feedback", "fr": "Retour"},
- "error": {"en": "Error", "fr": "Erreur"},
- },
-)
-
-
+@i18nModel("Aufgabe")
class TaskItem(BaseModel):
- id: str = Field(..., description="Task ID")
- workflowId: str = Field(..., description="Workflow ID")
- userInput: str = Field(..., description="User input that triggered the task")
- status: TaskStatus = Field(default=TaskStatus.PENDING, description="Task status")
- error: Optional[str] = Field(None, description="Error message if task failed")
- startedAt: Optional[float] = Field(
- None, description="When the task started (UTC timestamp in seconds)"
- )
- finishedAt: Optional[float] = Field(
- None, description="When the task finished (UTC timestamp in seconds)"
- )
- actionList: List[ActionItem] = Field(
- default_factory=list, description="List of actions to execute"
- )
- retryCount: int = Field(default=0, description="Number of retries attempted")
- retryMax: int = Field(default=3, description="Maximum number of retries")
- rollbackOnFailure: bool = Field(
- default=True, description="Whether to rollback on failure"
- )
- dependencies: List[str] = Field(
- default_factory=list, description="List of task IDs this task depends on"
- )
- feedback: Optional[str] = Field(None, description="Task feedback message")
- processingTime: Optional[float] = Field(
- None, description="Total processing time in seconds"
- )
- resultLabels: Optional[Dict[str, Any]] = Field(
- default_factory=dict, description="Map of result labels to their values"
- )
-
-
-registerModelLabels(
- "TaskItem",
- {"en": "Task", "fr": "Tâche"},
- {
- "id": {"en": "Task ID", "fr": "ID de la tâche"},
- "workflowId": {"en": "Workflow ID", "fr": "ID du workflow"},
- "userInput": {"en": "User Input", "fr": "Entrée utilisateur"},
- "status": {"en": "Status", "fr": "Statut"},
- "error": {"en": "Error", "fr": "Erreur"},
- "startedAt": {"en": "Started At", "fr": "Démarré à"},
- "finishedAt": {"en": "Finished At", "fr": "Terminé à"},
- "actionList": {"en": "Actions", "fr": "Actions"},
- "retryCount": {"en": "Retry Count", "fr": "Nombre de tentatives"},
- "retryMax": {"en": "Max Retries", "fr": "Tentatives max"},
- "processingTime": {"en": "Processing Time", "fr": "Temps de traitement"},
- },
-)
-
+ id: str = Field(..., description="Task ID", json_schema_extra={"label": "Aufgaben-ID"})
+ workflowId: str = Field(..., description="Workflow ID", json_schema_extra={"label": "Workflow-ID"})
+ userInput: str = Field(..., description="User input that triggered the task", json_schema_extra={"label": "Benutzereingabe"})
+ status: TaskStatus = Field(default=TaskStatus.PENDING, description="Task status", json_schema_extra={"label": "Status"})
+ error: Optional[str] = Field(None, description="Error message if task failed", json_schema_extra={"label": "Fehler"})
+ startedAt: Optional[float] = Field(None, description="When the task started (UTC timestamp in seconds)", json_schema_extra={"label": "Gestartet am"})
+ finishedAt: Optional[float] = Field(None, description="When the task finished (UTC timestamp in seconds)", json_schema_extra={"label": "Beendet am"})
+ actionList: List[ActionItem] = Field(default_factory=list, description="List of actions to execute", json_schema_extra={"label": "Aktionen"})
+ retryCount: int = Field(default=0, description="Number of retries attempted", json_schema_extra={"label": "Wiederholungen"})
+ retryMax: int = Field(default=3, description="Maximum number of retries", json_schema_extra={"label": "Max. Wiederholungen"})
+ rollbackOnFailure: bool = Field(default=True, description="Whether to rollback on failure", json_schema_extra={"label": "Bei Fehler zurücksetzen"})
+ dependencies: List[str] = Field(default_factory=list, description="List of task IDs this task depends on", json_schema_extra={"label": "Abhängigkeiten"})
+ feedback: Optional[str] = Field(None, description="Task feedback message", json_schema_extra={"label": "Rückmeldung"})
+ processingTime: Optional[float] = Field(None, description="Total processing time in seconds", json_schema_extra={"label": "Bearbeitungszeit"})
+ resultLabels: Optional[Dict[str, Any]] = Field(default_factory=dict, description="Map of result labels to their values", json_schema_extra={"label": "Ergebnis-Labels"})
+@i18nModel("Aufgabenschritt")
class TaskStep(BaseModel):
- id: str
- objective: str
- dependencies: Optional[list[str]] = Field(default_factory=list)
- successCriteria: Optional[list[str]] = Field(default_factory=list)
+ id: str = Field(description="Task identifier", json_schema_extra={"label": "ID"})
+ objective: str = Field(description="Task objective", json_schema_extra={"label": "Ziel"})
+    dependencies: Optional[list[str]] = Field(default_factory=list, json_schema_extra={"label": "Abhängigkeiten"})
+ successCriteria: Optional[list[str]] = Field(default_factory=list, json_schema_extra={"label": "Erfolgskriterien"})
estimatedComplexity: Optional[str] = None
- userMessage: Optional[str] = Field(
- None, description="User-friendly message in user's language"
- )
+ userMessage: Optional[str] = Field(None, description="User-friendly message in user's language", json_schema_extra={"label": "Benutzernachricht"})
# Format details extracted from intent analysis
- dataType: Optional[str] = Field(
- None, description="Expected data type (text, numbers, documents, etc.)"
- )
- expectedFormats: Optional[List[str]] = Field(
- None, description="Expected output file format extensions (e.g., ['docx', 'pdf', 'xlsx']). Use actual file extensions, not conceptual terms."
- )
- qualityRequirements: Optional[Dict[str, Any]] = Field(
- None, description="Quality requirements and constraints"
- )
-
-
-registerModelLabels(
- "TaskStep",
- {"en": "Task Step", "fr": "Étape de tâche"},
- {
- "id": {"en": "ID", "fr": "ID"},
- "objective": {"en": "Objective", "fr": "Objectif"},
- "dependencies": {"en": "Dependencies", "fr": "Dépendances"},
- "successCriteria": {"en": "Success Criteria", "fr": "Critères de succès"},
- "estimatedComplexity": {
- "en": "Estimated Complexity",
- "fr": "Complexité estimée",
- },
- "userMessage": {"en": "User Message", "fr": "Message utilisateur"},
- "expectedFormats": {"en": "Expected Formats", "fr": "Formats attendus"},
- },
-)
-
+ dataType: Optional[str] = Field(None, description="Expected data type (text, numbers, documents, etc.)", json_schema_extra={"label": "Datentyp"})
+ expectedFormats: Optional[List[str]] = Field(None, description="Expected output file format extensions (e.g., ['docx', 'pdf', 'xlsx']). Use actual file extensions, not conceptual terms.", json_schema_extra={"label": "Erwartete Formate"})
+ qualityRequirements: Optional[Dict[str, Any]] = Field(None, description="Quality requirements and constraints", json_schema_extra={"label": "Qualitätsanforderungen"})
+@i18nModel("Aufgabenübergabe")
class TaskHandover(BaseModel):
- taskId: str = Field(description="Target task ID")
- sourceTask: Optional[str] = Field(None, description="Source task ID")
- inputDocuments: List[DocumentExchange] = Field(
- default_factory=list, description="Available input documents"
- )
- outputDocuments: List[DocumentExchange] = Field(
- default_factory=list, description="Produced output documents"
- )
- context: Dict[str, Any] = Field(default_factory=dict, description="Task context")
- previousResults: List[str] = Field(
- default_factory=list, description="Previous result summaries"
- )
- improvements: List[str] = Field(
- default_factory=list, description="Improvement suggestions"
- )
- workflowSummary: Optional[str] = Field(
- None, description="Summarized workflow context"
- )
- messageHistory: List[str] = Field(
- default_factory=list, description="Key message summaries"
- )
- timestamp: float = Field(
- ..., description="When the handover was created (UTC timestamp in seconds)"
- )
- handoverType: str = Field(
- default="task", description="Type of handover: task, phase, or workflow"
- )
-
-
-registerModelLabels(
- "TaskHandover",
- {"en": "Task Handover", "fr": "Transfert de tâche"},
- {
- "taskId": {"en": "Task ID", "fr": "ID de la tâche"},
- "sourceTask": {"en": "Source Task", "fr": "Tâche source"},
- "inputDocuments": {"en": "Input Documents", "fr": "Documents d'entrée"},
- "outputDocuments": {"en": "Output Documents", "fr": "Documents de sortie"},
- "context": {"en": "Context", "fr": "Contexte"},
- "previousResults": {"en": "Previous Results", "fr": "Résultats précédents"},
- "improvements": {"en": "Improvements", "fr": "Améliorations"},
- "workflowSummary": {"en": "Workflow Summary", "fr": "Résumé du workflow"},
- "messageHistory": {"en": "Message History", "fr": "Historique des messages"},
- "timestamp": {"en": "Timestamp", "fr": "Horodatage"},
- "handoverType": {"en": "Handover Type", "fr": "Type de transfert"},
- },
-)
-
+ taskId: str = Field(description="Target task ID", json_schema_extra={"label": "Aufgaben-ID"})
+ sourceTask: Optional[str] = Field(None, description="Source task ID", json_schema_extra={"label": "Quell-Aufgabe"})
+ inputDocuments: List[DocumentExchange] = Field(default_factory=list, description="Available input documents", json_schema_extra={"label": "Eingabedokumente"})
+ outputDocuments: List[DocumentExchange] = Field(default_factory=list, description="Produced output documents", json_schema_extra={"label": "Ausgabedokumente"})
+ context: Dict[str, Any] = Field(default_factory=dict, description="Task context", json_schema_extra={"label": "Kontext"})
+ previousResults: List[str] = Field(default_factory=list, description="Previous result summaries", json_schema_extra={"label": "Vorherige Ergebnisse"})
+ improvements: List[str] = Field(default_factory=list, description="Improvement suggestions", json_schema_extra={"label": "Verbesserungen"})
+ workflowSummary: Optional[str] = Field(None, description="Summarized workflow context", json_schema_extra={"label": "Workflow-Zusammenfassung"})
+ messageHistory: List[str] = Field(default_factory=list, description="Key message summaries", json_schema_extra={"label": "Nachrichtenverlauf"})
+ timestamp: float = Field(..., description="When the handover was created (UTC timestamp in seconds)", json_schema_extra={"label": "Zeitstempel"})
+ handoverType: str = Field(default="task", description="Type of handover: task, phase, or workflow", json_schema_extra={"label": "Übergabetyp"})
class TaskContext(BaseModel):
taskStep: TaskStep
@@ -849,7 +402,6 @@ class TaskContext(BaseModel):
self.improvements = []
self.improvements.append(improvement)
-
class ReviewContext(BaseModel):
taskStep: TaskStep
taskActions: Optional[list] = Field(default_factory=list)
@@ -858,99 +410,40 @@ class ReviewContext(BaseModel):
workflowId: Optional[str] = None
previousResults: Optional[list[str]] = Field(default_factory=list)
-
+@i18nModel("Prüfergebnis")
class ReviewResult(BaseModel):
status: str
reason: Optional[str] = None
- improvements: Optional[list[str]] = Field(default_factory=list)
- qualityScore: Optional[float] = Field(default=5.0, description="Quality score (0-10)")
- missingOutputs: Optional[list[str]] = Field(default_factory=list)
- metCriteria: Optional[list[str]] = Field(default_factory=list)
- unmetCriteria: Optional[list[str]] = Field(default_factory=list)
+ improvements: Optional[list[str]] = Field(default_factory=list, json_schema_extra={"label": "Verbesserungen"})
+ qualityScore: Optional[float] = Field(default=5.0, description="Quality score (0-10)", json_schema_extra={"label": "Qualitätsscore"})
+ missingOutputs: Optional[list[str]] = Field(default_factory=list, json_schema_extra={"label": "Fehlende Ausgaben"})
+ metCriteria: Optional[list[str]] = Field(default_factory=list, json_schema_extra={"label": "Erfüllte Kriterien"})
+ unmetCriteria: Optional[list[str]] = Field(default_factory=list, json_schema_extra={"label": "Nicht erfüllte Kriterien"})
confidence: Optional[float] = 0.5
- userMessage: Optional[str] = Field(
- None, description="User-friendly message in user's language"
- )
+ userMessage: Optional[str] = Field(None, description="User-friendly message in user's language", json_schema_extra={"label": "Benutzernachricht"})
# NEW: Concrete next action guidance (when status is "continue")
- nextAction: Optional[str] = Field(
- None, description="Specific action to execute next (e.g., 'ai.convert', 'ai.process', 'ai.reformat')"
- )
- nextActionParameters: Optional[Dict[str, Any]] = Field(
- None, description="Parameters for the next action (e.g., {'fromFormat': 'json', 'toFormat': 'csv'})"
- )
- nextActionObjective: Optional[str] = Field(
- None, description="What this specific action will achieve"
- )
-
-
-registerModelLabels(
- "ReviewResult",
- {"en": "Review Result", "fr": "Résultat de l'évaluation"},
- {
- "status": {"en": "Status", "fr": "Statut"},
- "reason": {"en": "Reason", "fr": "Raison"},
- "improvements": {"en": "Improvements", "fr": "Améliorations"},
- "qualityScore": {"en": "Quality Score", "fr": "Score de qualité"},
- "missingOutputs": {"en": "Missing Outputs", "fr": "Sorties manquantes"},
- "metCriteria": {"en": "Met Criteria", "fr": "Critères respectés"},
- "unmetCriteria": {"en": "Unmet Criteria", "fr": "Critères non respectés"},
- "confidence": {"en": "Confidence", "fr": "Confiance"},
- "userMessage": {"en": "User Message", "fr": "Message utilisateur"},
- },
-)
-
+ nextAction: Optional[str] = Field(None, description="Specific action to execute next (e.g., 'ai.convert', 'ai.process', 'ai.reformat')", json_schema_extra={"label": "Nächste Aktion"})
+ nextActionParameters: Optional[Dict[str, Any]] = Field(None, description="Parameters for the next action (e.g., {'fromFormat': 'json', 'toFormat': 'csv'})", json_schema_extra={"label": "Parameter nächste Aktion"})
+ nextActionObjective: Optional[str] = Field(None, description="What this specific action will achieve", json_schema_extra={"label": "Ziel nächste Aktion"})
+@i18nModel("Aufgabenplan")
class TaskPlan(BaseModel):
- overview: str
- tasks: list[TaskStep]
- userMessage: Optional[str] = Field(
- None, description="Overall user-friendly message for the task plan"
- )
-
-
-registerModelLabels(
- "TaskPlan",
- {"en": "Task Plan", "fr": "Plan de tâches"},
- {
- "overview": {"en": "Overview", "fr": "Aperçu"},
- "tasks": {"en": "Tasks", "fr": "Tâches"},
- "userMessage": {"en": "User Message", "fr": "Message utilisateur"},
- },
-)
+ overview: str = Field(json_schema_extra={"label": "Überblick"})
+ tasks: list[TaskStep] = Field(json_schema_extra={"label": "Aufgaben"})
+ userMessage: Optional[str] = Field(None, description="Overall user-friendly message for the task plan", json_schema_extra={"label": "Benutzernachricht"})
# Forward references resolved automatically since ChatWorkflow is defined above
-
+@i18nModel("Prompt-Platzhalter")
class PromptPlaceholder(BaseModel):
- label: str
- content: str
- summaryAllowed: bool = Field(
- default=False,
- description="Whether host may summarize content before sending to AI",
- )
-
-
-registerModelLabels(
- "PromptPlaceholder",
- {"en": "Prompt Placeholder", "fr": "Espace réservé d'invite"},
- {
- "label": {"en": "Label", "fr": "Libellé"},
- "content": {"en": "Content", "fr": "Contenu"},
- "summaryAllowed": {"en": "Summary Allowed", "fr": "Résumé autorisé"},
- },
-)
-
+ label: str = Field(json_schema_extra={"label": "Bezeichnung"})
+ content: str = Field(json_schema_extra={"label": "Inhalt"})
+ summaryAllowed: bool = Field(default=False,
+ description="Whether host may summarize content before sending to AI", json_schema_extra={"label": "Zusammenfassung erlaubt"})
+@i18nModel("Prompt-Paket")
class PromptBundle(BaseModel):
- prompt: str
- placeholders: List[PromptPlaceholder] = Field(default_factory=list)
+ prompt: str = Field(json_schema_extra={"label": "Prompt"})
+    placeholders: List[PromptPlaceholder] = Field(default_factory=list, json_schema_extra={"label": "Platzhalter"})
-
-registerModelLabels(
- "PromptBundle",
- {"en": "Prompt Bundle", "fr": "Lot d'invite"},
- {
- "prompt": {"en": "Prompt", "fr": "Invite"},
- "placeholders": {"en": "Placeholders", "fr": "Espaces réservés"},
- },
-)
diff --git a/modules/datamodels/datamodelDataSource.py b/modules/datamodels/datamodelDataSource.py
index 1d432041..0e0a7d16 100644
--- a/modules/datamodels/datamodelDataSource.py
+++ b/modules/datamodels/datamodelDataSource.py
@@ -9,66 +9,81 @@ Google Drive folder, FTP directory, etc.) for agent-accessible data containers.
from typing import Dict, Any, Optional
from pydantic import BaseModel, Field
from modules.datamodels.datamodelBase import PowerOnModel
-from modules.shared.attributeUtils import registerModelLabels
+from modules.shared.i18nRegistry import i18nModel
import uuid
+@i18nModel("Datenquelle")
class DataSource(PowerOnModel):
- """Configured external data source linked to a UserConnection."""
- id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Primary key")
- connectionId: str = Field(description="FK to UserConnection")
- sourceType: str = Field(
- description="sharepointFolder, googleDriveFolder, outlookFolder, ftpFolder, clickupList (path under /team/...)"
+    """Konfigurierte externe Datenquelle verknüpft mit einer UserConnection."""
+ id: str = Field(
+ default_factory=lambda: str(uuid.uuid4()),
+ description="Primary key",
+ json_schema_extra={"label": "ID"},
+ )
+ connectionId: str = Field(
+ description="FK to UserConnection",
+ json_schema_extra={"label": "Verbindungs-ID"},
+ )
+ sourceType: str = Field(
+ description="sharepointFolder, googleDriveFolder, outlookFolder, ftpFolder, clickupList (path under /team/...)",
+ json_schema_extra={"label": "Quellentyp"},
+ )
+ path: str = Field(
+ description="External path (e.g. '/sites/MySite/Documents/Reports')",
+ json_schema_extra={"label": "Pfad"},
+ )
+ label: str = Field(
+ description="User-visible label (often the last path segment)",
+ json_schema_extra={"label": "Bezeichnung"},
)
- path: str = Field(description="External path (e.g. '/sites/MySite/Documents/Reports')")
- label: str = Field(description="User-visible label (often the last path segment)")
displayPath: Optional[str] = Field(
default=None,
description="Human-readable full path for UI (connection-relative, slash-separated)",
+ json_schema_extra={"label": "Anzeigepfad"},
+ )
+ featureInstanceId: Optional[str] = Field(
+ default=None,
+ description="Scoped to feature instance",
+ json_schema_extra={"label": "Feature-Instanz"},
+ )
+ mandateId: Optional[str] = Field(
+ default=None,
+ description="Mandate scope",
+ json_schema_extra={"label": "Mandanten-ID"},
+ )
+ userId: str = Field(
+ default="",
+ description="Owner user ID",
+ json_schema_extra={"label": "Benutzer-ID"},
+ )
+ autoSync: bool = Field(
+ default=False,
+ description="Automatically sync on schedule",
+ json_schema_extra={"label": "Auto-Sync"},
+ )
+ lastSynced: Optional[float] = Field(
+ default=None,
+ description="Last sync timestamp",
+ json_schema_extra={"label": "Letzter Sync"},
)
- featureInstanceId: Optional[str] = Field(default=None, description="Scoped to feature instance")
- mandateId: Optional[str] = Field(default=None, description="Mandate scope")
- userId: str = Field(default="", description="Owner user ID")
- autoSync: bool = Field(default=False, description="Automatically sync on schedule")
- lastSynced: Optional[float] = Field(default=None, description="Last sync timestamp")
scope: str = Field(
default="personal",
description="Data visibility scope: personal, featureInstance, mandate, global",
- json_schema_extra={"frontend_type": "select", "frontend_readonly": False, "frontend_required": False, "frontend_options": [
- {"value": "personal", "label": {"en": "Personal", "de": "Persönlich"}},
- {"value": "featureInstance", "label": {"en": "Feature Instance", "de": "Feature-Instanz"}},
- {"value": "mandate", "label": {"en": "Mandate", "de": "Mandant"}},
- {"value": "global", "label": {"en": "Global", "de": "Global"}},
- ]}
+ json_schema_extra={"label": "Sichtbarkeit", "frontend_type": "select", "frontend_readonly": False, "frontend_required": False, "frontend_options": [
+ {"value": "personal", "label": "Persönlich"},
+ {"value": "featureInstance", "label": "Feature-Instanz"},
+ {"value": "mandate", "label": "Mandant"},
+ {"value": "global", "label": "Global"},
+ ]},
)
neutralize: bool = Field(
default=False,
description="Whether this data source should be neutralized before AI processing",
- json_schema_extra={"frontend_type": "checkbox", "frontend_readonly": False, "frontend_required": False}
+ json_schema_extra={"label": "Neutralisieren", "frontend_type": "checkbox", "frontend_readonly": False, "frontend_required": False},
)
-registerModelLabels(
- "DataSource",
- {"en": "Data Source", "de": "Datenquelle", "fr": "Source de données"},
- {
- "id": {"en": "ID", "de": "ID", "fr": "ID"},
- "connectionId": {"en": "Connection ID", "de": "Verbindungs-ID", "fr": "ID de connexion"},
- "sourceType": {"en": "Source Type", "de": "Quellentyp", "fr": "Type de source"},
- "path": {"en": "Path", "de": "Pfad", "fr": "Chemin"},
- "label": {"en": "Label", "de": "Bezeichnung", "fr": "Libellé"},
- "displayPath": {"en": "Display path", "de": "Anzeigepfad", "fr": "Chemin affiché"},
- "featureInstanceId": {"en": "Feature Instance", "de": "Feature-Instanz", "fr": "Instance de fonctionnalité"},
- "mandateId": {"en": "Mandate ID", "de": "Mandanten-ID", "fr": "ID du mandat"},
- "userId": {"en": "User ID", "de": "Benutzer-ID", "fr": "ID utilisateur"},
- "autoSync": {"en": "Auto Sync", "de": "Auto-Sync", "fr": "Synchro auto"},
- "lastSynced": {"en": "Last Synced", "de": "Letzter Sync", "fr": "Dernier sync"},
- "scope": {"en": "Scope", "de": "Sichtbarkeit"},
- "neutralize": {"en": "Neutralize", "de": "Neutralisieren"},
- },
-)
-
-
class ExternalEntry(BaseModel):
"""An item (file or folder) from an external data source."""
name: str = Field(description="Item name")
diff --git a/modules/datamodels/datamodelDocref.py b/modules/datamodels/datamodelDocref.py
index b4c5924e..e4a43bd2 100644
--- a/modules/datamodels/datamodelDocref.py
+++ b/modules/datamodels/datamodelDocref.py
@@ -6,7 +6,7 @@ Document reference models for typed document references in workflows.
from typing import List, Optional
from pydantic import BaseModel, Field
-from modules.shared.attributeUtils import registerModelLabels
+from modules.shared.i18nRegistry import i18nModel
class DocumentReference(BaseModel):
@@ -14,11 +14,19 @@ class DocumentReference(BaseModel):
pass
+@i18nModel("Dokumentlisten-Referenz")
class DocumentListReference(DocumentReference):
"""Reference to a document list via message label"""
- messageId: Optional[str] = Field(None, description="Optional message ID for cross-round references")
- label: str = Field(description="Document list label")
-
+ messageId: Optional[str] = Field(
+ None,
+ description="Optional message ID for cross-round references",
+ json_schema_extra={"label": "Nachrichten-ID"},
+ )
+ label: str = Field(
+ description="Document list label",
+ json_schema_extra={"label": "Bezeichnung"},
+ )
+
def to_string(self) -> str:
"""Convert to string format: docList:messageId:label or docList:label"""
if self.messageId:
@@ -26,11 +34,19 @@ class DocumentListReference(DocumentReference):
return f"docList:{self.label}"
+@i18nModel("Dokumentelement-Referenz")
class DocumentItemReference(DocumentReference):
"""Reference to a specific document item"""
- documentId: str = Field(description="Document ID")
- fileName: Optional[str] = Field(None, description="Optional file name")
-
+ documentId: str = Field(
+ description="Document ID",
+ json_schema_extra={"label": "Dokument-ID"},
+ )
+ fileName: Optional[str] = Field(
+ None,
+ description="Optional file name",
+ json_schema_extra={"label": "Dateiname"},
+ )
+
def to_string(self) -> str:
"""Convert to string format: docItem:documentId:fileName or docItem:documentId"""
if self.fileName:
@@ -38,21 +54,23 @@ class DocumentItemReference(DocumentReference):
return f"docItem:{self.documentId}"
+@i18nModel("Dokumentreferenz-Liste")
class DocumentReferenceList(BaseModel):
"""List of document references with conversion methods"""
references: List[DocumentReference] = Field(
default_factory=list,
- description="List of document references"
+ description="List of document references",
+ json_schema_extra={"label": "Referenzen"},
)
-
+
def to_string_list(self) -> List[str]:
"""Convert all references to string list"""
return [ref.to_string() for ref in self.references]
-
+
@classmethod
def from_string_list(cls, stringList: List[str]) -> "DocumentReferenceList":
"""Parse string list to typed references
-
+
Supports formats:
- docList:label
- docList:messageId:label
@@ -60,13 +78,13 @@ class DocumentReferenceList(BaseModel):
- docItem:documentId:fileName
"""
references = []
-
+
for refStr in stringList:
if not refStr or not isinstance(refStr, str):
continue
-
+
refStr = refStr.strip()
-
+
# Parse docList: references
if refStr.startswith("docList:"):
parts = refStr[8:].split(":", 1) # Remove "docList:" prefix
@@ -77,7 +95,7 @@ class DocumentReferenceList(BaseModel):
elif len(parts) == 1 and parts[0]:
# docList:label
references.append(DocumentListReference(label=parts[0]))
-
+
# Parse docItem: references
elif refStr.startswith("docItem:"):
parts = refStr[8:].split(":", 1) # Remove "docItem:" prefix
@@ -88,33 +106,12 @@ class DocumentReferenceList(BaseModel):
elif len(parts) == 1 and parts[0]:
# docItem:documentId
references.append(DocumentItemReference(documentId=parts[0]))
-
+
# Unknown format - skip or log warning
else:
# Try to parse as simple string (backward compatibility)
# Assume it's a label if it doesn't match known patterns
if refStr:
references.append(DocumentListReference(label=refStr))
-
+
return cls(references=references)
-
-
-registerModelLabels(
- "DocumentReference",
- {"en": "Document Reference", "fr": "Référence de document"},
- {
- "messageId": {"en": "Message ID", "fr": "ID du message"},
- "label": {"en": "Label", "fr": "Étiquette"},
- "documentId": {"en": "Document ID", "fr": "ID du document"},
- "fileName": {"en": "File Name", "fr": "Nom du fichier"},
- },
-)
-
-registerModelLabels(
- "DocumentReferenceList",
- {"en": "Document Reference List", "fr": "Liste de références de documents"},
- {
- "references": {"en": "References", "fr": "Références"},
- },
-)
-
diff --git a/modules/datamodels/datamodelDocument.py b/modules/datamodels/datamodelDocument.py
index a5cd6b0c..e34c82ff 100644
--- a/modules/datamodels/datamodelDocument.py
+++ b/modules/datamodels/datamodelDocument.py
@@ -1,7 +1,7 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
from typing import Any, Dict, List, Optional, Literal, Union
-from pydantic import BaseModel, Field
+from pydantic import BaseModel, Field, field_serializer
from datetime import datetime
@@ -116,11 +116,12 @@ class RenderedDocument(BaseModel):
filename: str = Field(description="Filename for the document (e.g., 'report.html', 'image.png')")
documentType: Optional[str] = Field(default=None, description="Type of document (e.g., 'report', 'invoice', 'analysis')")
metadata: Optional[Dict[str, Any]] = Field(default=None, description="Document metadata (title, author, etc.)")
-
- class Config:
- json_encoders = {
- bytes: lambda v: v.decode('utf-8', errors='replace') if isinstance(v, bytes) else v
- }
+
+ @field_serializer("documentData")
+ def _serializeDocumentData(self, v: bytes) -> str:
+ if isinstance(v, bytes):
+ return v.decode("utf-8", errors="replace")
+ return str(v)
# Update forward references
diff --git a/modules/datamodels/datamodelFeatureDataSource.py b/modules/datamodels/datamodelFeatureDataSource.py
index 02de0a67..3199a054 100644
--- a/modules/datamodels/datamodelFeatureDataSource.py
+++ b/modules/datamodels/datamodelFeatureDataSource.py
@@ -9,54 +9,69 @@ so the agent can query structured feature data (e.g. TrusteePosition rows).
from typing import Dict, Optional
from pydantic import BaseModel, Field
from modules.datamodels.datamodelBase import PowerOnModel
-from modules.shared.attributeUtils import registerModelLabels
+from modules.shared.i18nRegistry import i18nModel
import uuid
+@i18nModel("Feature-Datenquelle")
class FeatureDataSource(PowerOnModel):
- """A feature-instance table attached as data source in the AI workspace."""
- id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Primary key")
- featureInstanceId: str = Field(description="FK to FeatureInstance")
- featureCode: str = Field(description="Feature code (e.g. trustee, commcoach)")
- tableName: str = Field(description="Table name from DATA_OBJECTS meta (e.g. TrusteePosition)")
- objectKey: str = Field(description="RBAC object key (e.g. data.feature.trustee.TrusteePosition)")
- label: str = Field(description="User-visible label")
- mandateId: str = Field(default="", description="Mandate scope")
- userId: str = Field(default="", description="Owner user ID")
- workspaceInstanceId: str = Field(description="Workspace instance where this source is used")
+ """Feature-Instanz-Tabelle als Datenquelle im AI-Workspace."""
+ id: str = Field(
+ default_factory=lambda: str(uuid.uuid4()),
+ description="Primary key",
+ json_schema_extra={"label": "ID"},
+ )
+ featureInstanceId: str = Field(
+ description="FK to FeatureInstance",
+ json_schema_extra={"label": "Feature-Instanz"},
+ )
+ featureCode: str = Field(
+ description="Feature code (e.g. trustee, commcoach)",
+ json_schema_extra={"label": "Feature"},
+ )
+ tableName: str = Field(
+ description="Table name from DATA_OBJECTS meta (e.g. TrusteePosition)",
+ json_schema_extra={"label": "Tabelle"},
+ )
+ objectKey: str = Field(
+ description="RBAC object key (e.g. data.feature.trustee.TrusteePosition)",
+ json_schema_extra={"label": "Objekt-Schluessel"},
+ )
+ label: str = Field(
+ description="User-visible label",
+ json_schema_extra={"label": "Bezeichnung"},
+ )
+ mandateId: str = Field(
+ default="",
+ description="Mandate scope",
+ json_schema_extra={"label": "Mandant"},
+ )
+ userId: str = Field(
+ default="",
+ description="Owner user ID",
+ json_schema_extra={"label": "Benutzer"},
+ )
+ workspaceInstanceId: str = Field(
+ description="Workspace instance where this source is used",
+ json_schema_extra={"label": "Workspace"},
+ )
scope: str = Field(
default="personal",
description="Data visibility scope: personal, featureInstance, mandate, global",
- json_schema_extra={"frontend_type": "select", "frontend_readonly": False, "frontend_required": False, "frontend_options": [
- {"value": "personal", "label": {"en": "Personal", "de": "Persönlich"}},
- {"value": "featureInstance", "label": {"en": "Feature Instance", "de": "Feature-Instanz"}},
- {"value": "mandate", "label": {"en": "Mandate", "de": "Mandant"}},
- {"value": "global", "label": {"en": "Global", "de": "Global"}},
- ]}
+ json_schema_extra={"label": "Sichtbarkeit", "frontend_type": "select", "frontend_readonly": False, "frontend_required": False, "frontend_options": [
+ {"value": "personal", "label": "Persönlich"},
+ {"value": "featureInstance", "label": "Feature-Instanz"},
+ {"value": "mandate", "label": "Mandant"},
+ {"value": "global", "label": "Global"},
+ ]},
)
neutralize: bool = Field(
default=False,
description="Whether this data source should be neutralized before AI processing",
- json_schema_extra={"frontend_type": "checkbox", "frontend_readonly": False, "frontend_required": False}
+ json_schema_extra={"label": "Neutralisieren", "frontend_type": "checkbox", "frontend_readonly": False, "frontend_required": False},
)
recordFilter: Optional[Dict[str, str]] = Field(
default=None,
description="Record-level filter applied when querying this table, e.g. {'sessionId': 'abc-123'}",
+ json_schema_extra={"label": "Datensatzfilter"},
)
-
-
-registerModelLabels(
- "FeatureDataSource",
- {"en": "Feature Data Source", "de": "Feature-Datenquelle", "fr": "Source de données fonctionnalité"},
- {
- "id": {"en": "ID", "de": "ID", "fr": "ID"},
- "featureInstanceId": {"en": "Feature Instance", "de": "Feature-Instanz", "fr": "Instance"},
- "featureCode": {"en": "Feature", "de": "Feature", "fr": "Fonctionnalité"},
- "tableName": {"en": "Table", "de": "Tabelle", "fr": "Table"},
- "objectKey": {"en": "Object Key", "de": "Objekt-Schlüssel", "fr": "Clé objet"},
- "label": {"en": "Label", "de": "Bezeichnung", "fr": "Libellé"},
- "mandateId": {"en": "Mandate", "de": "Mandant", "fr": "Mandat"},
- "userId": {"en": "User", "de": "Benutzer", "fr": "Utilisateur"},
- "workspaceInstanceId": {"en": "Workspace", "de": "Workspace", "fr": "Espace de travail"},
- },
-)
diff --git a/modules/datamodels/datamodelFeatures.py b/modules/datamodels/datamodelFeatures.py
index 3134a18e..93a7fae9 100644
--- a/modules/datamodels/datamodelFeatures.py
+++ b/modules/datamodels/datamodelFeatures.py
@@ -6,85 +6,56 @@ import uuid
from typing import Optional, Dict, Any
from pydantic import BaseModel, Field
from modules.datamodels.datamodelBase import PowerOnModel
-from modules.shared.attributeUtils import registerModelLabels
+from modules.shared.i18nRegistry import i18nModel
from modules.datamodels.datamodelUtils import TextMultilingual
+@i18nModel("Feature")
class Feature(PowerOnModel):
- """
- Feature-Definition (global, z.B. 'trustee', 'chatbot').
- Features sind die verfügbaren Funktionalitäten der Plattform.
- """
+ """Feature-Definition (global, z.B. 'trustee', 'chatbot'). Verfuegbare Funktionalitaeten der Plattform."""
code: str = Field(
description="Unique feature code (Primary Key), z.B. 'trustee', 'chatbot'",
- json_schema_extra={"frontend_type": "text", "frontend_readonly": False, "frontend_required": True}
+ json_schema_extra={"label": "Code", "frontend_type": "text", "frontend_readonly": False, "frontend_required": True}
)
label: TextMultilingual = Field(
description="Feature label in multiple languages (I18n)",
- json_schema_extra={"frontend_type": "multilingual", "frontend_readonly": False, "frontend_required": True}
+ json_schema_extra={"label": "Bezeichnung", "frontend_type": "multilingual", "frontend_readonly": False, "frontend_required": True}
)
icon: str = Field(
default="",
description="Icon identifier for the feature",
- json_schema_extra={"frontend_type": "text", "frontend_readonly": False, "frontend_required": False}
+ json_schema_extra={"label": "Symbol", "frontend_type": "text", "frontend_readonly": False, "frontend_required": False}
)
-registerModelLabels(
- "Feature",
- {"en": "Feature", "de": "Feature", "fr": "Fonctionnalité"},
- {
- "code": {"en": "Code", "de": "Code", "fr": "Code"},
- "label": {"en": "Label", "de": "Bezeichnung", "fr": "Libellé"},
- "icon": {"en": "Icon", "de": "Symbol", "fr": "Icône"},
- },
-)
-
-
+@i18nModel("Feature-Instanz")
class FeatureInstance(PowerOnModel):
- """
- Instanz eines Features in einem Mandanten.
- Ein Mandant kann mehrere Instanzen desselben Features haben.
- """
+ """Instanz eines Features in einem Mandanten. Ein Mandant kann mehrere Instanzen desselben Features haben."""
id: str = Field(
default_factory=lambda: str(uuid.uuid4()),
description="Unique ID of the feature instance",
- json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False}
+ json_schema_extra={"label": "ID", "frontend_type": "text", "frontend_readonly": True, "frontend_required": False}
)
featureCode: str = Field(
- description="FK → Feature.code",
- json_schema_extra={"frontend_type": "select", "frontend_readonly": True, "frontend_required": True}
+ description="FK -> Feature.code",
+ json_schema_extra={"label": "Feature", "frontend_type": "select", "frontend_readonly": True, "frontend_required": True}
)
mandateId: str = Field(
- description="FK → Mandate.id (CASCADE DELETE)",
- json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": True}
+ description="FK -> Mandate.id (CASCADE DELETE)",
+ json_schema_extra={"label": "Mandant", "frontend_type": "text", "frontend_readonly": True, "frontend_required": True}
)
label: str = Field(
default="",
description="Instance label, z.B. 'Buchhaltung 2025'",
- json_schema_extra={"frontend_type": "text", "frontend_readonly": False, "frontend_required": True}
+ json_schema_extra={"label": "Bezeichnung", "frontend_type": "text", "frontend_readonly": False, "frontend_required": True}
)
enabled: bool = Field(
default=True,
description="Whether this feature instance is enabled",
- json_schema_extra={"frontend_type": "checkbox", "frontend_readonly": False, "frontend_required": False}
+ json_schema_extra={"label": "Aktiviert", "frontend_type": "checkbox", "frontend_readonly": False, "frontend_required": False}
)
config: Optional[Dict[str, Any]] = Field(
default=None,
description="Instance-specific configuration (JSONB). Structure depends on featureCode.",
- json_schema_extra={"frontend_type": "json", "frontend_readonly": False, "frontend_required": False}
+ json_schema_extra={"label": "Konfiguration", "frontend_type": "json", "frontend_readonly": False, "frontend_required": False}
)
-
-
-registerModelLabels(
- "FeatureInstance",
- {"en": "Feature Instance", "de": "Feature-Instanz", "fr": "Instance de fonctionnalité"},
- {
- "id": {"en": "ID", "de": "ID", "fr": "ID"},
- "featureCode": {"en": "Feature", "de": "Feature", "fr": "Fonctionnalité"},
- "mandateId": {"en": "Mandate", "de": "Mandant", "fr": "Mandat"},
- "label": {"en": "Label", "de": "Bezeichnung", "fr": "Libellé"},
- "enabled": {"en": "Enabled", "de": "Aktiviert", "fr": "Activé"},
- "config": {"en": "Configuration", "de": "Konfiguration", "fr": "Configuration"},
- },
-)
diff --git a/modules/datamodels/datamodelFileFolder.py b/modules/datamodels/datamodelFileFolder.py
index 23cd197b..73222e51 100644
--- a/modules/datamodels/datamodelFileFolder.py
+++ b/modules/datamodels/datamodelFileFolder.py
@@ -5,26 +5,34 @@
from typing import Optional
from pydantic import BaseModel, Field
from modules.datamodels.datamodelBase import PowerOnModel
-from modules.shared.attributeUtils import registerModelLabels
+from modules.shared.i18nRegistry import i18nModel
import uuid
+@i18nModel("Dateiordner")
class FileFolder(PowerOnModel):
- id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Primary key", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False})
- name: str = Field(description="Folder name", json_schema_extra={"frontend_type": "text", "frontend_readonly": False, "frontend_required": True})
- parentId: Optional[str] = Field(default=None, description="Parent folder ID (null = root)", json_schema_extra={"frontend_type": "text", "frontend_readonly": False, "frontend_required": False})
- mandateId: Optional[str] = Field(default=None, description="Mandate context", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False})
- featureInstanceId: Optional[str] = Field(default=None, description="Feature instance context", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False})
-
-
-registerModelLabels(
- "FileFolder",
- {"en": "File Folder", "fr": "Dossier de fichiers"},
- {
- "id": {"en": "ID", "fr": "ID"},
- "name": {"en": "Name", "fr": "Nom"},
- "parentId": {"en": "Parent Folder", "fr": "Dossier parent"},
- "mandateId": {"en": "Mandate ID", "fr": "ID du mandat"},
- "featureInstanceId": {"en": "Feature Instance ID", "fr": "ID de l'instance"},
- },
-)
+ """Hierarchischer Ordner fuer die Dateiverwaltung."""
+ id: str = Field(
+ default_factory=lambda: str(uuid.uuid4()),
+ description="Primary key",
+ json_schema_extra={"label": "ID", "frontend_type": "text", "frontend_readonly": True, "frontend_required": False},
+ )
+ name: str = Field(
+ description="Folder name",
+ json_schema_extra={"label": "Name", "frontend_type": "text", "frontend_readonly": False, "frontend_required": True},
+ )
+ parentId: Optional[str] = Field(
+ default=None,
+ description="Parent folder ID (null = root)",
+ json_schema_extra={"label": "Uebergeordneter Ordner", "frontend_type": "text", "frontend_readonly": False, "frontend_required": False},
+ )
+ mandateId: Optional[str] = Field(
+ default=None,
+ description="Mandate context",
+ json_schema_extra={"label": "Mandanten-ID", "frontend_type": "text", "frontend_readonly": True, "frontend_required": False},
+ )
+ featureInstanceId: Optional[str] = Field(
+ default=None,
+ description="Feature instance context",
+ json_schema_extra={"label": "Feature-Instanz-ID", "frontend_type": "text", "frontend_readonly": True, "frontend_required": False},
+ )
diff --git a/modules/datamodels/datamodelFiles.py b/modules/datamodels/datamodelFiles.py
index b8a44d2c..0bf79bca 100644
--- a/modules/datamodels/datamodelFiles.py
+++ b/modules/datamodels/datamodelFiles.py
@@ -5,66 +5,110 @@
from typing import Dict, Any, List, Optional, Union
from pydantic import BaseModel, Field
from modules.datamodels.datamodelBase import PowerOnModel
-from modules.shared.attributeUtils import registerModelLabels
+from modules.shared.i18nRegistry import i18nModel
import uuid
import base64
+@i18nModel("Datei")
class FileItem(PowerOnModel):
- id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Primary key", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False})
- mandateId: Optional[str] = Field(default="", description="ID of the mandate this file belongs to", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False})
- featureInstanceId: Optional[str] = Field(default="", description="ID of the feature instance this file belongs to", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False, "frontend_fk_source": "/api/features/instances", "frontend_fk_display_field": "label"})
- fileName: str = Field(description="Name of the file", json_schema_extra={"frontend_type": "text", "frontend_readonly": False, "frontend_required": True})
- mimeType: str = Field(description="MIME type of the file", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False})
- fileHash: str = Field(description="Hash of the file", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False})
- fileSize: int = Field(description="Size of the file in bytes", json_schema_extra={"frontend_type": "integer", "frontend_readonly": True, "frontend_required": False})
- tags: Optional[List[str]] = Field(default=None, description="Tags for categorization and search", json_schema_extra={"frontend_type": "tags", "frontend_readonly": False, "frontend_required": False})
- folderId: Optional[str] = Field(default=None, description="ID of the parent folder", json_schema_extra={"frontend_type": "text", "frontend_readonly": False, "frontend_required": False})
- description: Optional[str] = Field(default=None, description="User-provided description of the file", json_schema_extra={"frontend_type": "textarea", "frontend_readonly": False, "frontend_required": False})
- status: Optional[str] = Field(default=None, description="Processing status: pending, extracted, embedding, indexed, failed", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False})
+ """Metadaten einer gespeicherten Datei."""
+ id: str = Field(
+ default_factory=lambda: str(uuid.uuid4()),
+ description="Primary key",
+ json_schema_extra={"label": "ID", "frontend_type": "text", "frontend_readonly": True, "frontend_required": False},
+ )
+ fileName: str = Field(
+ description="Name of the file",
+ json_schema_extra={"label": "Dateiname", "frontend_type": "text", "frontend_readonly": False, "frontend_required": True},
+ )
+ mandateId: Optional[str] = Field(
+ default="",
+ description="ID of the mandate this file belongs to",
+ json_schema_extra={"label": "Mandant", "frontend_type": "text", "frontend_readonly": True, "frontend_required": False, "frontend_fk_source": "/api/mandates/", "frontend_fk_display_field": "label"},
+ )
+ featureInstanceId: Optional[str] = Field(
+ default="",
+ description="ID of the feature instance this file belongs to",
+ json_schema_extra={"label": "Feature-Instanz", "frontend_type": "text", "frontend_readonly": True, "frontend_required": False, "frontend_fk_source": "/api/features/instances", "frontend_fk_display_field": "label"},
+ )
+ mimeType: str = Field(
+ description="MIME type of the file",
+ json_schema_extra={"label": "MIME-Typ", "frontend_type": "text", "frontend_readonly": True, "frontend_required": False},
+ )
+ fileHash: str = Field(
+ description="Hash of the file",
+ json_schema_extra={"label": "Datei-Hash", "frontend_type": "text", "frontend_readonly": True, "frontend_required": False},
+ )
+ fileSize: int = Field(
+ description="Size of the file in bytes",
+ json_schema_extra={"label": "Dateigroesse", "frontend_type": "integer", "frontend_readonly": True, "frontend_required": False},
+ )
+ tags: Optional[List[str]] = Field(
+ default=None,
+ description="Tags for categorization and search",
+ json_schema_extra={"label": "Tags", "frontend_type": "tags", "frontend_readonly": False, "frontend_required": False},
+ )
+ folderId: Optional[str] = Field(
+ default=None,
+ description="ID of the parent folder",
+ json_schema_extra={"label": "Ordner-ID", "frontend_type": "text", "frontend_readonly": False, "frontend_required": False},
+ )
+ description: Optional[str] = Field(
+ default=None,
+ description="User-provided description of the file",
+ json_schema_extra={"label": "Beschreibung", "frontend_type": "textarea", "frontend_readonly": False, "frontend_required": False},
+ )
+ status: Optional[str] = Field(
+ default=None,
+ description="Processing status: pending, extracted, embedding, indexed, failed",
+ json_schema_extra={"label": "Status", "frontend_type": "text", "frontend_readonly": True, "frontend_required": False},
+ )
scope: str = Field(
default="personal",
description="Data visibility scope: personal, featureInstance, mandate, global",
- json_schema_extra={"frontend_type": "select", "frontend_readonly": False, "frontend_required": False, "frontend_options": [
- {"value": "personal", "label": {"en": "Personal", "de": "Persönlich"}},
- {"value": "featureInstance", "label": {"en": "Feature Instance", "de": "Feature-Instanz"}},
- {"value": "mandate", "label": {"en": "Mandate", "de": "Mandant"}},
- {"value": "global", "label": {"en": "Global", "de": "Global"}},
- ]}
+ json_schema_extra={"label": "Sichtbarkeit", "frontend_type": "select", "frontend_readonly": False, "frontend_required": False, "frontend_options": [
+ {"value": "personal", "label": "Persönlich"},
+ {"value": "featureInstance", "label": "Feature-Instanz"},
+ {"value": "mandate", "label": "Mandant"},
+ {"value": "global", "label": "Global"},
+ ]},
)
neutralize: bool = Field(
default=False,
description="Whether this file should be neutralized before AI processing",
- json_schema_extra={"frontend_type": "checkbox", "frontend_readonly": False, "frontend_required": False}
+ json_schema_extra={"label": "Neutralisieren", "frontend_type": "checkbox", "frontend_readonly": False, "frontend_required": False},
)
-registerModelLabels(
- "FileItem",
- {"en": "File Item", "fr": "Élément de fichier"},
- {
- "id": {"en": "ID", "fr": "ID"},
- "mandateId": {"en": "Mandate ID", "fr": "ID du mandat"},
- "featureInstanceId": {"en": "Feature Instance", "fr": "Instance de fonctionnalité"},
- "fileName": {"en": "fileName", "fr": "Nom de fichier"},
- "mimeType": {"en": "MIME Type", "fr": "Type MIME"},
- "fileHash": {"en": "File Hash", "fr": "Hash du fichier"},
- "fileSize": {"en": "File Size", "fr": "Taille du fichier"},
- "tags": {"en": "Tags", "fr": "Tags"},
- "folderId": {"en": "Folder ID", "fr": "ID du dossier"},
- "description": {"en": "Description", "fr": "Description"},
- "status": {"en": "Status", "fr": "Statut"},
- "scope": {"en": "Scope", "de": "Sichtbarkeit"},
- "neutralize": {"en": "Neutralize", "de": "Neutralisieren"},
- },
-)
+@i18nModel("Datei-Vorschau")
class FilePreview(BaseModel):
- content: Union[str, bytes] = Field(description="File content (text or binary)")
- mimeType: str = Field(description="MIME type of the file")
- fileName: str = Field(description="Original fileName")
- isText: bool = Field(description="Whether the content is text (True) or binary (False)")
- encoding: Optional[str] = Field(None, description="Text encoding if content is text")
- size: int = Field(description="Size of the content in bytes")
+ """Vorschau-Inhalt einer Datei fuer die Anzeige."""
+ content: Union[str, bytes] = Field(
+ description="File content (text or binary)",
+ json_schema_extra={"label": "Inhalt"},
+ )
+ mimeType: str = Field(
+ description="MIME type of the file",
+ json_schema_extra={"label": "MIME-Typ"},
+ )
+ fileName: str = Field(
+ description="Original fileName",
+ json_schema_extra={"label": "Dateiname"},
+ )
+ isText: bool = Field(
+ description="Whether the content is text (True) or binary (False)",
+ json_schema_extra={"label": "Ist Text"},
+ )
+ encoding: Optional[str] = Field(
+ None,
+ description="Text encoding if content is text",
+ json_schema_extra={"label": "Kodierung"},
+ )
+ size: int = Field(
+ description="Size of the content in bytes",
+ json_schema_extra={"label": "Groesse"},
+ )
def toDictWithBase64Encoding(self) -> Dict[str, Any]:
"""Convert to dictionary with base64 encoding for binary content."""
@@ -72,29 +116,21 @@ class FilePreview(BaseModel):
if isinstance(data.get("content"), bytes):
data["content"] = base64.b64encode(data["content"]).decode("utf-8")
return data
-registerModelLabels(
- "FilePreview",
- {"en": "File Preview", "fr": "Aperçu du fichier"},
- {
- "content": {"en": "Content", "fr": "Contenu"},
- "mimeType": {"en": "MIME Type", "fr": "Type MIME"},
- "fileName": {"en": "fileName", "fr": "Nom de fichier"},
- "isText": {"en": "Is Text", "fr": "Est du texte"},
- "encoding": {"en": "Encoding", "fr": "Encodage"},
- "size": {"en": "Size", "fr": "Taille"},
- },
-)
+
+@i18nModel("Dateidaten")
class FileData(PowerOnModel):
- id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Primary key")
- data: str = Field(description="File data content")
- base64Encoded: bool = Field(description="Whether the data is base64 encoded")
-registerModelLabels(
- "FileData",
- {"en": "File Data", "fr": "Données de fichier"},
- {
- "id": {"en": "ID", "fr": "ID"},
- "data": {"en": "Data", "fr": "Données"},
- "base64Encoded": {"en": "Base64 Encoded", "fr": "Encodé en Base64"},
- },
-)
+ """Rohdaten einer Datei (z.B. Base64)."""
+ id: str = Field(
+ default_factory=lambda: str(uuid.uuid4()),
+ description="Primary key",
+ json_schema_extra={"label": "ID"},
+ )
+ data: str = Field(
+ description="File data content",
+ json_schema_extra={"label": "Daten"},
+ )
+ base64Encoded: bool = Field(
+ description="Whether the data is base64 encoded",
+ json_schema_extra={"label": "Base64-kodiert"},
+ )
diff --git a/modules/datamodels/datamodelInvitation.py b/modules/datamodels/datamodelInvitation.py
index 709e5021..4808bd55 100644
--- a/modules/datamodels/datamodelInvitation.py
+++ b/modules/datamodels/datamodelInvitation.py
@@ -10,9 +10,10 @@ import secrets
from typing import Optional, List
from pydantic import BaseModel, Field
from modules.datamodels.datamodelBase import PowerOnModel
-from modules.shared.attributeUtils import registerModelLabels
+from modules.shared.i18nRegistry import i18nModel
+@i18nModel("Einladung")
class Invitation(PowerOnModel):
"""
Einladungs-Token für neue User.
@@ -21,103 +22,76 @@ class Invitation(PowerOnModel):
id: str = Field(
default_factory=lambda: str(uuid.uuid4()),
description="Unique ID of the invitation",
- json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False}
+ json_schema_extra={"label": "ID", "frontend_type": "text", "frontend_readonly": True, "frontend_required": False}
)
token: str = Field(
default_factory=lambda: secrets.token_urlsafe(32),
description="Secure invitation token",
- json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False}
+ json_schema_extra={"label": "Token", "frontend_type": "text", "frontend_readonly": True, "frontend_required": False}
)
-
- # Ziel der Einladung
+
mandateId: str = Field(
description="FK → Mandate.id - Target mandate for the invitation",
- json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": True}
+ json_schema_extra={"label": "Mandant", "frontend_type": "text", "frontend_readonly": True, "frontend_required": True}
)
featureInstanceId: Optional[str] = Field(
default=None,
description="Optional FK → FeatureInstance.id - Direct access to specific feature",
- json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False}
+ json_schema_extra={"label": "Feature-Instanz", "frontend_type": "text", "frontend_readonly": True, "frontend_required": False}
)
roleIds: List[str] = Field(
default_factory=list,
description="List of Role IDs to assign to the invited user",
- json_schema_extra={"frontend_type": "multiselect", "frontend_readonly": False, "frontend_required": True}
+ json_schema_extra={"label": "Rollen", "frontend_type": "multiselect", "frontend_readonly": False, "frontend_required": True}
)
-
- # Einladungs-Details
+
targetUsername: Optional[str] = Field(
default=None,
description="Username of the invited user (must match on acceptance)",
- json_schema_extra={"frontend_type": "text", "frontend_readonly": False, "frontend_required": False}
+ json_schema_extra={"label": "Ziel-Benutzername", "frontend_type": "text", "frontend_readonly": False, "frontend_required": False}
)
email: Optional[str] = Field(
default=None,
description="Email address to send invitation link (optional)",
- json_schema_extra={"frontend_type": "email", "frontend_readonly": False, "frontend_required": False}
+ json_schema_extra={"label": "E-Mail (optional)", "frontend_type": "email", "frontend_readonly": False, "frontend_required": False}
)
expiresAt: float = Field(
description="When the invitation expires (UTC timestamp)",
- json_schema_extra={"frontend_type": "timestamp", "frontend_readonly": True, "frontend_required": True}
+ json_schema_extra={"label": "Gueltig bis", "frontend_type": "timestamp", "frontend_readonly": True, "frontend_required": True}
)
-
- # Status
+
usedBy: Optional[str] = Field(
default=None,
description="User ID of the person who used the invitation",
- json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False}
+ json_schema_extra={"label": "Verwendet von", "frontend_type": "text", "frontend_readonly": True, "frontend_required": False}
)
usedAt: Optional[float] = Field(
default=None,
description="When the invitation was used (UTC timestamp)",
- json_schema_extra={"frontend_type": "timestamp", "frontend_readonly": True, "frontend_required": False}
+ json_schema_extra={"label": "Verwendet am", "frontend_type": "timestamp", "frontend_readonly": True, "frontend_required": False}
)
revokedAt: Optional[float] = Field(
default=None,
description="When the invitation was revoked (UTC timestamp)",
- json_schema_extra={"frontend_type": "timestamp", "frontend_readonly": True, "frontend_required": False}
+ json_schema_extra={"label": "Widerrufen am", "frontend_type": "timestamp", "frontend_readonly": True, "frontend_required": False}
)
-
- # Email-Status
+
emailSent: Optional[bool] = Field(
default=False,
description="Whether the invitation email was successfully sent",
- json_schema_extra={"frontend_type": "checkbox", "frontend_readonly": True, "frontend_required": False}
+ json_schema_extra={"label": "E-Mail gesendet", "frontend_type": "checkbox", "frontend_readonly": True, "frontend_required": False}
)
-
- # Einschränkungen
+
maxUses: int = Field(
default=1,
ge=1,
le=100,
description="Maximum number of times this invitation can be used",
- json_schema_extra={"frontend_type": "number", "frontend_readonly": False, "frontend_required": False}
+ json_schema_extra={"label": "Max. Verwendungen", "frontend_type": "number", "frontend_readonly": False, "frontend_required": False}
)
currentUses: int = Field(
default=0,
ge=0,
description="Current number of times this invitation has been used",
- json_schema_extra={"frontend_type": "number", "frontend_readonly": True, "frontend_required": False}
+ json_schema_extra={"label": "Aktuelle Verwendungen", "frontend_type": "number", "frontend_readonly": True, "frontend_required": False}
)
-
-
-registerModelLabels(
- "Invitation",
- {"en": "Invitation", "de": "Einladung", "fr": "Invitation"},
- {
- "id": {"en": "ID", "de": "ID", "fr": "ID"},
- "token": {"en": "Token", "de": "Token", "fr": "Jeton"},
- "mandateId": {"en": "Mandate", "de": "Mandant", "fr": "Mandat"},
- "featureInstanceId": {"en": "Feature Instance", "de": "Feature-Instanz", "fr": "Instance"},
- "roleIds": {"en": "Roles", "de": "Rollen", "fr": "Rôles"},
- "targetUsername": {"en": "Target Username", "de": "Ziel-Benutzername", "fr": "Nom d'utilisateur cible"},
- "email": {"en": "Email (optional)", "de": "E-Mail (optional)", "fr": "Email (optionnel)"},
- "expiresAt": {"en": "Expires At", "de": "Gültig bis", "fr": "Expire le"},
- "usedBy": {"en": "Used By", "de": "Verwendet von", "fr": "Utilisé par"},
- "usedAt": {"en": "Used At", "de": "Verwendet am", "fr": "Utilisé le"},
- "revokedAt": {"en": "Revoked At", "de": "Widerrufen am", "fr": "Révoqué le"},
- "emailSent": {"en": "Email Sent", "de": "E-Mail gesendet", "fr": "Email envoyé"},
- "maxUses": {"en": "Max Uses", "de": "Max. Verwendungen", "fr": "Utilisations max"},
- "currentUses": {"en": "Current Uses", "de": "Aktuelle Verwendungen", "fr": "Utilisations actuelles"},
- },
-)
diff --git a/modules/datamodels/datamodelKnowledge.py b/modules/datamodels/datamodelKnowledge.py
index 7ac12c15..7432a30c 100644
--- a/modules/datamodels/datamodelKnowledge.py
+++ b/modules/datamodels/datamodelKnowledge.py
@@ -15,173 +15,231 @@ Vector fields use json_schema_extra={"db_type": "vector(1536)"} for pgvector.
from typing import Dict, Any, List, Optional
from pydantic import BaseModel, Field
from modules.datamodels.datamodelBase import PowerOnModel
-from modules.shared.attributeUtils import registerModelLabels
+from modules.shared.i18nRegistry import i18nModel
from modules.shared.timeUtils import getUtcTimestamp
import uuid
+@i18nModel("Datei-Inhaltsindex")
class FileContentIndex(PowerOnModel):
- """Structural index of a file's content objects. Created without AI.
- Scope is mirrored from FileItem (poweron_management) at indexing time."""
- id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Primary key (typically = fileId)")
- userId: str = Field(description="Owner user ID")
- featureInstanceId: str = Field(default="", description="Feature instance scope")
- mandateId: str = Field(default="", description="Mandate scope")
- fileName: str = Field(description="Original file name")
- mimeType: str = Field(description="MIME type of the file")
- containerPath: Optional[str] = Field(default=None, description="Path within a container (e.g. 'archive.zip/folder/report.pdf')")
- totalObjects: int = Field(default=0, description="Total number of content objects extracted")
- totalSize: int = Field(default=0, description="Total size of all content objects in bytes")
- structure: Dict[str, Any] = Field(default_factory=dict, description="Structural overview (pages, sections, hierarchy)")
- objectSummary: List[Dict[str, Any]] = Field(default_factory=list, description="Compact summary per content object")
- extractedAt: float = Field(default_factory=getUtcTimestamp, description="Extraction timestamp")
- status: str = Field(default="pending", description="Processing status: pending, extracted, embedding, indexed, failed")
+ """Struktureller Index der Inhaltsobjekte einer Datei."""
+ id: str = Field(
+ default_factory=lambda: str(uuid.uuid4()),
+ description="Primary key (typically = fileId)",
+ json_schema_extra={"label": "ID"},
+ )
+ userId: str = Field(
+ description="Owner user ID",
+ json_schema_extra={"label": "Benutzer-ID"},
+ )
+ featureInstanceId: str = Field(
+ default="",
+ description="Feature instance scope",
+ json_schema_extra={"label": "Feature-Instanz-ID"},
+ )
+ mandateId: str = Field(
+ default="",
+ description="Mandate scope",
+ json_schema_extra={"label": "Mandanten-ID"},
+ )
+ fileName: str = Field(
+ description="Original file name",
+ json_schema_extra={"label": "Dateiname"},
+ )
+ mimeType: str = Field(
+ description="MIME type of the file",
+ json_schema_extra={"label": "MIME-Typ"},
+ )
+ containerPath: Optional[str] = Field(
+ default=None,
+ description="Path within a container (e.g. 'archive.zip/folder/report.pdf')",
+ json_schema_extra={"label": "Container-Pfad"},
+ )
+ totalObjects: int = Field(
+ default=0,
+ description="Total number of content objects extracted",
+ json_schema_extra={"label": "Anzahl Objekte"},
+ )
+ totalSize: int = Field(
+ default=0,
+ description="Total size of all content objects in bytes",
+ json_schema_extra={"label": "Gesamtgroesse"},
+ )
+ structure: Dict[str, Any] = Field(
+ default_factory=dict,
+ description="Structural overview (pages, sections, hierarchy)",
+ json_schema_extra={"label": "Struktur"},
+ )
+ objectSummary: List[Dict[str, Any]] = Field(
+ default_factory=list,
+ description="Compact summary per content object",
+ json_schema_extra={"label": "Objekt-Zusammenfassung"},
+ )
+ extractedAt: float = Field(
+ default_factory=getUtcTimestamp,
+ description="Extraction timestamp",
+ json_schema_extra={"label": "Extrahiert am"},
+ )
+ status: str = Field(
+ default="pending",
+ description="Processing status: pending, extracted, embedding, indexed, failed",
+ json_schema_extra={"label": "Status"},
+ )
scope: str = Field(
default="personal",
description="Data visibility scope: personal, featureInstance, mandate, global",
+ json_schema_extra={"label": "Sichtbarkeit"},
)
neutralizationStatus: Optional[str] = Field(
default=None,
description="Neutralization status: completed, failed, skipped, None = not required",
+ json_schema_extra={"label": "Neutralisierungsstatus"},
)
isNeutralized: bool = Field(
default=False,
description="True if content was neutralized before indexing",
+ json_schema_extra={"label": "Neutralisiert"},
)
-registerModelLabels(
- "FileContentIndex",
- {"en": "File Content Index", "fr": "Index du contenu de fichier"},
- {
- "id": {"en": "ID", "fr": "ID"},
- "userId": {"en": "User ID", "fr": "ID utilisateur"},
- "featureInstanceId": {"en": "Feature Instance ID", "fr": "ID de l'instance"},
- "mandateId": {"en": "Mandate ID", "fr": "ID du mandat"},
- "fileName": {"en": "File Name", "fr": "Nom de fichier"},
- "mimeType": {"en": "MIME Type", "fr": "Type MIME"},
- "containerPath": {"en": "Container Path", "fr": "Chemin du conteneur"},
- "totalObjects": {"en": "Total Objects", "fr": "Nombre total d'objets"},
- "totalSize": {"en": "Total Size", "fr": "Taille totale"},
- "structure": {"en": "Structure", "fr": "Structure"},
- "objectSummary": {"en": "Object Summary", "fr": "Résumé des objets"},
- "extractedAt": {"en": "Extracted At", "fr": "Extrait le"},
- "status": {"en": "Status", "fr": "Statut"},
- "scope": {"en": "Scope", "de": "Sichtbarkeit"},
- "neutralizationStatus": {"en": "Neutralization Status", "de": "Neutralisierungsstatus"},
- "isNeutralized": {"en": "Is Neutralized", "de": "Neutralisiert"},
- },
-)
-
-
+@i18nModel("Inhalts-Chunk")
class ContentChunk(PowerOnModel):
- """Persisted content chunk with embedding vector. Reusable across workflows.
- Scalar content object (or chunk thereof) with pgvector embedding."""
- id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Primary key")
- contentObjectId: str = Field(description="Reference to the content object within FileContentIndex")
- fileId: str = Field(description="FK to the source file")
- userId: str = Field(description="Owner user ID")
- featureInstanceId: str = Field(default="", description="Feature instance scope")
- contentType: str = Field(description="Content type: text, image, videostream, audiostream, other")
- data: str = Field(description="Content data (text, base64, URL)")
- contextRef: Dict[str, Any] = Field(default_factory=dict, description="Context reference (page, position, label)")
- summary: Optional[str] = Field(default=None, description="AI-generated summary (on demand)")
- chunkMetadata: Dict[str, Any] = Field(default_factory=dict, description="Additional metadata")
+ """Persistierter Inhalts-Chunk mit Embedding-Vektor."""
+ id: str = Field(
+ default_factory=lambda: str(uuid.uuid4()),
+ description="Primary key",
+ json_schema_extra={"label": "ID"},
+ )
+ contentObjectId: str = Field(
+ description="Reference to the content object within FileContentIndex",
+ json_schema_extra={"label": "Inhaltsobjekt-ID"},
+ )
+ fileId: str = Field(
+ description="FK to the source file",
+ json_schema_extra={"label": "Datei-ID"},
+ )
+ userId: str = Field(
+ description="Owner user ID",
+ json_schema_extra={"label": "Benutzer-ID"},
+ )
+ featureInstanceId: str = Field(
+ default="",
+ description="Feature instance scope",
+ json_schema_extra={"label": "Feature-Instanz-ID"},
+ )
+ contentType: str = Field(
+ description="Content type: text, image, videostream, audiostream, other",
+ json_schema_extra={"label": "Inhaltstyp"},
+ )
+ data: str = Field(
+ description="Content data (text, base64, URL)",
+ json_schema_extra={"label": "Daten"},
+ )
+ contextRef: Dict[str, Any] = Field(
+ default_factory=dict,
+ description="Context reference (page, position, label)",
+ json_schema_extra={"label": "Kontext-Referenz"},
+ )
+ summary: Optional[str] = Field(
+ default=None,
+ description="AI-generated summary (on demand)",
+ json_schema_extra={"label": "Zusammenfassung"},
+ )
+ chunkMetadata: Dict[str, Any] = Field(
+ default_factory=dict,
+ description="Additional metadata",
+ json_schema_extra={"label": "Metadaten"},
+ )
embedding: Optional[List[float]] = Field(
- default=None, description="pgvector embedding (NOT NULL for text chunks)",
- json_schema_extra={"db_type": "vector(1536)"}
+ default=None,
+ description="pgvector embedding (NOT NULL for text chunks)",
+ json_schema_extra={"label": "Embedding", "db_type": "vector(1536)"},
)
-registerModelLabels(
- "ContentChunk",
- {"en": "Content Chunk", "fr": "Fragment de contenu"},
- {
- "id": {"en": "ID", "fr": "ID"},
- "contentObjectId": {"en": "Content Object ID", "fr": "ID de l'objet de contenu"},
- "fileId": {"en": "File ID", "fr": "ID du fichier"},
- "userId": {"en": "User ID", "fr": "ID utilisateur"},
- "featureInstanceId": {"en": "Feature Instance ID", "fr": "ID de l'instance"},
- "contentType": {"en": "Content Type", "fr": "Type de contenu"},
- "data": {"en": "Data", "fr": "Données"},
- "contextRef": {"en": "Context Reference", "fr": "Référence contextuelle"},
- "summary": {"en": "Summary", "fr": "Résumé"},
- "chunkMetadata": {"en": "Metadata", "fr": "Métadonnées"},
- "embedding": {"en": "Embedding", "fr": "Vecteur d'embedding"},
- },
-)
-
-
+@i18nModel("Runden-Speicher")
class RoundMemory(PowerOnModel):
- """Persistent per-round memory for agent tool results, file refs, and decisions.
-
- Stored after each agent round so that RAG can retrieve relevant context
- even after the ConversationManager summarises older messages away.
- """
- id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Primary key")
- workflowId: str = Field(description="FK to the workflow")
- roundNumber: int = Field(default=0, description="Agent round that produced this memory")
- memoryType: str = Field(
- description="Category: file_ref, tool_result, decision, data_source_ref"
+ """Persistenter Speicher pro Agenten-Runde."""
+ id: str = Field(
+ default_factory=lambda: str(uuid.uuid4()),
+ description="Primary key",
+ json_schema_extra={"label": "ID"},
+ )
+ workflowId: str = Field(
+ description="FK to the workflow",
+ json_schema_extra={"label": "Workflow-ID"},
+ )
+ roundNumber: int = Field(
+ default=0,
+ description="Agent round that produced this memory",
+ json_schema_extra={"label": "Rundennummer"},
+ )
+ memoryType: str = Field(
+ description="Category: file_ref, tool_result, decision, data_source_ref",
+ json_schema_extra={"label": "Speichertyp"},
+ )
+ key: str = Field(
+ description="Dedup key, e.g. 'readFile:' or 'plan'",
+ json_schema_extra={"label": "Schluessel"},
+ )
+ summary: str = Field(
+ default="",
+ description="Compact summary (max ~2000 chars)",
+ json_schema_extra={"label": "Zusammenfassung"},
)
- key: str = Field(description="Dedup key, e.g. 'readFile:' or 'plan'")
- summary: str = Field(default="", description="Compact summary (max ~2000 chars)")
fullData: Optional[str] = Field(
default=None,
description="Full tool output when small enough (max ~8000 chars)",
+ json_schema_extra={"label": "Volldaten"},
+ )
+ fileIds: List[str] = Field(
+ default_factory=list,
+ description="Referenced file IDs",
+ json_schema_extra={"label": "Datei-IDs"},
)
- fileIds: List[str] = Field(default_factory=list, description="Referenced file IDs")
embedding: Optional[List[float]] = Field(
default=None,
description="Embedding of summary for semantic retrieval",
- json_schema_extra={"db_type": "vector(1536)"},
+ json_schema_extra={"label": "Embedding", "db_type": "vector(1536)"},
)
-registerModelLabels(
- "RoundMemory",
- {"en": "Round Memory", "fr": "Mémoire de tour"},
- {
- "id": {"en": "ID", "fr": "ID"},
- "workflowId": {"en": "Workflow ID", "fr": "ID du workflow"},
- "roundNumber": {"en": "Round Number", "fr": "Numéro de tour"},
- "memoryType": {"en": "Memory Type", "fr": "Type de mémoire"},
- "key": {"en": "Key", "fr": "Clé"},
- "summary": {"en": "Summary", "fr": "Résumé"},
- "fullData": {"en": "Full Data", "fr": "Données complètes"},
- "fileIds": {"en": "File IDs", "fr": "IDs de fichier"},
- "embedding": {"en": "Embedding", "fr": "Vecteur d'embedding"},
- },
-)
-
-
+@i18nModel("Workflow-Speicher")
class WorkflowMemory(PowerOnModel):
- """Workflow-scoped key-value cache for entities and facts.
- Extracted during agent rounds, persisted for cross-round and cross-workflow reuse."""
- id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Primary key")
- workflowId: str = Field(description="FK to the workflow")
- userId: str = Field(description="Owner user ID")
- featureInstanceId: str = Field(default="", description="Feature instance scope")
- key: str = Field(description="Key identifier (e.g. 'entity:companyName')")
- value: str = Field(description="Extracted value")
- source: str = Field(default="extraction", description="Origin: extraction, tool, conversation, summary")
- embedding: Optional[List[float]] = Field(
- default=None, description="Optional embedding for semantic lookup",
- json_schema_extra={"db_type": "vector(1536)"}
+ """Workflow-spezifischer Key-Value-Cache fuer Entitaeten und Fakten."""
+ id: str = Field(
+ default_factory=lambda: str(uuid.uuid4()),
+ description="Primary key",
+ json_schema_extra={"label": "ID"},
+ )
+ workflowId: str = Field(
+ description="FK to the workflow",
+ json_schema_extra={"label": "Workflow-ID"},
+ )
+ userId: str = Field(
+ description="Owner user ID",
+ json_schema_extra={"label": "Benutzer-ID"},
+ )
+ featureInstanceId: str = Field(
+ default="",
+ description="Feature instance scope",
+ json_schema_extra={"label": "Feature-Instanz-ID"},
+ )
+ key: str = Field(
+ description="Key identifier (e.g. 'entity:companyName')",
+ json_schema_extra={"label": "Schluessel"},
+ )
+ value: str = Field(
+ description="Extracted value",
+ json_schema_extra={"label": "Wert"},
+ )
+ source: str = Field(
+ default="extraction",
+ description="Origin: extraction, tool, conversation, summary",
+ json_schema_extra={"label": "Quelle"},
+ )
+ embedding: Optional[List[float]] = Field(
+ default=None,
+ description="Optional embedding for semantic lookup",
+ json_schema_extra={"label": "Embedding", "db_type": "vector(1536)"},
)
-
-
-registerModelLabels(
- "WorkflowMemory",
- {"en": "Workflow Memory", "fr": "Mémoire de workflow"},
- {
- "id": {"en": "ID", "fr": "ID"},
- "workflowId": {"en": "Workflow ID", "fr": "ID du workflow"},
- "userId": {"en": "User ID", "fr": "ID utilisateur"},
- "featureInstanceId": {"en": "Feature Instance ID", "fr": "ID de l'instance"},
- "key": {"en": "Key", "fr": "Clé"},
- "value": {"en": "Value", "fr": "Valeur"},
- "source": {"en": "Source", "fr": "Source"},
- "embedding": {"en": "Embedding", "fr": "Vecteur d'embedding"},
- },
-)
diff --git a/modules/datamodels/datamodelMembership.py b/modules/datamodels/datamodelMembership.py
index ce753d15..29fe5881 100644
--- a/modules/datamodels/datamodelMembership.py
+++ b/modules/datamodels/datamodelMembership.py
@@ -10,9 +10,10 @@ Rollen werden über Junction Tables verknüpft für saubere CASCADE DELETE.
import uuid
from pydantic import BaseModel, Field
from modules.datamodels.datamodelBase import PowerOnModel
-from modules.shared.attributeUtils import registerModelLabels
+from modules.shared.i18nRegistry import i18nModel
+@i18nModel("Benutzer-Mandant")
class UserMandate(PowerOnModel):
"""
User-Mitgliedschaft in einem Mandanten.
@@ -21,36 +22,24 @@ class UserMandate(PowerOnModel):
id: str = Field(
default_factory=lambda: str(uuid.uuid4()),
description="Unique ID of the user-mandate membership",
- json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_visible": False, "frontend_required": False}
+ json_schema_extra={"label": "ID", "frontend_type": "text", "frontend_readonly": True, "frontend_visible": False, "frontend_required": False}
)
userId: str = Field(
description="FK → User.id (CASCADE DELETE)",
- json_schema_extra={"frontend_type": "select", "frontend_readonly": False, "frontend_required": True, "frontend_fk_source": "/api/users/", "frontend_fk_display_field": "username"}
+ json_schema_extra={"label": "Benutzer", "frontend_type": "select", "frontend_readonly": False, "frontend_required": True, "frontend_fk_source": "/api/users/", "frontend_fk_display_field": "username"}
)
mandateId: str = Field(
description="FK → Mandate.id (CASCADE DELETE)",
- json_schema_extra={"frontend_type": "select", "frontend_readonly": False, "frontend_required": True, "frontend_fk_source": "/api/mandates/", "frontend_fk_display_field": "label"}
+ json_schema_extra={"label": "Mandant", "frontend_type": "select", "frontend_readonly": False, "frontend_required": True, "frontend_fk_source": "/api/mandates/", "frontend_fk_display_field": "label"}
)
enabled: bool = Field(
default=True,
description="Whether this membership is enabled",
- json_schema_extra={"frontend_type": "checkbox", "frontend_readonly": False, "frontend_required": False}
+ json_schema_extra={"label": "Aktiviert", "frontend_type": "checkbox", "frontend_readonly": False, "frontend_required": False}
)
- # Rollen werden via Junction Table UserMandateRole verknüpft
-
-
-registerModelLabels(
- "UserMandate",
- {"en": "User Mandate", "de": "Benutzer-Mandant", "fr": "Mandat utilisateur"},
- {
- "id": {"en": "ID", "de": "ID", "fr": "ID"},
- "userId": {"en": "User", "de": "Benutzer", "fr": "Utilisateur"},
- "mandateId": {"en": "Mandate", "de": "Mandant", "fr": "Mandat"},
- "enabled": {"en": "Enabled", "de": "Aktiviert", "fr": "Activé"},
- },
-)
+@i18nModel("Feature-Zugang")
class FeatureAccess(PowerOnModel):
"""
User-Zugriff auf eine Feature-Instanz.
@@ -59,36 +48,24 @@ class FeatureAccess(PowerOnModel):
id: str = Field(
default_factory=lambda: str(uuid.uuid4()),
description="Unique ID of the feature access",
- json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_visible": False, "frontend_required": False}
+ json_schema_extra={"label": "ID", "frontend_type": "text", "frontend_readonly": True, "frontend_visible": False, "frontend_required": False}
)
userId: str = Field(
description="FK → User.id (CASCADE DELETE)",
- json_schema_extra={"frontend_type": "select", "frontend_readonly": False, "frontend_required": True, "frontend_fk_source": "/api/users/", "frontend_fk_display_field": "username"}
+ json_schema_extra={"label": "Benutzer", "frontend_type": "select", "frontend_readonly": False, "frontend_required": True, "frontend_fk_source": "/api/users/", "frontend_fk_display_field": "username"}
)
featureInstanceId: str = Field(
description="FK → FeatureInstance.id (CASCADE DELETE)",
- json_schema_extra={"frontend_type": "select", "frontend_readonly": False, "frontend_required": True, "frontend_fk_source": "/api/feature-instances/", "frontend_fk_display_field": "name"}
+ json_schema_extra={"label": "Feature-Instanz", "frontend_type": "select", "frontend_readonly": False, "frontend_required": True, "frontend_fk_source": "/api/features/instances", "frontend_fk_display_field": "label"}
)
enabled: bool = Field(
default=True,
description="Whether this feature access is enabled",
- json_schema_extra={"frontend_type": "checkbox", "frontend_readonly": False, "frontend_required": False}
+ json_schema_extra={"label": "Aktiviert", "frontend_type": "checkbox", "frontend_readonly": False, "frontend_required": False}
)
- # Rollen werden via Junction Table FeatureAccessRole verknüpft
-
-
-registerModelLabels(
- "FeatureAccess",
- {"en": "Feature Access", "de": "Feature-Zugang", "fr": "Accès fonctionnalité"},
- {
- "id": {"en": "ID", "de": "ID", "fr": "ID"},
- "userId": {"en": "User", "de": "Benutzer", "fr": "Utilisateur"},
- "featureInstanceId": {"en": "Feature Instance", "de": "Feature-Instanz", "fr": "Instance"},
- "enabled": {"en": "Enabled", "de": "Aktiviert", "fr": "Activé"},
- },
-)
+@i18nModel("Benutzer-Mandant-Rolle")
class UserMandateRole(PowerOnModel):
"""
Junction Table: UserMandate zu Role.
@@ -97,29 +74,19 @@ class UserMandateRole(PowerOnModel):
id: str = Field(
default_factory=lambda: str(uuid.uuid4()),
description="Unique ID of the junction record",
- json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_visible": False, "frontend_required": False}
+ json_schema_extra={"label": "ID", "frontend_type": "text", "frontend_readonly": True, "frontend_visible": False, "frontend_required": False}
)
userMandateId: str = Field(
description="FK → UserMandate.id (CASCADE DELETE)",
- json_schema_extra={"frontend_type": "select", "frontend_readonly": False, "frontend_required": True, "frontend_fk_source": "/api/user-mandates/", "frontend_fk_display_field": "userId"}
+ json_schema_extra={"label": "Benutzer-Mandant", "frontend_type": "text", "frontend_readonly": True, "frontend_required": True}
)
roleId: str = Field(
description="FK → Role.id (CASCADE DELETE)",
- json_schema_extra={"frontend_type": "select", "frontend_readonly": False, "frontend_required": True, "frontend_fk_source": "/api/rbac/roles", "frontend_fk_display_field": "roleLabel"}
+ json_schema_extra={"label": "Rolle", "frontend_type": "select", "frontend_readonly": False, "frontend_required": True, "frontend_fk_source": "/api/rbac/roles", "frontend_fk_display_field": "roleLabel"}
)
-registerModelLabels(
- "UserMandateRole",
- {"en": "User Mandate Role", "de": "Benutzer-Mandant-Rolle", "fr": "Rôle mandat utilisateur"},
- {
- "id": {"en": "ID", "de": "ID", "fr": "ID"},
- "userMandateId": {"en": "User Mandate", "de": "Benutzer-Mandant", "fr": "Mandat utilisateur"},
- "roleId": {"en": "Role", "de": "Rolle", "fr": "Rôle"},
- },
-)
-
-
+@i18nModel("Feature-Zugang-Rolle")
class FeatureAccessRole(PowerOnModel):
"""
Junction Table: FeatureAccess zu Role.
@@ -128,24 +95,13 @@ class FeatureAccessRole(PowerOnModel):
id: str = Field(
default_factory=lambda: str(uuid.uuid4()),
description="Unique ID of the junction record",
- json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_visible": False, "frontend_required": False}
+ json_schema_extra={"label": "ID", "frontend_type": "text", "frontend_readonly": True, "frontend_visible": False, "frontend_required": False}
)
featureAccessId: str = Field(
description="FK → FeatureAccess.id (CASCADE DELETE)",
- json_schema_extra={"frontend_type": "select", "frontend_readonly": False, "frontend_required": True, "frontend_fk_source": "/api/feature-access/", "frontend_fk_display_field": "userId"}
+ json_schema_extra={"label": "Feature-Zugang", "frontend_type": "text", "frontend_readonly": True, "frontend_required": True}
)
roleId: str = Field(
description="FK → Role.id (CASCADE DELETE)",
- json_schema_extra={"frontend_type": "select", "frontend_readonly": False, "frontend_required": True, "frontend_fk_source": "/api/rbac/roles", "frontend_fk_display_field": "roleLabel"}
+ json_schema_extra={"label": "Rolle", "frontend_type": "select", "frontend_readonly": False, "frontend_required": True, "frontend_fk_source": "/api/rbac/roles", "frontend_fk_display_field": "roleLabel"}
)
-
-
-registerModelLabels(
- "FeatureAccessRole",
- {"en": "Feature Access Role", "de": "Feature-Zugang-Rolle", "fr": "Rôle accès fonctionnalité"},
- {
- "id": {"en": "ID", "de": "ID", "fr": "ID"},
- "featureAccessId": {"en": "Feature Access", "de": "Feature-Zugang", "fr": "Accès fonctionnalité"},
- "roleId": {"en": "Role", "de": "Rolle", "fr": "Rôle"},
- },
-)
diff --git a/modules/datamodels/datamodelMessaging.py b/modules/datamodels/datamodelMessaging.py
index ebacc9d4..1a32a09e 100644
--- a/modules/datamodels/datamodelMessaging.py
+++ b/modules/datamodels/datamodelMessaging.py
@@ -7,7 +7,7 @@ from typing import Optional
from enum import Enum
from pydantic import BaseModel, Field, ConfigDict
from modules.datamodels.datamodelBase import PowerOnModel
-from modules.shared.attributeUtils import registerModelLabels
+from modules.shared.i18nRegistry import i18nModel
class MessagingChannel(str, Enum):
@@ -26,86 +26,137 @@ class DeliveryStatus(str, Enum):
FAILED = "failed"
+@i18nModel("Messaging-Abonnement")
class MessagingSubscription(PowerOnModel):
"""Data model for messaging subscriptions"""
id: str = Field(
default_factory=lambda: str(uuid.uuid4()),
description="Unique ID of the subscription",
- json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False}
+ json_schema_extra={
+ "frontend_type": "text",
+ "frontend_readonly": True,
+ "frontend_required": False,
+ "label": "ID",
+ },
)
subscriptionId: str = Field(
description="Unique subscription identifier (e.g., 'system_errors', 'audit_login')",
- json_schema_extra={"frontend_type": "text", "frontend_readonly": False, "frontend_required": True}
+ json_schema_extra={
+ "frontend_type": "text",
+ "frontend_readonly": False,
+ "frontend_required": True,
+ "label": "Abonnement-ID",
+ },
)
subscriptionLabel: str = Field(
description="Display name of the subscription",
- json_schema_extra={"frontend_type": "text", "frontend_readonly": False, "frontend_required": True}
+ json_schema_extra={
+ "frontend_type": "text",
+ "frontend_readonly": False,
+ "frontend_required": True,
+ "label": "Bezeichnung",
+ },
)
mandateId: str = Field(
description="ID of the mandate this subscription belongs to",
- json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False}
+ json_schema_extra={
+ "frontend_type": "text",
+ "frontend_readonly": True,
+ "frontend_required": False,
+ "label": "Mandanten-ID",
+ },
)
featureInstanceId: str = Field(
description="ID of the feature instance this subscription belongs to",
- json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False}
+ json_schema_extra={
+ "frontend_type": "text",
+ "frontend_readonly": True,
+ "frontend_required": False,
+ "label": "Feature-Instanz-ID",
+ },
)
description: Optional[str] = Field(
default=None,
description="Description of the subscription",
- json_schema_extra={"frontend_type": "textarea", "frontend_readonly": False, "frontend_required": False}
+ json_schema_extra={
+ "frontend_type": "textarea",
+ "frontend_readonly": False,
+ "frontend_required": False,
+ "label": "Beschreibung",
+ },
)
isSystemSubscription: bool = Field(
default=False,
description="Whether this is a system subscription (only admin can create)",
- json_schema_extra={"frontend_type": "checkbox", "frontend_readonly": True, "frontend_required": False}
+ json_schema_extra={
+ "frontend_type": "checkbox",
+ "frontend_readonly": True,
+ "frontend_required": False,
+ "label": "System-Abonnement",
+ },
)
enabled: bool = Field(
default=True,
description="Whether the subscription is enabled",
- json_schema_extra={"frontend_type": "checkbox", "frontend_readonly": False, "frontend_required": False}
+ json_schema_extra={
+ "frontend_type": "checkbox",
+ "frontend_readonly": False,
+ "frontend_required": False,
+ "label": "Aktiviert",
+ },
)
model_config = ConfigDict(use_enum_values=True)
-registerModelLabels(
- "MessagingSubscription",
- {"en": "Messaging Subscription", "fr": "Abonnement de messagerie"},
- {
- "id": {"en": "ID", "fr": "ID"},
- "subscriptionId": {"en": "Subscription ID", "fr": "ID d'abonnement"},
- "subscriptionLabel": {"en": "Subscription Label", "fr": "Label d'abonnement"},
- "mandateId": {"en": "Mandate ID", "fr": "ID du mandat"},
- "featureInstanceId": {"en": "Feature Instance ID", "fr": "ID de l'instance de fonctionnalité"},
- "description": {"en": "Description", "fr": "Description"},
- "isSystemSubscription": {"en": "System Subscription", "fr": "Abonnement système"},
- "enabled": {"en": "Enabled", "fr": "Activé"},
- },
-)
-
-
+@i18nModel("Messaging-Registrierung")
class MessagingSubscriptionRegistration(BaseModel):
"""Data model for user registrations to messaging subscriptions"""
id: str = Field(
default_factory=lambda: str(uuid.uuid4()),
description="Unique ID of the registration",
- json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False}
+ json_schema_extra={
+ "frontend_type": "text",
+ "frontend_readonly": True,
+ "frontend_required": False,
+ "label": "ID",
+ },
)
mandateId: str = Field(
description="ID of the mandate this registration belongs to",
- json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False}
+ json_schema_extra={
+ "frontend_type": "text",
+ "frontend_readonly": True,
+ "frontend_required": False,
+ "label": "Mandanten-ID",
+ },
)
featureInstanceId: str = Field(
description="ID of the feature instance this registration belongs to",
- json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False}
+ json_schema_extra={
+ "frontend_type": "text",
+ "frontend_readonly": True,
+ "frontend_required": False,
+ "label": "Feature-Instanz-ID",
+ },
)
subscriptionId: str = Field(
description="ID of the subscription this registration belongs to",
- json_schema_extra={"frontend_type": "text", "frontend_readonly": False, "frontend_required": True}
+ json_schema_extra={
+ "frontend_type": "text",
+ "frontend_readonly": False,
+ "frontend_required": True,
+ "label": "Abonnement-ID",
+ },
)
userId: str = Field(
description="ID of the user registered to this subscription",
- json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False}
+ json_schema_extra={
+ "frontend_type": "text",
+ "frontend_readonly": True,
+ "frontend_required": False,
+ "label": "Benutzer-ID",
+ },
)
channel: MessagingChannel = Field(
description="Channel type for this registration",
@@ -114,65 +165,86 @@ class MessagingSubscriptionRegistration(BaseModel):
"frontend_readonly": False,
"frontend_required": True,
"frontend_options": [
- {"value": "email", "label": {"en": "Email", "fr": "Email"}},
- {"value": "sms", "label": {"en": "SMS", "fr": "SMS"}},
- {"value": "whatsapp", "label": {"en": "WhatsApp", "fr": "WhatsApp"}},
- {"value": "teams_chat", "label": {"en": "Teams Chat", "fr": "Chat Teams"}}
- ]
- }
+ {"value": "email", "label": "Email"},
+ {"value": "sms", "label": "SMS"},
+ {"value": "whatsapp", "label": "WhatsApp"},
+ {"value": "teams_chat", "label": "Teams Chat"},
+ ],
+ "label": "Kanal",
+ },
)
channelConfig: str = Field(
default="",
description="Channel-specific configuration (e.g., email address, phone number, Teams user ID)",
- json_schema_extra={"frontend_type": "text", "frontend_readonly": False, "frontend_required": False}
+ json_schema_extra={
+ "frontend_type": "text",
+ "frontend_readonly": False,
+ "frontend_required": False,
+ "label": "Kanal-Konfiguration",
+ },
)
enabled: bool = Field(
default=True,
description="Whether this registration is enabled",
- json_schema_extra={"frontend_type": "checkbox", "frontend_readonly": False, "frontend_required": False}
+ json_schema_extra={
+ "frontend_type": "checkbox",
+ "frontend_readonly": False,
+ "frontend_required": False,
+ "label": "Aktiviert",
+ },
)
model_config = ConfigDict(use_enum_values=True)
-registerModelLabels(
- "MessagingSubscriptionRegistration",
- {"en": "Messaging Registration", "fr": "Inscription à la messagerie"},
- {
- "id": {"en": "ID", "fr": "ID"},
- "mandateId": {"en": "Mandate ID", "fr": "ID du mandat"},
- "featureInstanceId": {"en": "Feature Instance ID", "fr": "ID de l'instance de fonctionnalité"},
- "subscriptionId": {"en": "Subscription ID", "fr": "ID d'abonnement"},
- "userId": {"en": "User ID", "fr": "ID utilisateur"},
- "channel": {"en": "Channel", "fr": "Canal"},
- "channelConfig": {"en": "Channel Config", "fr": "Configuration du canal"},
- "enabled": {"en": "Enabled", "fr": "Activé"},
- },
-)
-
-
+@i18nModel("Messaging-Zustellung")
class MessagingDelivery(BaseModel):
"""Data model for individual message deliveries"""
id: str = Field(
default_factory=lambda: str(uuid.uuid4()),
description="Unique ID of the delivery",
- json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False}
+ json_schema_extra={
+ "frontend_type": "text",
+ "frontend_readonly": True,
+ "frontend_required": False,
+ "label": "ID",
+ },
)
mandateId: str = Field(
description="ID of the mandate this delivery belongs to",
- json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False}
+ json_schema_extra={
+ "frontend_type": "text",
+ "frontend_readonly": True,
+ "frontend_required": False,
+ "label": "Mandanten-ID",
+ },
)
featureInstanceId: str = Field(
description="ID of the feature instance this delivery belongs to",
- json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False}
+ json_schema_extra={
+ "frontend_type": "text",
+ "frontend_readonly": True,
+ "frontend_required": False,
+ "label": "Feature-Instanz-ID",
+ },
)
subscriptionId: str = Field(
description="ID of the subscription this delivery belongs to",
- json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False}
+ json_schema_extra={
+ "frontend_type": "text",
+ "frontend_readonly": True,
+ "frontend_required": False,
+ "label": "Abonnement-ID",
+ },
)
userId: str = Field(
description="ID of the user receiving this delivery",
- json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False}
+ json_schema_extra={
+ "frontend_type": "text",
+ "frontend_readonly": True,
+ "frontend_required": False,
+ "label": "Benutzer-ID",
+ },
)
channel: MessagingChannel = Field(
description="Channel used for this delivery",
@@ -181,12 +253,13 @@ class MessagingDelivery(BaseModel):
"frontend_readonly": True,
"frontend_required": False,
"frontend_options": [
- {"value": "email", "label": {"en": "Email", "fr": "Email"}},
- {"value": "sms", "label": {"en": "SMS", "fr": "SMS"}},
- {"value": "whatsapp", "label": {"en": "WhatsApp", "fr": "WhatsApp"}},
- {"value": "teams_chat", "label": {"en": "Teams Chat", "fr": "Chat Teams"}}
- ]
- }
+ {"value": "email", "label": "Email"},
+ {"value": "sms", "label": "SMS"},
+ {"value": "whatsapp", "label": "WhatsApp"},
+ {"value": "teams_chat", "label": "Teams Chat"},
+ ],
+ "label": "Kanal",
+ },
)
status: DeliveryStatus = Field(
default=DeliveryStatus.PENDING,
@@ -196,114 +269,115 @@ class MessagingDelivery(BaseModel):
"frontend_readonly": True,
"frontend_required": False,
"frontend_options": [
- {"value": "pending", "label": {"en": "Pending", "fr": "En attente"}},
- {"value": "sent", "label": {"en": "Sent", "fr": "Envoyé"}},
- {"value": "failed", "label": {"en": "Failed", "fr": "Échoué"}}
- ]
- }
+ {"value": "pending", "label": "Pending"},
+ {"value": "sent", "label": "Sent"},
+ {"value": "failed", "label": "Failed"},
+ ],
+ "label": "Status",
+ },
)
errorMessage: Optional[str] = Field(
default=None,
description="Error message if delivery failed",
- json_schema_extra={"frontend_type": "textarea", "frontend_readonly": True, "frontend_required": False}
+ json_schema_extra={
+ "frontend_type": "textarea",
+ "frontend_readonly": True,
+ "frontend_required": False,
+ "label": "Fehlermeldung",
+ },
)
sentAt: Optional[float] = Field(
default=None,
description="When the delivery was sent (UTC timestamp in seconds)",
- json_schema_extra={"frontend_type": "datetime", "frontend_readonly": True, "frontend_required": False}
+ json_schema_extra={
+ "frontend_type": "datetime",
+ "frontend_readonly": True,
+ "frontend_required": False,
+ "label": "Gesendet am",
+ },
)
model_config = ConfigDict(use_enum_values=True)
-registerModelLabels(
- "MessagingDelivery",
- {"en": "Messaging Delivery", "fr": "Livraison de messagerie"},
- {
- "id": {"en": "ID", "fr": "ID"},
- "mandateId": {"en": "Mandate ID", "fr": "ID du mandat"},
- "featureInstanceId": {"en": "Feature Instance ID", "fr": "ID de l'instance de fonctionnalité"},
- "subscriptionId": {"en": "Subscription ID", "fr": "ID d'abonnement"},
- "userId": {"en": "User ID", "fr": "ID utilisateur"},
- "channel": {"en": "Channel", "fr": "Canal"},
- "status": {"en": "Status", "fr": "Statut"},
- "errorMessage": {"en": "Error Message", "fr": "Message d'erreur"},
- "sentAt": {"en": "Sent At", "fr": "Envoyé le"},
- },
-)
-
-
+@i18nModel("Messaging-Ereignisparameter")
class MessagingEventParameters(BaseModel):
"""Data model for event parameters passed to subscription functions"""
triggerData: dict = Field(
default_factory=dict,
description="Event data from trigger as dictionary/JSON",
- json_schema_extra={"frontend_type": "json", "frontend_readonly": False, "frontend_required": False}
+ json_schema_extra={
+ "frontend_type": "json",
+ "frontend_readonly": False,
+ "frontend_required": False,
+ "label": "Trigger-Daten",
+ },
)
-registerModelLabels(
- "MessagingEventParameters",
- {"en": "Messaging Event Parameters", "fr": "Paramètres d'événement de messagerie"},
- {
- "triggerData": {"en": "Trigger Data", "fr": "Données de déclenchement"},
- },
-)
-
-
-registerModelLabels(
- "MessagingSendResult",
- {"en": "Messaging Send Result", "fr": "Résultat d'envoi de messagerie"},
- {
- "success": {"en": "Success", "fr": "Succès"},
- "deliveryId": {"en": "Delivery ID", "fr": "ID de livraison"},
- "errorMessage": {"en": "Error Message", "fr": "Message d'erreur"},
- },
-)
-
-
-registerModelLabels(
- "MessagingSubscriptionExecutionResult",
- {"en": "Messaging Subscription Execution Result", "fr": "Résultat d'exécution d'abonnement"},
- {
- "success": {"en": "Success", "fr": "Succès"},
- "messagesSent": {"en": "Messages Sent", "fr": "Messages envoyés"},
- "errorMessage": {"en": "Error Message", "fr": "Message d'erreur"},
- },
-)
-
-
+@i18nModel("Messaging-Sendeergebnis")
class MessagingSendResult(BaseModel):
"""Data model for sendMessage result"""
success: bool = Field(
description="Whether the message was sent successfully",
- json_schema_extra={"frontend_type": "checkbox", "frontend_readonly": True, "frontend_required": True}
+ json_schema_extra={
+ "frontend_type": "checkbox",
+ "frontend_readonly": True,
+ "frontend_required": True,
+ "label": "Erfolg",
+ },
)
deliveryId: Optional[str] = Field(
default=None,
description="ID of the created MessagingDelivery record",
- json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False}
+ json_schema_extra={
+ "frontend_type": "text",
+ "frontend_readonly": True,
+ "frontend_required": False,
+ "label": "Zustellungs-ID",
+ },
)
errorMessage: Optional[str] = Field(
default=None,
description="Error message if sending failed",
- json_schema_extra={"frontend_type": "textarea", "frontend_readonly": True, "frontend_required": False}
+ json_schema_extra={
+ "frontend_type": "textarea",
+ "frontend_readonly": True,
+ "frontend_required": False,
+ "label": "Fehlermeldung",
+ },
)
+@i18nModel("Messaging-Abonnement-Ausführung")
class MessagingSubscriptionExecutionResult(BaseModel):
"""Data model for subscription function execution result"""
success: bool = Field(
description="Whether the subscription execution was successful",
- json_schema_extra={"frontend_type": "checkbox", "frontend_readonly": True, "frontend_required": True}
+ json_schema_extra={
+ "frontend_type": "checkbox",
+ "frontend_readonly": True,
+ "frontend_required": True,
+ "label": "Erfolg",
+ },
)
messagesSent: int = Field(
default=0,
description="Number of messages sent",
- json_schema_extra={"frontend_type": "number", "frontend_readonly": True, "frontend_required": False}
+ json_schema_extra={
+ "frontend_type": "number",
+ "frontend_readonly": True,
+ "frontend_required": False,
+ "label": "Gesendete Nachrichten",
+ },
)
errorMessage: Optional[str] = Field(
default=None,
description="Error message if execution failed",
- json_schema_extra={"frontend_type": "textarea", "frontend_readonly": True, "frontend_required": False}
+ json_schema_extra={
+ "frontend_type": "textarea",
+ "frontend_readonly": True,
+ "frontend_required": False,
+ "label": "Fehlermeldung",
+ },
)
diff --git a/modules/datamodels/datamodelNotification.py b/modules/datamodels/datamodelNotification.py
index f5af0f55..6ff7b52e 100644
--- a/modules/datamodels/datamodelNotification.py
+++ b/modules/datamodels/datamodelNotification.py
@@ -10,7 +10,7 @@ from typing import Optional, List
from enum import Enum
from pydantic import BaseModel, Field, ConfigDict
from modules.datamodels.datamodelBase import PowerOnModel
-from modules.shared.attributeUtils import registerModelLabels
+from modules.shared.i18nRegistry import i18nModel
class NotificationType(str, Enum):
@@ -29,20 +29,25 @@ class NotificationStatus(str, Enum):
DISMISSED = "dismissed" # Verworfen/Geschlossen
+@i18nModel("Benachrichtigungs-Aktion")
class NotificationAction(BaseModel):
"""Possible action for a notification"""
actionId: str = Field(
- description="Unique identifier for the action (e.g., 'accept', 'decline')"
+ description="Unique identifier for the action (e.g., 'accept', 'decline')",
+ json_schema_extra={"label": "Aktions-ID"},
)
label: str = Field(
- description="Display label for the action button"
+ description="Display label for the action button",
+ json_schema_extra={"label": "Bezeichnung"},
)
style: str = Field(
default="default",
- description="Button style: 'primary', 'danger', 'default'"
+ description="Button style: 'primary', 'danger', 'default'",
+ json_schema_extra={"label": "Stil"},
)
+@i18nModel("Benachrichtigung")
class UserNotification(PowerOnModel):
"""
In-app notification for a user.
@@ -51,26 +56,26 @@ class UserNotification(PowerOnModel):
id: str = Field(
default_factory=lambda: str(uuid.uuid4()),
description="Unique ID of the notification",
- json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False}
+ json_schema_extra={"label": "ID", "frontend_type": "text", "frontend_readonly": True, "frontend_required": False}
)
userId: str = Field(
description="Target user ID for this notification",
- json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": True}
+ json_schema_extra={"label": "Benutzer", "frontend_type": "text", "frontend_readonly": True, "frontend_required": True}
)
-
- # Notification type and status
+
type: NotificationType = Field(
default=NotificationType.SYSTEM,
description="Type of notification",
json_schema_extra={
+ "label": "Typ",
"frontend_type": "select",
"frontend_readonly": True,
"frontend_required": True,
"frontend_options": [
- {"value": "invitation", "label": {"en": "Invitation", "de": "Einladung"}},
- {"value": "system", "label": {"en": "System", "de": "System"}},
- {"value": "workflow", "label": {"en": "Workflow", "de": "Workflow"}},
- {"value": "mention", "label": {"en": "Mention", "de": "Erwähnung"}}
+ {"value": "invitation", "label": "Einladung"},
+ {"value": "system", "label": "System"},
+ {"value": "workflow", "label": "Workflow"},
+ {"value": "mention", "label": "Erwähnung"}
]
}
)
@@ -78,126 +83,75 @@ class UserNotification(PowerOnModel):
default=NotificationStatus.UNREAD,
description="Current status of the notification",
json_schema_extra={
+ "label": "Status",
"frontend_type": "select",
"frontend_readonly": True,
"frontend_required": False,
"frontend_options": [
- {"value": "unread", "label": {"en": "Unread", "de": "Ungelesen"}},
- {"value": "read", "label": {"en": "Read", "de": "Gelesen"}},
- {"value": "actioned", "label": {"en": "Actioned", "de": "Bearbeitet"}},
- {"value": "dismissed", "label": {"en": "Dismissed", "de": "Verworfen"}}
+ {"value": "unread", "label": "Ungelesen"},
+ {"value": "read", "label": "Gelesen"},
+ {"value": "actioned", "label": "Bearbeitet"},
+ {"value": "dismissed", "label": "Verworfen"}
]
}
)
-
- # Content
+
title: str = Field(
description="Notification title",
- json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": True}
+ json_schema_extra={"label": "Titel", "frontend_type": "text", "frontend_readonly": True, "frontend_required": True}
)
message: str = Field(
description="Notification message/body",
- json_schema_extra={"frontend_type": "textarea", "frontend_readonly": True, "frontend_required": True}
+ json_schema_extra={"label": "Nachricht", "frontend_type": "textarea", "frontend_readonly": True, "frontend_required": True}
)
icon: Optional[str] = Field(
default=None,
description="Optional icon identifier (e.g., 'mail', 'warning', 'info')",
- json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False}
+ json_schema_extra={"label": "Symbol", "frontend_type": "text", "frontend_readonly": True, "frontend_required": False}
)
-
- # Reference to triggering object (for actionable notifications)
+
referenceType: Optional[str] = Field(
default=None,
description="Type of referenced object (e.g., 'Invitation', 'Workflow')",
- json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False}
+ json_schema_extra={"label": "Referenz-Typ", "frontend_type": "text", "frontend_readonly": True, "frontend_required": False}
)
referenceId: Optional[str] = Field(
default=None,
description="ID of referenced object",
- json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False}
+ json_schema_extra={"label": "Referenz-ID", "frontend_type": "text", "frontend_readonly": True, "frontend_required": False}
)
-
- # Actions (for actionable notifications like invitations)
+
actions: Optional[List[NotificationAction]] = Field(
default=None,
description="List of possible actions for this notification",
- json_schema_extra={"frontend_type": "json", "frontend_readonly": True, "frontend_required": False}
+ json_schema_extra={"label": "Aktionen", "frontend_type": "json", "frontend_readonly": True, "frontend_required": False}
)
-
- # Action result (when user takes action)
+
actionTaken: Optional[str] = Field(
default=None,
description="Which action was taken (actionId)",
- json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False}
+ json_schema_extra={"label": "Durchgefuehrte Aktion", "frontend_type": "text", "frontend_readonly": True, "frontend_required": False}
)
actionResult: Optional[str] = Field(
default=None,
description="Result message from the action",
- json_schema_extra={"frontend_type": "textarea", "frontend_readonly": True, "frontend_required": False}
+ json_schema_extra={"label": "Aktions-Ergebnis", "frontend_type": "textarea", "frontend_readonly": True, "frontend_required": False}
)
-
- # Timestamps
+
readAt: Optional[float] = Field(
default=None,
description="When the notification was read (UTC timestamp)",
- json_schema_extra={"frontend_type": "timestamp", "frontend_readonly": True, "frontend_required": False}
+ json_schema_extra={"label": "Gelesen am", "frontend_type": "timestamp", "frontend_readonly": True, "frontend_required": False}
)
actionedAt: Optional[float] = Field(
default=None,
description="When action was taken (UTC timestamp)",
- json_schema_extra={"frontend_type": "timestamp", "frontend_readonly": True, "frontend_required": False}
+ json_schema_extra={"label": "Bearbeitet am", "frontend_type": "timestamp", "frontend_readonly": True, "frontend_required": False}
)
expiresAt: Optional[float] = Field(
default=None,
description="When the notification expires (optional, UTC timestamp)",
- json_schema_extra={"frontend_type": "timestamp", "frontend_readonly": True, "frontend_required": False}
+ json_schema_extra={"label": "Gueltig bis", "frontend_type": "timestamp", "frontend_readonly": True, "frontend_required": False}
)
-
+
model_config = ConfigDict(use_enum_values=True)
-
-
-registerModelLabels(
- "UserNotification",
- {"en": "Notification", "de": "Benachrichtigung", "fr": "Notification"},
- {
- "id": {"en": "ID", "de": "ID", "fr": "ID"},
- "userId": {"en": "User", "de": "Benutzer", "fr": "Utilisateur"},
- "type": {"en": "Type", "de": "Typ", "fr": "Type"},
- "status": {"en": "Status", "de": "Status", "fr": "Statut"},
- "title": {"en": "Title", "de": "Titel", "fr": "Titre"},
- "message": {"en": "Message", "de": "Nachricht", "fr": "Message"},
- "icon": {"en": "Icon", "de": "Symbol", "fr": "Icône"},
- "referenceType": {"en": "Reference Type", "de": "Referenz-Typ", "fr": "Type de référence"},
- "referenceId": {"en": "Reference ID", "de": "Referenz-ID", "fr": "ID de référence"},
- "actions": {"en": "Actions", "de": "Aktionen", "fr": "Actions"},
- "actionTaken": {"en": "Action Taken", "de": "Durchgeführte Aktion", "fr": "Action effectuée"},
- "actionResult": {"en": "Action Result", "de": "Aktions-Ergebnis", "fr": "Résultat de l'action"},
- "readAt": {"en": "Read At", "de": "Gelesen am", "fr": "Lu le"},
- "actionedAt": {"en": "Actioned At", "de": "Bearbeitet am", "fr": "Traité le"},
- "expiresAt": {"en": "Expires At", "de": "Gültig bis", "fr": "Expire le"},
- },
-)
-
-
-registerModelLabels(
- "NotificationType",
- {"en": "Notification Type", "de": "Benachrichtigungs-Typ", "fr": "Type de notification"},
- {
- "invitation": {"en": "Invitation", "de": "Einladung", "fr": "Invitation"},
- "system": {"en": "System", "de": "System", "fr": "Système"},
- "workflow": {"en": "Workflow", "de": "Workflow", "fr": "Workflow"},
- "mention": {"en": "Mention", "de": "Erwähnung", "fr": "Mention"},
- },
-)
-
-
-registerModelLabels(
- "NotificationStatus",
- {"en": "Notification Status", "de": "Benachrichtigungs-Status", "fr": "Statut de notification"},
- {
- "unread": {"en": "Unread", "de": "Ungelesen", "fr": "Non lu"},
- "read": {"en": "Read", "de": "Gelesen", "fr": "Lu"},
- "actioned": {"en": "Actioned", "de": "Bearbeitet", "fr": "Traité"},
- "dismissed": {"en": "Dismissed", "de": "Verworfen", "fr": "Rejeté"},
- },
-)
diff --git a/modules/datamodels/datamodelRbac.py b/modules/datamodels/datamodelRbac.py
index b9e0cb91..d43b825e 100644
--- a/modules/datamodels/datamodelRbac.py
+++ b/modules/datamodels/datamodelRbac.py
@@ -14,7 +14,7 @@ from typing import Optional
from enum import Enum
from pydantic import BaseModel, Field
from modules.datamodels.datamodelBase import PowerOnModel
-from modules.shared.attributeUtils import registerModelLabels
+from modules.shared.i18nRegistry import i18nModel
from modules.datamodels.datamodelUtils import TextMultilingual
from modules.datamodels.datamodelUam import AccessLevel
@@ -26,6 +26,7 @@ class AccessRuleContext(str, Enum):
RESOURCE = "RESOURCE" # System resources (AI models, actions, etc.)
+@i18nModel("Rolle")
class Role(PowerOnModel):
"""
Data model for RBAC roles.
@@ -41,56 +42,42 @@ class Role(PowerOnModel):
id: str = Field(
default_factory=lambda: str(uuid.uuid4()),
description="Unique ID of the role",
- json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_visible": False, "frontend_required": False}
+ json_schema_extra={"label": "ID", "frontend_type": "text", "frontend_readonly": True, "frontend_visible": False, "frontend_required": False}
)
roleLabel: str = Field(
description="Unique role label identifier (e.g., 'admin', 'user', 'viewer')",
- json_schema_extra={"frontend_type": "text", "frontend_readonly": False, "frontend_required": True}
+ json_schema_extra={"label": "Rollen-Label", "frontend_type": "text", "frontend_readonly": False, "frontend_required": True}
)
description: TextMultilingual = Field(
description="Role description in multiple languages",
- json_schema_extra={"frontend_type": "multilingual", "frontend_readonly": False, "frontend_required": True}
+ json_schema_extra={"label": "Beschreibung", "frontend_type": "multilingual", "frontend_readonly": False, "frontend_required": True}
)
# KONTEXT - IMMUTABLE nach Create (nur Create/Delete, kein Update!)
mandateId: Optional[str] = Field(
default=None,
description="FK → Mandate.id (CASCADE DELETE). Null = Global/Template role.",
- json_schema_extra={"frontend_type": "select", "frontend_readonly": True, "frontend_visible": True, "frontend_required": False, "frontend_fk_source": "/api/mandates/", "frontend_fk_display_field": "label"}
+ json_schema_extra={"label": "Mandant", "frontend_type": "select", "frontend_readonly": True, "frontend_visible": True, "frontend_required": False, "frontend_fk_source": "/api/mandates/", "frontend_fk_display_field": "label"}
)
featureInstanceId: Optional[str] = Field(
default=None,
description="FK → FeatureInstance.id (CASCADE DELETE). Null = Mandate-level or Global role.",
- json_schema_extra={"frontend_type": "select", "frontend_readonly": True, "frontend_visible": True, "frontend_required": False, "frontend_fk_source": "/api/feature-instances/", "frontend_fk_display_field": "name"}
+ json_schema_extra={"label": "Feature-Instanz", "frontend_type": "select", "frontend_readonly": True, "frontend_visible": True, "frontend_required": False, "frontend_fk_source": "/api/features/instances", "frontend_fk_display_field": "label"}
)
featureCode: Optional[str] = Field(
default=None,
description="Feature code (z.B. 'trustee') - für Template-Rollen",
- json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_visible": False, "frontend_required": False}
+ json_schema_extra={"label": "Feature-Code", "frontend_type": "text", "frontend_readonly": True, "frontend_visible": False, "frontend_required": False}
)
-
+
isSystemRole: bool = Field(
default=False,
description="Whether this is a system role that cannot be deleted",
- json_schema_extra={"frontend_type": "checkbox", "frontend_readonly": True, "frontend_required": False}
+ json_schema_extra={"label": "System-Rolle", "frontend_type": "checkbox", "frontend_readonly": True, "frontend_required": False}
)
-registerModelLabels(
- "Role",
- {"en": "Role", "de": "Rolle", "fr": "Rôle"},
- {
- "id": {"en": "ID", "de": "ID", "fr": "ID"},
- "roleLabel": {"en": "Role Label", "de": "Rollen-Label", "fr": "Label du rôle"},
- "description": {"en": "Description", "de": "Beschreibung", "fr": "Description"},
- "mandateId": {"en": "Mandate", "de": "Mandant", "fr": "Mandat"},
- "featureInstanceId": {"en": "Feature Instance", "de": "Feature-Instanz", "fr": "Instance"},
- "featureCode": {"en": "Feature Code", "de": "Feature-Code", "fr": "Code fonctionnalité"},
- "isSystemRole": {"en": "System Role", "de": "System-Rolle", "fr": "Rôle système"},
- },
-)
-
-
+@i18nModel("Zugriffsregel")
class AccessRule(PowerOnModel):
"""
Data model for access control rules.
@@ -101,89 +88,72 @@ class AccessRule(PowerOnModel):
id: str = Field(
default_factory=lambda: str(uuid.uuid4()),
description="Unique ID of the access rule",
- json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_visible": False, "frontend_required": False}
+ json_schema_extra={"label": "ID", "frontend_type": "text", "frontend_readonly": True, "frontend_visible": False, "frontend_required": False}
)
roleId: str = Field(
description="FK → Role.id (CASCADE DELETE!)",
- json_schema_extra={"frontend_type": "select", "frontend_readonly": True, "frontend_required": True, "frontend_fk_source": "/api/rbac/roles", "frontend_fk_display_field": "roleLabel"}
+ json_schema_extra={"label": "Rolle", "frontend_type": "select", "frontend_readonly": True, "frontend_required": True, "frontend_fk_source": "/api/rbac/roles", "frontend_fk_display_field": "roleLabel"}
)
context: AccessRuleContext = Field(
description="Context type: DATA (database), UI (interface), RESOURCE (system resources). IMMUTABLE!",
- json_schema_extra={"frontend_type": "select", "frontend_readonly": True, "frontend_required": True, "frontend_options": [
- {"value": "DATA", "label": {"en": "Data", "de": "Daten", "fr": "Données"}},
- {"value": "UI", "label": {"en": "UI", "de": "Oberfläche", "fr": "Interface"}},
- {"value": "RESOURCE", "label": {"en": "Resource", "de": "Ressource", "fr": "Ressource"}}
+ json_schema_extra={"label": "Kontext", "frontend_type": "select", "frontend_readonly": True, "frontend_required": True, "frontend_options": [
+ {"value": "DATA", "label": "Daten"},
+ {"value": "UI", "label": "Oberfläche"},
+ {"value": "RESOURCE", "label": "Ressource"}
]}
)
item: Optional[str] = Field(
default=None,
description="Item identifier (null = all items in context). Format: DATA: '' or '.', UI: cascading string (e.g., 'playground.voice.settings'), RESOURCE: cascading string (e.g., 'ai.model.anthropic')",
- json_schema_extra={"frontend_type": "text", "frontend_readonly": False, "frontend_required": False}
+ json_schema_extra={"label": "Element", "frontend_type": "text", "frontend_readonly": False, "frontend_required": False}
)
view: bool = Field(
default=False,
description="View permission: if true, item is visible/enabled. Only objects with view=true are shown.",
- json_schema_extra={"frontend_type": "checkbox", "frontend_readonly": False, "frontend_required": True}
+ json_schema_extra={"label": "Anzeigen", "frontend_type": "checkbox", "frontend_readonly": False, "frontend_required": True}
)
read: Optional[AccessLevel] = Field(
default=None,
description="Read permission level (only for DATA context)",
- json_schema_extra={"frontend_type": "select", "frontend_readonly": False, "frontend_required": False, "frontend_options": [
- {"value": "a", "label": {"en": "All Records", "de": "Alle Datensätze", "fr": "Tous les enregistrements"}},
- {"value": "m", "label": {"en": "My Records", "de": "Meine Datensätze", "fr": "Mes enregistrements"}},
- {"value": "g", "label": {"en": "Group Records", "de": "Gruppen-Datensätze", "fr": "Enregistrements du groupe"}},
- {"value": "n", "label": {"en": "No Access", "de": "Kein Zugriff", "fr": "Aucun accès"}}
+ json_schema_extra={"label": "Lesen", "frontend_type": "select", "frontend_readonly": False, "frontend_required": False, "frontend_options": [
+ {"value": "a", "label": "Alle Datensätze"},
+ {"value": "m", "label": "Meine Datensätze"},
+ {"value": "g", "label": "Gruppen-Datensätze"},
+ {"value": "n", "label": "Kein Zugriff"}
]}
)
create: Optional[AccessLevel] = Field(
default=None,
description="Create permission level (only for DATA context)",
- json_schema_extra={"frontend_type": "select", "frontend_readonly": False, "frontend_required": False, "frontend_options": [
- {"value": "a", "label": {"en": "All Records", "de": "Alle Datensätze", "fr": "Tous les enregistrements"}},
- {"value": "m", "label": {"en": "My Records", "de": "Meine Datensätze", "fr": "Mes enregistrements"}},
- {"value": "g", "label": {"en": "Group Records", "de": "Gruppen-Datensätze", "fr": "Enregistrements du groupe"}},
- {"value": "n", "label": {"en": "No Access", "de": "Kein Zugriff", "fr": "Aucun accès"}}
+ json_schema_extra={"label": "Erstellen", "frontend_type": "select", "frontend_readonly": False, "frontend_required": False, "frontend_options": [
+ {"value": "a", "label": "Alle Datensätze"},
+ {"value": "m", "label": "Meine Datensätze"},
+ {"value": "g", "label": "Gruppen-Datensätze"},
+ {"value": "n", "label": "Kein Zugriff"}
]}
)
update: Optional[AccessLevel] = Field(
default=None,
description="Update permission level (only for DATA context)",
- json_schema_extra={"frontend_type": "select", "frontend_readonly": False, "frontend_required": False, "frontend_options": [
- {"value": "a", "label": {"en": "All Records", "de": "Alle Datensätze", "fr": "Tous les enregistrements"}},
- {"value": "m", "label": {"en": "My Records", "de": "Meine Datensätze", "fr": "Mes enregistrements"}},
- {"value": "g", "label": {"en": "Group Records", "de": "Gruppen-Datensätze", "fr": "Enregistrements du groupe"}},
- {"value": "n", "label": {"en": "No Access", "de": "Kein Zugriff", "fr": "Aucun accès"}}
+ json_schema_extra={"label": "Aktualisieren", "frontend_type": "select", "frontend_readonly": False, "frontend_required": False, "frontend_options": [
+ {"value": "a", "label": "Alle Datensätze"},
+ {"value": "m", "label": "Meine Datensätze"},
+ {"value": "g", "label": "Gruppen-Datensätze"},
+ {"value": "n", "label": "Kein Zugriff"}
]}
)
delete: Optional[AccessLevel] = Field(
default=None,
description="Delete permission level (only for DATA context)",
- json_schema_extra={"frontend_type": "select", "frontend_readonly": False, "frontend_required": False, "frontend_options": [
- {"value": "a", "label": {"en": "All Records", "de": "Alle Datensätze", "fr": "Tous les enregistrements"}},
- {"value": "m", "label": {"en": "My Records", "de": "Meine Datensätze", "fr": "Mes enregistrements"}},
- {"value": "g", "label": {"en": "Group Records", "de": "Gruppen-Datensätze", "fr": "Enregistrements du groupe"}},
- {"value": "n", "label": {"en": "No Access", "de": "Kein Zugriff", "fr": "Aucun accès"}}
+ json_schema_extra={"label": "Loeschen", "frontend_type": "select", "frontend_readonly": False, "frontend_required": False, "frontend_options": [
+ {"value": "a", "label": "Alle Datensätze"},
+ {"value": "m", "label": "Meine Datensätze"},
+ {"value": "g", "label": "Gruppen-Datensätze"},
+ {"value": "n", "label": "Kein Zugriff"}
]}
)
-registerModelLabels(
- "AccessRule",
- {"en": "Access Rule", "de": "Zugriffsregel", "fr": "Règle d'accès"},
- {
- "id": {"en": "ID", "de": "ID", "fr": "ID"},
- "roleId": {"en": "Role", "de": "Rolle", "fr": "Rôle"},
- "context": {"en": "Context", "de": "Kontext", "fr": "Contexte"},
- "item": {"en": "Item", "de": "Element", "fr": "Élément"},
- "view": {"en": "View", "de": "Anzeigen", "fr": "Vue"},
- "read": {"en": "Read", "de": "Lesen", "fr": "Lecture"},
- "create": {"en": "Create", "de": "Erstellen", "fr": "Créer"},
- "update": {"en": "Update", "de": "Aktualisieren", "fr": "Mettre à jour"},
- "delete": {"en": "Delete", "de": "Löschen", "fr": "Supprimer"},
- },
-)
-
-
# IMMUTABLE Fields Definition - für Enforcement auf Application-Level
IMMUTABLE_FIELDS = {
"Role": ["mandateId", "featureInstanceId", "featureCode"],
diff --git a/modules/datamodels/datamodelSecurity.py b/modules/datamodels/datamodelSecurity.py
index dc8c26e6..52237226 100644
--- a/modules/datamodels/datamodelSecurity.py
+++ b/modules/datamodels/datamodelSecurity.py
@@ -12,7 +12,7 @@ Multi-Tenant Design:
from typing import Optional, Any
from pydantic import BaseModel, Field, ConfigDict, model_validator
from modules.datamodels.datamodelBase import PowerOnModel
-from modules.shared.attributeUtils import registerModelLabels
+from modules.shared.i18nRegistry import i18nModel
from modules.shared.timeUtils import getUtcTimestamp
from .datamodelUam import AuthAuthority
from enum import Enum
@@ -31,46 +31,79 @@ class TokenPurpose(str, Enum):
DATA_CONNECTION = "dataConnection"
+@i18nModel("Token")
class Token(PowerOnModel):
"""
Authentication Token model.
-
+
Multi-Tenant Design:
- Token ist User-gebunden, NICHT Mandant-gebunden
- Ermöglicht parallele Arbeit in mehreren Mandanten
- Mandant-Kontext wird per Request-Header bestimmt
"""
- id: Optional[str] = None
- userId: str
- authority: AuthAuthority
+ id: Optional[str] = Field(
+ default=None,
+ json_schema_extra={"label": "ID"},
+ )
+ userId: str = Field(
+ ...,
+ json_schema_extra={"label": "Benutzer-ID"},
+ )
+ authority: AuthAuthority = Field(
+ ...,
+ json_schema_extra={"label": "Autoritaet"},
+ )
connectionId: Optional[str] = Field(
- None, description="ID of the connection this token belongs to"
+ None,
+ description="ID of the connection this token belongs to",
+ json_schema_extra={"label": "Verbindungs-ID"},
)
tokenPurpose: Optional[TokenPurpose] = Field(
default=None,
description="authSession = gateway login JWT; dataConnection = provider OAuth for a connection",
+ json_schema_extra={"label": "Token-Verwendung"},
+ )
+ tokenAccess: str = Field(
+ ...,
+ json_schema_extra={"label": "Zugriffstoken"},
+ )
+ tokenType: str = Field(
+ default="bearer",
+ json_schema_extra={"label": "Token-Typ"},
)
- tokenAccess: str
- tokenType: str = "bearer"
expiresAt: float = Field(
- description="When the token expires (UTC timestamp in seconds)"
+ description="When the token expires (UTC timestamp in seconds)",
+ json_schema_extra={"label": "Laeuft ab am"},
+ )
+ tokenRefresh: Optional[str] = Field(
+ default=None,
+ json_schema_extra={"label": "Refresh-Token"},
)
- tokenRefresh: Optional[str] = None
status: TokenStatus = Field(
- default=TokenStatus.ACTIVE, description="Token status: active/revoked"
+ default=TokenStatus.ACTIVE,
+ description="Token status: active/revoked",
+ json_schema_extra={"label": "Status"},
)
revokedAt: Optional[float] = Field(
- None, description="When the token was revoked (UTC timestamp in seconds)"
+ None,
+ description="When the token was revoked (UTC timestamp in seconds)",
+ json_schema_extra={"label": "Widerrufen am"},
)
revokedBy: Optional[str] = Field(
- None, description="User ID who revoked the token (admin/self)"
+ None,
+ description="User ID who revoked the token (admin/self)",
+ json_schema_extra={"label": "Widerrufen von"},
+ )
+ reason: Optional[str] = Field(
+ None,
+ description="Optional revocation reason",
+ json_schema_extra={"label": "Grund"},
)
- reason: Optional[str] = Field(None, description="Optional revocation reason")
sessionId: Optional[str] = Field(
- None, description="Logical session grouping for logout revocation"
+ None,
+ description="Logical session grouping for logout revocation",
+ json_schema_extra={"label": "Sitzungs-ID"},
)
- # ENTFERNT: mandateId - Token ist nicht mehr Mandant-spezifisch
- # Mandant-Kontext wird per Request-Header (X-Mandate-Id) bestimmt
model_config = ConfigDict(use_enum_values=True)
@@ -91,51 +124,44 @@ class Token(PowerOnModel):
return data
-registerModelLabels(
- "Token",
- {"en": "Token", "de": "Token", "fr": "Jeton"},
- {
- "id": {"en": "ID", "de": "ID", "fr": "ID"},
- "userId": {"en": "User ID", "de": "Benutzer-ID", "fr": "ID utilisateur"},
- "authority": {"en": "Authority", "de": "Autorität", "fr": "Autorité"},
- "connectionId": {"en": "Connection ID", "de": "Verbindungs-ID", "fr": "ID de connexion"},
- "tokenPurpose": {"en": "Token purpose", "de": "Token-Verwendung", "fr": "Usage du jeton"},
- "tokenAccess": {"en": "Access Token", "de": "Zugriffstoken", "fr": "Jeton d'accès"},
- "tokenType": {"en": "Token Type", "de": "Token-Typ", "fr": "Type de jeton"},
- "expiresAt": {"en": "Expires At", "de": "Läuft ab am", "fr": "Expire le"},
- "tokenRefresh": {"en": "Refresh Token", "de": "Refresh-Token", "fr": "Jeton de rafraîchissement"},
- "status": {"en": "Status", "de": "Status", "fr": "Statut"},
- "revokedAt": {"en": "Revoked At", "de": "Widerrufen am", "fr": "Révoqué le"},
- "revokedBy": {"en": "Revoked By", "de": "Widerrufen von", "fr": "Révoqué par"},
- "reason": {"en": "Reason", "de": "Grund", "fr": "Raison"},
- "sessionId": {"en": "Session ID", "de": "Sitzungs-ID", "fr": "ID de session"},
- },
-)
-
-
+@i18nModel("Authentifizierungsereignis")
class AuthEvent(PowerOnModel):
"""Authentication event for audit logging."""
- id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Unique ID of the auth event", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False})
- userId: str = Field(description="ID of the user this event belongs to", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": True})
- eventType: str = Field(description="Type of authentication event (e.g., 'login', 'logout', 'token_refresh')", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": True})
- timestamp: float = Field(default_factory=getUtcTimestamp, description="Unix timestamp when the event occurred", json_schema_extra={"frontend_type": "datetime", "frontend_readonly": True, "frontend_required": True})
- ipAddress: Optional[str] = Field(default=None, description="IP address from which the event originated", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False})
- userAgent: Optional[str] = Field(default=None, description="User agent string from the request", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False})
- success: bool = Field(default=True, description="Whether the authentication event was successful", json_schema_extra={"frontend_type": "boolean", "frontend_readonly": True, "frontend_required": True})
- details: Optional[str] = Field(default=None, description="Additional details about the event", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False})
-
-
-registerModelLabels(
- "AuthEvent",
- {"en": "Authentication Event", "de": "Authentifizierungsereignis", "fr": "Événement d'authentification"},
- {
- "id": {"en": "ID", "de": "ID", "fr": "ID"},
- "userId": {"en": "User ID", "de": "Benutzer-ID", "fr": "ID utilisateur"},
- "eventType": {"en": "Event Type", "de": "Ereignistyp", "fr": "Type d'événement"},
- "timestamp": {"en": "Timestamp", "de": "Zeitstempel", "fr": "Horodatage"},
- "ipAddress": {"en": "IP Address", "de": "IP-Adresse", "fr": "Adresse IP"},
- "userAgent": {"en": "User Agent", "de": "User-Agent", "fr": "Agent utilisateur"},
- "success": {"en": "Success", "de": "Erfolgreich", "fr": "Succès"},
- "details": {"en": "Details", "de": "Details", "fr": "Détails"},
- },
-)
+ id: str = Field(
+ default_factory=lambda: str(uuid.uuid4()),
+ description="Unique ID of the auth event",
+ json_schema_extra={"label": "ID", "frontend_type": "text", "frontend_readonly": True, "frontend_required": False},
+ )
+ userId: str = Field(
+ description="ID of the user this event belongs to",
+ json_schema_extra={"label": "Benutzer-ID", "frontend_type": "text", "frontend_readonly": True, "frontend_required": True},
+ )
+ eventType: str = Field(
+ description="Type of authentication event (e.g., 'login', 'logout', 'token_refresh')",
+ json_schema_extra={"label": "Ereignistyp", "frontend_type": "text", "frontend_readonly": True, "frontend_required": True},
+ )
+ timestamp: float = Field(
+ default_factory=getUtcTimestamp,
+ description="Unix timestamp when the event occurred",
+ json_schema_extra={"label": "Zeitstempel", "frontend_type": "datetime", "frontend_readonly": True, "frontend_required": True},
+ )
+ ipAddress: Optional[str] = Field(
+ default=None,
+ description="IP address from which the event originated",
+ json_schema_extra={"label": "IP-Adresse", "frontend_type": "text", "frontend_readonly": True, "frontend_required": False},
+ )
+ userAgent: Optional[str] = Field(
+ default=None,
+ description="User agent string from the request",
+ json_schema_extra={"label": "User-Agent", "frontend_type": "text", "frontend_readonly": True, "frontend_required": False},
+ )
+ success: bool = Field(
+ default=True,
+ description="Whether the authentication event was successful",
+ json_schema_extra={"label": "Erfolgreich", "frontend_type": "boolean", "frontend_readonly": True, "frontend_required": True},
+ )
+ details: Optional[str] = Field(
+ default=None,
+ description="Additional details about the event",
+ json_schema_extra={"label": "Details", "frontend_type": "text", "frontend_readonly": True, "frontend_required": False},
+ )
diff --git a/modules/datamodels/datamodelSubscription.py b/modules/datamodels/datamodelSubscription.py
index 1791e7a9..5a377244 100644
--- a/modules/datamodels/datamodelSubscription.py
+++ b/modules/datamodels/datamodelSubscription.py
@@ -11,7 +11,7 @@ from enum import Enum
from datetime import datetime, timezone
from pydantic import BaseModel, Field
from modules.datamodels.datamodelBase import PowerOnModel
-from modules.shared.attributeUtils import registerModelLabels
+from modules.shared.i18nRegistry import i18nModel, t
import uuid
@@ -55,123 +55,234 @@ class BillingPeriodEnum(str, Enum):
# Catalog: SubscriptionPlan (static, in-memory)
# ============================================================================
+@i18nModel("Abonnement-Plan")
class SubscriptionPlan(BaseModel):
- """Plan definition (catalog entry). Not stored per mandate — static."""
- planKey: str = Field(..., description="Unique plan identifier")
- selectableByUser: bool = Field(default=True, description="Whether users can choose this plan in the UI")
+ """Plan-Definition (Katalog). Nicht pro Mandat gespeichert — statisch."""
+ planKey: str = Field(
+ ...,
+ description="Unique plan identifier",
+ json_schema_extra={"label": "Plan"},
+ )
+ selectableByUser: bool = Field(
+ default=True,
+ description="Whether users can choose this plan in the UI",
+ json_schema_extra={"label": "Waehlbar"},
+ )
- title: Dict[str, str] = Field(default_factory=dict, description="Multilingual title (en/de/fr)")
- description: Dict[str, str] = Field(default_factory=dict, description="Multilingual description")
+ title: str = Field(
+ default="",
+ description="Plan title (i18n key)",
+ json_schema_extra={"label": "Titel"},
+ )
+ description: str = Field(
+ default="",
+ description="Plan description (i18n key)",
+ json_schema_extra={"label": "Beschreibung"},
+ )
- currency: str = Field(default="CHF", description="Billing currency")
- billingPeriod: BillingPeriodEnum = Field(default=BillingPeriodEnum.MONTHLY, description="Recurring interval")
- pricePerUserCHF: float = Field(default=0.0, description="Price per active user per period")
- pricePerFeatureInstanceCHF: float = Field(default=0.0, description="Price per active feature instance per period")
- autoRenew: bool = Field(default=True, description="Stripe renews automatically at period end")
+ currency: str = Field(
+ default="CHF",
+ description="Billing currency",
+ json_schema_extra={"label": "Waehrung"},
+ )
+ billingPeriod: BillingPeriodEnum = Field(
+ default=BillingPeriodEnum.MONTHLY,
+ description="Recurring interval",
+ json_schema_extra={"label": "Abrechnungszeitraum"},
+ )
+ pricePerUserCHF: float = Field(
+ default=0.0,
+ description="Price per active user per period",
+ json_schema_extra={"label": "Preis pro User (CHF)"},
+ )
+ pricePerFeatureInstanceCHF: float = Field(
+ default=0.0,
+ description="Price per additional module beyond included (monthly, CHF)",
+ json_schema_extra={"label": "Preis pro Modul (CHF)"},
+ )
+ autoRenew: bool = Field(
+ default=True,
+ description="Stripe renews automatically at period end",
+ json_schema_extra={"label": "Auto-Verlaengerung"},
+ )
- maxUsers: Optional[int] = Field(None, description="Hard cap on active users (None = unlimited)")
- maxFeatureInstances: Optional[int] = Field(None, description="Hard cap on active feature instances (None = unlimited)")
- trialDays: Optional[int] = Field(None, description="Trial duration in days (only for trial plans)")
- maxDataVolumeMB: Optional[int] = Field(None, description="Soft-limit for data volume in MB per mandate (None = unlimited)")
- budgetAiCHF: float = Field(default=0.0, description="AI budget (CHF) included in subscription price per billing period")
- successorPlanKey: Optional[str] = Field(None, description="Plan to transition to when trial ends")
-
-
-registerModelLabels(
- "SubscriptionPlan",
- {"en": "Subscription Plan", "de": "Abonnement-Plan", "fr": "Plan d'abonnement"},
- {
- "planKey": {"en": "Plan", "de": "Plan", "fr": "Plan"},
- "selectableByUser": {"en": "Selectable", "de": "Wählbar", "fr": "Sélectionnable"},
- "billingPeriod": {"en": "Billing Period", "de": "Abrechnungszeitraum", "fr": "Période de facturation"},
- "pricePerUserCHF": {"en": "Price per User (CHF)", "de": "Preis pro User (CHF)"},
- "pricePerFeatureInstanceCHF": {"en": "Price per Instance (CHF)", "de": "Preis pro Instanz (CHF)"},
- "maxUsers": {"en": "Max Users", "de": "Max. Benutzer", "fr": "Max. utilisateurs"},
- "maxFeatureInstances": {"en": "Max Instances", "de": "Max. Instanzen", "fr": "Max. instances"},
- "maxDataVolumeMB": {"en": "Data Volume (MB)", "de": "Datenvolumen (MB)"},
- "budgetAiCHF": {"en": "AI Budget (CHF)", "de": "AI-Budget (CHF)"},
- },
-)
+ maxUsers: Optional[int] = Field(
+ None,
+ description="Hard cap on active users (None = unlimited)",
+ json_schema_extra={"label": "Max. Benutzer"},
+ )
+ maxFeatureInstances: Optional[int] = Field(
+ None,
+ description="Hard cap on active modules (None = unlimited)",
+ json_schema_extra={"label": "Max. Module"},
+ )
+ includedModules: int = Field(
+ default=0,
+ description="Number of modules included in plan at no extra charge",
+ json_schema_extra={"label": "Inkl. Module"},
+ )
+ trialDays: Optional[int] = Field(
+ None,
+ description="Trial duration in days (only for trial plans)",
+ json_schema_extra={"label": "Probetage"},
+ )
+ maxDataVolumeMB: Optional[int] = Field(
+ None,
+ description="Soft-limit for data volume in MB per mandate (None = unlimited)",
+ json_schema_extra={"label": "Datenvolumen (MB)"},
+ )
+ budgetAiCHF: float = Field(
+ default=0.0,
+ description="AI budget (CHF) total per billing period (users * budgetAiPerUserCHF at activation)",
+ json_schema_extra={"label": "AI-Budget (CHF)"},
+ )
+ budgetAiPerUserCHF: float = Field(
+ default=0.0,
+ description="AI budget per user per month (CHF). Total = users * this value.",
+ json_schema_extra={"label": "AI-Budget pro User (CHF)"},
+ )
+ successorPlanKey: Optional[str] = Field(
+ None,
+ description="Plan to transition to when trial ends",
+ json_schema_extra={"label": "Nachfolge-Plan"},
+ )
# ============================================================================
# Stripe Price mapping (persisted in DB, auto-created at bootstrap)
# ============================================================================
+@i18nModel("Stripe-Planpreise")
class StripePlanPrice(BaseModel):
- """Persisted mapping from planKey to Stripe Product/Price IDs.
- Auto-created at startup — no manual configuration needed.
- Uses separate Stripe Products for users and instances for clear invoice labels."""
- id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Primary key")
- planKey: str = Field(..., description="Reference to SubscriptionPlan.planKey")
- stripeProductId: str = Field("", description="Legacy single-product ID (unused)")
- stripeProductIdUsers: Optional[str] = Field(None, description="Stripe Product ID for user licenses")
- stripeProductIdInstances: Optional[str] = Field(None, description="Stripe Product ID for feature instances")
- stripePriceIdUsers: Optional[str] = Field(None, description="Stripe Price ID for user-seat line item")
- stripePriceIdInstances: Optional[str] = Field(None, description="Stripe Price ID for instance line item")
-
-
-registerModelLabels(
- "StripePlanPrice",
- {"en": "Stripe Plan Prices", "de": "Stripe-Planpreise"},
- {
- "planKey": {"en": "Plan", "de": "Plan"},
- "stripeProductIdUsers": {"en": "Product (Users)", "de": "Produkt (User)"},
- "stripeProductIdInstances": {"en": "Product (Instances)", "de": "Produkt (Instanzen)"},
- "stripePriceIdUsers": {"en": "Price ID (Users)", "de": "Preis-ID (User)"},
- "stripePriceIdInstances": {"en": "Price ID (Instances)", "de": "Preis-ID (Instanzen)"},
- },
-)
+ """Persistierte Zuordnung planKey zu Stripe Product/Price IDs."""
+ id: str = Field(
+ default_factory=lambda: str(uuid.uuid4()),
+ description="Primary key",
+ json_schema_extra={"label": "ID"},
+ )
+ planKey: str = Field(
+ ...,
+ description="Reference to SubscriptionPlan.planKey",
+ json_schema_extra={"label": "Plan"},
+ )
+ stripeProductId: str = Field(
+ "",
+ description="Legacy single-product ID (unused)",
+ json_schema_extra={"label": "Stripe-Produkt-ID (Legacy)"},
+ )
+ stripeProductIdUsers: Optional[str] = Field(
+ None,
+ description="Stripe Product ID for user licenses",
+ json_schema_extra={"label": "Produkt (User)"},
+ )
+ stripeProductIdInstances: Optional[str] = Field(
+ None,
+ description="Stripe Product ID for modules",
+ json_schema_extra={"label": "Produkt (Module)"},
+ )
+ stripePriceIdUsers: Optional[str] = Field(
+ None,
+ description="Stripe Price ID for user-seat line item",
+ json_schema_extra={"label": "Preis-ID (User)"},
+ )
+ stripePriceIdInstances: Optional[str] = Field(
+ None,
+ description="Stripe Price ID for module line item",
+ json_schema_extra={"label": "Preis-ID (Module)"},
+ )
# ============================================================================
# Instance: MandateSubscription
# ============================================================================
+@i18nModel("Mandanten-Abonnement")
class MandateSubscription(PowerOnModel):
- """A subscription instance bound to a specific mandate.
- See wiki/concepts/Subscription-State-Machine.md for state transitions."""
- id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Primary key")
- mandateId: str = Field(..., description="Foreign key to Mandate")
- planKey: str = Field(..., description="Reference to SubscriptionPlan.planKey")
+ """Abonnement-Instanz gebunden an einen Mandanten."""
+ id: str = Field(
+ default_factory=lambda: str(uuid.uuid4()),
+ description="Primary key",
+ json_schema_extra={"label": "ID"},
+ )
+ mandateId: str = Field(
+ ...,
+ description="Foreign key to Mandate",
+ json_schema_extra={"label": "Mandanten-ID"},
+ )
+ planKey: str = Field(
+ ...,
+ description="Reference to SubscriptionPlan.planKey",
+ json_schema_extra={"label": "Plan"},
+ )
- status: SubscriptionStatusEnum = Field(default=SubscriptionStatusEnum.PENDING, description="Current lifecycle status")
- recurring: bool = Field(default=True, description="True: auto-renews at period end. False: expires at period end (gekuendigt).")
+ status: SubscriptionStatusEnum = Field(
+ default=SubscriptionStatusEnum.PENDING,
+ description="Current lifecycle status",
+ json_schema_extra={"label": "Status"},
+ )
+ recurring: bool = Field(
+ default=True,
+ description="True: auto-renews at period end. False: expires at period end (gekuendigt).",
+ json_schema_extra={"label": "Wiederkehrend"},
+ )
- startedAt: datetime = Field(default_factory=lambda: datetime.now(timezone.utc), description="Record creation timestamp")
- effectiveFrom: Optional[datetime] = Field(None, description="When this subscription becomes operative. None = immediate. Set for SCHEDULED subs.")
- endedAt: Optional[datetime] = Field(None, description="When subscription ended (terminal)")
- currentPeriodStart: Optional[datetime] = Field(None, description="Current billing period start (synced from Stripe)")
- currentPeriodEnd: Optional[datetime] = Field(None, description="Current billing period end (synced from Stripe)")
- trialEndsAt: Optional[datetime] = Field(None, description="Trial expiry timestamp")
+ startedAt: datetime = Field(
+ default_factory=lambda: datetime.now(timezone.utc),
+ description="Record creation timestamp",
+ json_schema_extra={"label": "Gestartet"},
+ )
+ effectiveFrom: Optional[datetime] = Field(
+ None,
+ description="When this subscription becomes operative. None = immediate. Set for SCHEDULED subs.",
+ json_schema_extra={"label": "Wirksam ab"},
+ )
+ endedAt: Optional[datetime] = Field(
+ None,
+ description="When subscription ended (terminal)",
+ json_schema_extra={"label": "Beendet"},
+ )
+ currentPeriodStart: Optional[datetime] = Field(
+ None,
+ description="Current billing period start (synced from Stripe)",
+ json_schema_extra={"label": "Periodenbeginn"},
+ )
+ currentPeriodEnd: Optional[datetime] = Field(
+ None,
+ description="Current billing period end (synced from Stripe)",
+ json_schema_extra={"label": "Periodenende"},
+ )
+ trialEndsAt: Optional[datetime] = Field(
+ None,
+ description="Trial expiry timestamp",
+ json_schema_extra={"label": "Trial endet"},
+ )
- snapshotPricePerUserCHF: float = Field(default=0.0, description="Price snapshot at activation (for invoice history)")
- snapshotPricePerInstanceCHF: float = Field(default=0.0, description="Price snapshot at activation")
+ snapshotPricePerUserCHF: float = Field(
+ default=0.0,
+ description="Price snapshot at activation (for invoice history)",
+ json_schema_extra={"label": "Preis/User (CHF)"},
+ )
+ snapshotPricePerInstanceCHF: float = Field(
+ default=0.0,
+ description="Price snapshot at activation (per additional module)",
+ json_schema_extra={"label": "Preis/Modul (CHF)"},
+ )
- stripeSubscriptionId: Optional[str] = Field(None, description="Stripe Subscription ID (sub_xxx)")
- stripeItemIdUsers: Optional[str] = Field(None, description="Stripe Subscription Item ID for user seats")
- stripeItemIdInstances: Optional[str] = Field(None, description="Stripe Subscription Item ID for feature instances")
-
-
-registerModelLabels(
- "MandateSubscription",
- {"en": "Mandate Subscription", "de": "Mandanten-Abonnement", "fr": "Abonnement du mandat"},
- {
- "id": {"en": "ID", "de": "ID"},
- "mandateId": {"en": "Mandate ID", "de": "Mandanten-ID"},
- "planKey": {"en": "Plan", "de": "Plan"},
- "status": {"en": "Status", "de": "Status"},
- "recurring": {"en": "Recurring", "de": "Wiederkehrend"},
- "startedAt": {"en": "Started", "de": "Gestartet"},
- "effectiveFrom": {"en": "Effective From", "de": "Wirksam ab"},
- "endedAt": {"en": "Ended", "de": "Beendet"},
- "currentPeriodStart": {"en": "Period Start", "de": "Periodenbeginn"},
- "currentPeriodEnd": {"en": "Period End", "de": "Periodenende"},
- "trialEndsAt": {"en": "Trial Ends", "de": "Trial endet"},
- "snapshotPricePerUserCHF": {"en": "Price/User (CHF)", "de": "Preis/User (CHF)"},
- "snapshotPricePerInstanceCHF": {"en": "Price/Instance (CHF)", "de": "Preis/Instanz (CHF)"},
- },
-)
+ stripeSubscriptionId: Optional[str] = Field(
+ None,
+ description="Stripe Subscription ID (sub_xxx)",
+ json_schema_extra={"label": "Stripe-Abonnement-ID"},
+ )
+ stripeItemIdUsers: Optional[str] = Field(
+ None,
+ description="Stripe Subscription Item ID for user seats",
+ json_schema_extra={"label": "Stripe-Item (User)"},
+ )
+ stripeItemIdInstances: Optional[str] = Field(
+ None,
+ description="Stripe Subscription Item ID for modules",
+ json_schema_extra={"label": "Stripe-Item (Module)"},
+ )
# ============================================================================
@@ -182,59 +293,116 @@ BUILTIN_PLANS: Dict[str, SubscriptionPlan] = {
"ROOT": SubscriptionPlan(
planKey="ROOT",
selectableByUser=False,
- title={"en": "Root (System)", "de": "Root (System)", "fr": "Root (Système)"},
- description={"en": "Internal system plan — no billing.", "de": "Interner Systemplan — keine Verrechnung."},
+ title=t("Root (System)"),
+ description=t("Interner Systemplan — keine Verrechnung."),
billingPeriod=BillingPeriodEnum.NONE,
autoRenew=False,
maxUsers=None,
maxFeatureInstances=None,
+ includedModules=0,
maxDataVolumeMB=None,
budgetAiCHF=0.0,
+ budgetAiPerUserCHF=0.0,
),
- "TRIAL_7D": SubscriptionPlan(
- planKey="TRIAL_7D",
+ "TRIAL_14D": SubscriptionPlan(
+ planKey="TRIAL_14D",
selectableByUser=False,
- title={"en": "Free Trial (7 days)", "de": "Gratis-Testphase (7 Tage)", "fr": "Essai gratuit (7 jours)"},
- description={
- "en": "Try the platform for 7 days — 1 user, up to 3 feature instances, 5 CHF AI budget included.",
- "de": "Plattform 7 Tage testen — 1 User, bis zu 3 Feature-Instanzen, 5 CHF AI-Budget inklusive.",
- },
+ title=t("Gratis-Testphase (14 Tage)"),
+ description=t("14 Tage kostenlos testen — 1 User, 2 Module inklusive, CHF 25 AI-Budget."),
billingPeriod=BillingPeriodEnum.NONE,
autoRenew=False,
maxUsers=1,
- maxFeatureInstances=3,
- trialDays=7,
- maxDataVolumeMB=500,
- budgetAiCHF=5.0,
- successorPlanKey="STANDARD_MONTHLY",
+ maxFeatureInstances=2,
+ includedModules=2,
+ trialDays=14,
+ maxDataVolumeMB=1024,
+ budgetAiCHF=25.0,
+ budgetAiPerUserCHF=25.0,
+ successorPlanKey="STARTER_MONTHLY",
),
- "STANDARD_MONTHLY": SubscriptionPlan(
- planKey="STANDARD_MONTHLY",
+ "STARTER_MONTHLY": SubscriptionPlan(
+ planKey="STARTER_MONTHLY",
selectableByUser=True,
- title={"en": "Standard (Monthly)", "de": "Standard (Monatlich)", "fr": "Standard (Mensuel)"},
- description={
- "en": "Usage-based billing per active user and feature instance, billed monthly. Includes 10 CHF AI budget.",
- "de": "Nutzungsbasierte Abrechnung pro aktivem User und Feature-Instanz, monatlich. Inkl. 10 CHF AI-Budget.",
- },
+ title=t("Starter (Monatlich)"),
+ description=t("CHF 69 pro User/Monat. 2 Module inklusive, CHF 25 AI-Budget pro User."),
billingPeriod=BillingPeriodEnum.MONTHLY,
- pricePerUserCHF=79.0,
- pricePerFeatureInstanceCHF=119.0,
+ pricePerUserCHF=69.0,
+ pricePerFeatureInstanceCHF=39.0,
+ maxUsers=None,
+ includedModules=2,
maxDataVolumeMB=1024,
- budgetAiCHF=10.0,
+ budgetAiCHF=0.0,
+ budgetAiPerUserCHF=25.0,
),
- "STANDARD_YEARLY": SubscriptionPlan(
- planKey="STANDARD_YEARLY",
+ "STARTER_YEARLY": SubscriptionPlan(
+ planKey="STARTER_YEARLY",
selectableByUser=True,
- title={"en": "Standard (Yearly)", "de": "Standard (Jährlich)", "fr": "Standard (Annuel)"},
- description={
- "en": "Usage-based billing per active user and feature instance, billed yearly. Includes 120 CHF AI budget.",
- "de": "Nutzungsbasierte Abrechnung pro aktivem User und Feature-Instanz, jährlich. Inkl. 120 CHF AI-Budget.",
- },
+ title=t("Starter (Jaehrlich)"),
+ description=t("CHF 690 pro User/Jahr (-17%). 2 Module inklusive, CHF 25 AI-Budget pro User/Monat."),
billingPeriod=BillingPeriodEnum.YEARLY,
- pricePerUserCHF=948.0,
- pricePerFeatureInstanceCHF=1428.0,
+ pricePerUserCHF=690.0,
+ pricePerFeatureInstanceCHF=39.0,
+ maxUsers=None,
+ includedModules=2,
maxDataVolumeMB=1024,
- budgetAiCHF=120.0,
+ budgetAiCHF=0.0,
+ budgetAiPerUserCHF=25.0,
+ ),
+ "PROFESSIONAL_MONTHLY": SubscriptionPlan(
+ planKey="PROFESSIONAL_MONTHLY",
+ selectableByUser=True,
+ title=t("Professional (Monatlich)"),
+ description=t("CHF 99 pro User/Monat. 5 Module inklusive, CHF 50 AI-Budget pro User."),
+ billingPeriod=BillingPeriodEnum.MONTHLY,
+ pricePerUserCHF=99.0,
+ pricePerFeatureInstanceCHF=29.0,
+ maxUsers=None,
+ includedModules=5,
+ maxDataVolumeMB=5120,
+ budgetAiCHF=0.0,
+ budgetAiPerUserCHF=50.0,
+ ),
+ "PROFESSIONAL_YEARLY": SubscriptionPlan(
+ planKey="PROFESSIONAL_YEARLY",
+ selectableByUser=True,
+ title=t("Professional (Jaehrlich)"),
+ description=t("CHF 990 pro User/Jahr (-17%). 5 Module inklusive, CHF 50 AI-Budget pro User/Monat."),
+ billingPeriod=BillingPeriodEnum.YEARLY,
+ pricePerUserCHF=990.0,
+ pricePerFeatureInstanceCHF=29.0,
+ maxUsers=None,
+ includedModules=5,
+ maxDataVolumeMB=5120,
+ budgetAiCHF=0.0,
+ budgetAiPerUserCHF=50.0,
+ ),
+ "MAX_MONTHLY": SubscriptionPlan(
+ planKey="MAX_MONTHLY",
+ selectableByUser=True,
+ title=t("Max (Monatlich)"),
+ description=t("CHF 145 pro User/Monat. 15 Module inklusive, CHF 100 AI-Budget pro User."),
+ billingPeriod=BillingPeriodEnum.MONTHLY,
+ pricePerUserCHF=145.0,
+ pricePerFeatureInstanceCHF=19.0,
+ maxUsers=None,
+ includedModules=15,
+ maxDataVolumeMB=25600,
+ budgetAiCHF=0.0,
+ budgetAiPerUserCHF=100.0,
+ ),
+ "MAX_YEARLY": SubscriptionPlan(
+ planKey="MAX_YEARLY",
+ selectableByUser=True,
+ title=t("Max (Jaehrlich)"),
+ description=t("CHF 1450 pro User/Jahr (-17%). 15 Module inklusive, CHF 100 AI-Budget pro User/Monat."),
+ billingPeriod=BillingPeriodEnum.YEARLY,
+ pricePerUserCHF=1450.0,
+ pricePerFeatureInstanceCHF=19.0,
+ maxUsers=None,
+ includedModules=15,
+ maxDataVolumeMB=25600,
+ budgetAiCHF=0.0,
+ budgetAiPerUserCHF=100.0,
),
}
diff --git a/modules/datamodels/datamodelUam.py b/modules/datamodels/datamodelUam.py
index 35e9ec7c..61e7c105 100644
--- a/modules/datamodels/datamodelUam.py
+++ b/modules/datamodels/datamodelUam.py
@@ -14,7 +14,7 @@ from typing import Optional, List, Dict, Any
from enum import Enum
from pydantic import BaseModel, Field, EmailStr, field_validator, computed_field
from modules.datamodels.datamodelBase import PowerOnModel
-from modules.shared.attributeUtils import registerModelLabels
+from modules.shared.i18nRegistry import i18nModel, normalizePrimaryLanguageTag
from modules.shared.timeUtils import getUtcTimestamp
@@ -61,6 +61,7 @@ class UserPermissions(BaseModel):
)
+@i18nModel("Mandant")
class Mandate(PowerOnModel):
"""
Mandate (Mandant/Tenant) model.
@@ -69,31 +70,31 @@ class Mandate(PowerOnModel):
id: str = Field(
default_factory=lambda: str(uuid.uuid4()),
description="Unique ID of the mandate",
- json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_visible": False, "frontend_required": False}
+ json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_visible": False, "frontend_required": False, "label": "ID"},
)
name: str = Field(
description="Name of the mandate",
- json_schema_extra={"frontend_type": "text", "frontend_readonly": False, "frontend_required": True}
+ json_schema_extra={"frontend_type": "text", "frontend_readonly": False, "frontend_required": True, "label": "Name"},
)
label: Optional[str] = Field(
default=None,
description="Display label of the mandate",
- json_schema_extra={"frontend_type": "text", "frontend_readonly": False, "frontend_required": False}
+ json_schema_extra={"frontend_type": "text", "frontend_readonly": False, "frontend_required": False, "label": "Label"},
)
enabled: bool = Field(
default=True,
description="Indicates whether the mandate is enabled",
- json_schema_extra={"frontend_type": "checkbox", "frontend_readonly": False, "frontend_required": False}
+ json_schema_extra={"frontend_type": "checkbox", "frontend_readonly": False, "frontend_required": False, "label": "Aktiviert"},
)
isSystem: bool = Field(
default=False,
description="Whether this is a system mandate (e.g. root mandate). Cannot be deleted.",
- json_schema_extra={"frontend_type": "checkbox", "frontend_readonly": True, "frontend_required": False}
+ json_schema_extra={"frontend_type": "checkbox", "frontend_readonly": True, "frontend_required": False, "label": "System-Mandant"},
)
deletedAt: Optional[float] = Field(
default=None,
description="Timestamp when the mandate was soft-deleted. After 30 days, hard-delete is triggered.",
- json_schema_extra={"frontend_type": "timestamp", "frontend_readonly": True, "frontend_required": False}
+ json_schema_extra={"frontend_type": "timestamp", "frontend_readonly": True, "frontend_required": False, "label": "Geloescht am"},
)
@field_validator('isSystem', mode='before')
@@ -104,38 +105,91 @@ class Mandate(PowerOnModel):
return False
return v
-registerModelLabels(
- "Mandate",
- {"en": "Mandate", "de": "Mandant", "fr": "Mandat"},
- {
- "id": {"en": "ID", "de": "ID", "fr": "ID"},
- "name": {"en": "Name", "de": "Name", "fr": "Nom"},
- "label": {"en": "Label", "de": "Label", "fr": "Libellé"},
- "enabled": {"en": "Enabled", "de": "Aktiviert", "fr": "Activé"},
- "isSystem": {"en": "System Mandate", "de": "System-Mandant", "fr": "Mandat système"},
- "deletedAt": {"en": "Deleted at", "de": "Gelöscht am", "fr": "Supprimé le"},
- },
-)
-
-
+@i18nModel("Benutzerverbindung")
class UserConnection(PowerOnModel):
- id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Unique ID of the connection", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False})
- userId: str = Field(description="ID of the user this connection belongs to", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False})
- authority: AuthAuthority = Field(description="Authentication authority", json_schema_extra={"frontend_type": "select", "frontend_readonly": True, "frontend_required": False, "frontend_options": "/api/connections/authorities/options"})
- externalId: str = Field(description="User ID in the external system", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False})
- externalUsername: str = Field(description="Username in the external system", json_schema_extra={"frontend_type": "text", "frontend_readonly": False, "frontend_required": False})
- externalEmail: Optional[EmailStr] = Field(None, description="Email in the external system", json_schema_extra={"frontend_type": "email", "frontend_readonly": False, "frontend_required": False})
- status: ConnectionStatus = Field(default=ConnectionStatus.ACTIVE, description="Connection status", json_schema_extra={"frontend_type": "select", "frontend_readonly": False, "frontend_required": False, "frontend_options": "/api/connections/statuses/options"})
- connectedAt: float = Field(default_factory=getUtcTimestamp, description="When the connection was established (UTC timestamp in seconds)", json_schema_extra={"frontend_type": "timestamp", "frontend_readonly": True, "frontend_required": False})
- lastChecked: float = Field(default_factory=getUtcTimestamp, description="When the connection was last verified (UTC timestamp in seconds)", json_schema_extra={"frontend_type": "timestamp", "frontend_readonly": True, "frontend_required": False})
- expiresAt: Optional[float] = Field(None, description="When the connection expires (UTC timestamp in seconds)", json_schema_extra={"frontend_type": "timestamp", "frontend_readonly": True, "frontend_required": False})
- tokenStatus: Optional[str] = Field(None, description="Current token status: active, expired, none", json_schema_extra={"frontend_type": "select", "frontend_readonly": True, "frontend_required": False, "frontend_options": [
- {"value": "active", "label": {"en": "Active", "fr": "Actif"}},
- {"value": "expired", "label": {"en": "Expired", "fr": "Expiré"}},
- {"value": "none", "label": {"en": "None", "fr": "Aucun"}},
- ]})
- tokenExpiresAt: Optional[float] = Field(None, description="When the current token expires (UTC timestamp in seconds)", json_schema_extra={"frontend_type": "timestamp", "frontend_readonly": True, "frontend_required": False})
- grantedScopes: Optional[List[str]] = Field(None, description="OAuth scopes granted for this connection", json_schema_extra={"frontend_type": "list", "frontend_readonly": True, "frontend_required": False})
+ id: str = Field(
+ default_factory=lambda: str(uuid.uuid4()),
+ description="Unique ID of the connection",
+ json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False, "label": "ID"},
+ )
+ userId: str = Field(
+ description="ID of the user this connection belongs to",
+ json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False, "label": "Benutzer-ID"},
+ )
+ authority: AuthAuthority = Field(
+ description="Authentication authority",
+ json_schema_extra={
+ "frontend_type": "select",
+ "frontend_readonly": True,
+ "frontend_required": False,
+ "frontend_options": "/api/connections/authorities/options",
+ "label": "Autorität",
+ },
+ )
+ externalId: str = Field(
+ description="User ID in the external system",
+ json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False, "label": "Externe ID"},
+ )
+ externalUsername: str = Field(
+ description="Username in the external system",
+ json_schema_extra={"frontend_type": "text", "frontend_readonly": False, "frontend_required": False, "label": "Externer Benutzername"},
+ )
+ externalEmail: Optional[EmailStr] = Field(
+ None,
+ description="Email in the external system",
+ json_schema_extra={"frontend_type": "email", "frontend_readonly": False, "frontend_required": False, "label": "Externe E-Mail"},
+ )
+ status: ConnectionStatus = Field(
+ default=ConnectionStatus.ACTIVE,
+ description="Connection status",
+ json_schema_extra={
+ "frontend_type": "select",
+ "frontend_readonly": False,
+ "frontend_required": False,
+ "frontend_options": "/api/connections/statuses/options",
+ "label": "Status",
+ },
+ )
+ connectedAt: float = Field(
+ default_factory=getUtcTimestamp,
+ description="When the connection was established (UTC timestamp in seconds)",
+ json_schema_extra={"frontend_type": "timestamp", "frontend_readonly": True, "frontend_required": False, "label": "Verbunden am"},
+ )
+ lastChecked: float = Field(
+ default_factory=getUtcTimestamp,
+ description="When the connection was last verified (UTC timestamp in seconds)",
+ json_schema_extra={"frontend_type": "timestamp", "frontend_readonly": True, "frontend_required": False, "label": "Zuletzt geprüft"},
+ )
+ expiresAt: Optional[float] = Field(
+ None,
+ description="When the connection expires (UTC timestamp in seconds)",
+ json_schema_extra={"frontend_type": "timestamp", "frontend_readonly": True, "frontend_required": False, "label": "Läuft ab am"},
+ )
+ tokenStatus: Optional[str] = Field(
+ None,
+ description="Current token status: active, expired, none",
+ json_schema_extra={
+ "frontend_type": "select",
+ "frontend_readonly": True,
+ "frontend_required": False,
+ "frontend_options": [
+ {"value": "active", "label": "Active"},
+ {"value": "expired", "label": "Expired"},
+ {"value": "none", "label": "None"},
+ ],
+ "label": "Verbindungsstatus",
+ },
+ )
+ tokenExpiresAt: Optional[float] = Field(
+ None,
+ description="When the current token expires (UTC timestamp in seconds)",
+ json_schema_extra={"frontend_type": "timestamp", "frontend_readonly": True, "frontend_required": False, "label": "Token läuft ab am"},
+ )
+ grantedScopes: Optional[List[str]] = Field(
+ None,
+ description="OAuth scopes granted for this connection",
+ json_schema_extra={"frontend_type": "list", "frontend_readonly": True, "frontend_required": False, "label": "Gewährte Berechtigungen"},
+ )
@computed_field
@computed_field
@@ -157,29 +211,7 @@ class UserConnection(PowerOnModel):
return f"{authorityLabels.get(self.authority.value, self.authority.value)}: {self.externalUsername}"
-registerModelLabels(
- "UserConnection",
- {"en": "User Connection", "de": "Benutzerverbindung", "fr": "Connexion utilisateur"},
- {
- "id": {"en": "ID", "de": "ID", "fr": "ID"},
- "userId": {"en": "User ID", "de": "Benutzer-ID", "fr": "ID utilisateur"},
- "authority": {"en": "Authority", "de": "Autorität", "fr": "Autorité"},
- "externalId": {"en": "External ID", "de": "Externe ID", "fr": "ID externe"},
- "externalUsername": {"en": "External Username", "de": "Externer Benutzername", "fr": "Nom d'utilisateur externe"},
- "externalEmail": {"en": "External Email", "de": "Externe E-Mail", "fr": "Email externe"},
- "status": {"en": "Status", "de": "Status", "fr": "Statut"},
- "connectedAt": {"en": "Connected At", "de": "Verbunden am", "fr": "Connecté le"},
- "lastChecked": {"en": "Last Checked", "de": "Zuletzt geprüft", "fr": "Dernière vérification"},
- "expiresAt": {"en": "Expires At", "de": "Läuft ab am", "fr": "Expire le"},
- "tokenStatus": {"en": "Connection Status", "de": "Verbindungsstatus", "fr": "Statut de connexion"},
- "tokenExpiresAt": {"en": "Expires At", "de": "Läuft ab am", "fr": "Expire le"},
- "grantedScopes": {"en": "Granted Scopes", "de": "Gewährte Berechtigungen", "fr": "Autorisations accordées"},
- "connectionReference": {"en": "Connection Reference", "de": "Verbindungsreferenz", "fr": "Référence de connexion"},
- "displayLabel": {"en": "Display Label", "de": "Anzeigebezeichnung", "fr": "Libellé d'affichage"},
- },
-)
-
-
+@i18nModel("Benutzer")
class User(PowerOnModel):
"""
User model.
@@ -193,40 +225,40 @@ class User(PowerOnModel):
id: str = Field(
default_factory=lambda: str(uuid.uuid4()),
description="Unique ID of the user",
- json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_visible": False, "frontend_required": False}
+ json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_visible": False, "frontend_required": False, "label": "ID"},
)
username: str = Field(
description="Username for login (immutable after creation)",
- json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": True}
+ json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": True, "label": "Benutzername"},
)
email: Optional[EmailStr] = Field(
default=None,
description="Email address of the user",
- json_schema_extra={"frontend_type": "email", "frontend_readonly": False, "frontend_required": True}
+ json_schema_extra={"frontend_type": "email", "frontend_readonly": False, "frontend_required": True, "label": "E-Mail"},
)
fullName: Optional[str] = Field(
default=None,
description="Full name of the user",
- json_schema_extra={"frontend_type": "text", "frontend_readonly": False, "frontend_required": False}
+ json_schema_extra={"frontend_type": "text", "frontend_readonly": False, "frontend_required": False, "label": "Vollständiger Name"},
)
language: str = Field(
default="de",
- description="Preferred language of the user (ISO 639-1 code: de, en, fr, it)",
- json_schema_extra={"frontend_type": "select", "frontend_readonly": False, "frontend_required": True, "frontend_options": [
- {"value": "de", "label": {"en": "Deutsch", "de": "Deutsch", "fr": "Allemand"}},
- {"value": "en", "label": {"en": "English", "de": "Englisch", "fr": "Anglais"}},
- {"value": "fr", "label": {"en": "Français", "de": "Französisch", "fr": "Français"}},
- {"value": "it", "label": {"en": "Italiano", "de": "Italienisch", "fr": "Italien"}},
- ]}
+ description="Preferred UI language code (must exist as UiLanguageSet).",
+ json_schema_extra={
+ "frontend_type": "select",
+ "frontend_readonly": False,
+ "frontend_required": True,
+ "frontend_options": "/api/i18n/codes",
+ "label": "Sprache",
+ },
)
@field_validator('language', mode='before')
@classmethod
def _normalizeLanguage(cls, v):
- """Normalize language to valid ISO 639-1 code."""
+ """Normalize to primary language subtag (2–8 letters); default remains ``de``."""
if v is None:
return "de"
- # Map common variations to standard codes
langMap = {
'english': 'en', 'englisch': 'en',
'german': 'de', 'deutsch': 'de',
@@ -236,22 +268,18 @@ class User(PowerOnModel):
normalized = str(v).lower().strip()
if normalized in langMap:
return langMap[normalized]
- # If already a valid code, return as-is
- if normalized in ['de', 'en', 'fr', 'it']:
- return normalized
- # Default fallback
- return "de"
+ return normalizePrimaryLanguageTag(normalized, "de")
enabled: bool = Field(
default=True,
description="Indicates whether the user is enabled",
- json_schema_extra={"frontend_type": "checkbox", "frontend_readonly": False, "frontend_required": False}
+ json_schema_extra={"frontend_type": "checkbox", "frontend_readonly": False, "frontend_required": False, "label": "Aktiviert"},
)
isSysAdmin: bool = Field(
default=False,
description="Global SysAdmin flag. SysAdmin = System-Zugriff, KEIN Daten-Zugriff!",
- json_schema_extra={"frontend_type": "checkbox", "frontend_readonly": False, "frontend_required": False}
+ json_schema_extra={"frontend_type": "checkbox", "frontend_readonly": False, "frontend_required": False, "label": "System-Admin"},
)
@field_validator('isSysAdmin', mode='before')
@@ -265,48 +293,45 @@ class User(PowerOnModel):
authenticationAuthority: AuthAuthority = Field(
default=AuthAuthority.LOCAL,
description="Primary authentication authority",
- json_schema_extra={"frontend_type": "select", "frontend_readonly": True, "frontend_required": False, "frontend_options": "/api/connections/authorities/options"}
+ json_schema_extra={
+ "frontend_type": "select",
+ "frontend_readonly": True,
+ "frontend_required": False,
+ "frontend_options": "/api/connections/authorities/options",
+ "label": "Authentifizierung",
+ },
)
roleLabels: List[str] = Field(
default_factory=list,
description="Role labels (from DB or enriched when loading users)",
- json_schema_extra={"frontend_type": "multiselect", "frontend_readonly": True, "frontend_visible": False, "frontend_required": False},
+ json_schema_extra={
+ "frontend_type": "multiselect",
+ "frontend_readonly": True,
+ "frontend_visible": False,
+ "frontend_required": False,
+ "label": "Rollen-Labels",
+ },
)
-registerModelLabels(
- "User",
- {"en": "User", "de": "Benutzer", "fr": "Utilisateur"},
- {
- "id": {"en": "ID", "de": "ID", "fr": "ID"},
- "username": {"en": "Username", "de": "Benutzername", "fr": "Nom d'utilisateur"},
- "email": {"en": "Email", "de": "E-Mail", "fr": "Email"},
- "fullName": {"en": "Full Name", "de": "Vollständiger Name", "fr": "Nom complet"},
- "language": {"en": "Language", "de": "Sprache", "fr": "Langue"},
- "enabled": {"en": "Enabled", "de": "Aktiviert", "fr": "Activé"},
- "isSysAdmin": {"en": "System Admin", "de": "System-Admin", "fr": "Admin système"},
- "authenticationAuthority": {"en": "Auth Authority", "de": "Authentifizierung", "fr": "Autorité d'authentification"},
- "roleLabels": {"en": "Role Labels", "de": "Rollen-Labels", "fr": "Libellés de rôles"},
- },
-)
-
-
+@i18nModel("Benutzerzugang")
class UserInDB(User):
"""User model with password hash for database storage."""
- hashedPassword: Optional[str] = Field(None, description="Hash of the user password")
- resetToken: Optional[str] = Field(None, description="Password reset token (UUID)")
- resetTokenExpires: Optional[float] = Field(None, description="Reset token expiration (UTC timestamp in seconds)")
-
-
-registerModelLabels(
- "UserInDB",
- {"en": "User Access", "de": "Benutzerzugang", "fr": "Accès de l'utilisateur"},
- {
- "hashedPassword": {"en": "Password hash", "de": "Passwort-Hash", "fr": "Hachage de mot de passe"},
- "resetToken": {"en": "Reset Token", "de": "Reset-Token", "fr": "Jeton de réinitialisation"},
- "resetTokenExpires": {"en": "Reset Token Expires", "de": "Token läuft ab", "fr": "Expiration du jeton"},
- },
-)
+ hashedPassword: Optional[str] = Field(
+ None,
+ description="Hash of the user password",
+ json_schema_extra={"label": "Passwort-Hash"},
+ )
+ resetToken: Optional[str] = Field(
+ None,
+ description="Password reset token (UUID)",
+ json_schema_extra={"label": "Reset-Token"},
+ )
+ resetTokenExpires: Optional[float] = Field(
+ None,
+ description="Reset token expiration (UTC timestamp in seconds)",
+ json_schema_extra={"label": "Token läuft ab"},
+ )
def _normalizeTtsVoiceMap(value: Any) -> Optional[Dict[str, str]]:
@@ -336,17 +361,50 @@ def _normalizeTtsVoiceMap(value: Any) -> Optional[Dict[str, str]]:
return out if out else None
+@i18nModel("Spracheinstellungen")
class UserVoicePreferences(PowerOnModel):
"""User-level voice/language preferences, shared across all features."""
- id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Primary key")
- userId: str = Field(description="User ID")
- mandateId: Optional[str] = Field(default=None, description="Mandate scope (None = global for user)")
- sttLanguage: str = Field(default="de-DE", description="Speech-to-text language code")
- ttsLanguage: str = Field(default="de-DE", description="Text-to-speech language code")
- ttsVoice: Optional[str] = Field(default=None, description="Preferred TTS voice identifier")
- ttsVoiceMap: Optional[Dict[str, str]] = Field(default=None, description="Language-to-voice mapping")
- translationSourceLanguage: Optional[str] = Field(default=None, description="Source language for translations")
- translationTargetLanguage: Optional[str] = Field(default=None, description="Target language for translations")
+ id: str = Field(
+ default_factory=lambda: str(uuid.uuid4()),
+ description="Primary key",
+ json_schema_extra={"label": "ID"},
+ )
+ userId: str = Field(description="User ID", json_schema_extra={"label": "Benutzer-ID"})
+ mandateId: Optional[str] = Field(
+ default=None,
+ description="Mandate scope (None = global for user)",
+ json_schema_extra={"label": "Mandanten-ID"},
+ )
+ sttLanguage: str = Field(
+ default="de-DE",
+ description="Speech-to-text language code",
+ json_schema_extra={"label": "STT-Sprache"},
+ )
+ ttsLanguage: str = Field(
+ default="de-DE",
+ description="Text-to-speech language code",
+ json_schema_extra={"label": "TTS-Sprache"},
+ )
+ ttsVoice: Optional[str] = Field(
+ default=None,
+ description="Preferred TTS voice identifier",
+ json_schema_extra={"label": "TTS-Stimme"},
+ )
+ ttsVoiceMap: Optional[Dict[str, str]] = Field(
+ default=None,
+ description="Language-to-voice mapping",
+ json_schema_extra={"label": "Stimmen-Zuordnung"},
+ )
+ translationSourceLanguage: Optional[str] = Field(
+ default=None,
+ description="Source language for translations",
+ json_schema_extra={"label": "Übersetzung Quelle"},
+ )
+ translationTargetLanguage: Optional[str] = Field(
+ default=None,
+ description="Target language for translations",
+ json_schema_extra={"label": "Übersetzung Ziel"},
+ )
@field_validator("ttsVoiceMap", mode="before")
@classmethod
@@ -354,18 +412,3 @@ class UserVoicePreferences(PowerOnModel):
return _normalizeTtsVoiceMap(value)
-registerModelLabels(
- "UserVoicePreferences",
- {"en": "Voice Preferences", "de": "Spracheinstellungen", "fr": "Préférences vocales"},
- {
- "id": {"en": "ID", "de": "ID", "fr": "ID"},
- "userId": {"en": "User ID", "de": "Benutzer-ID", "fr": "ID utilisateur"},
- "mandateId": {"en": "Mandate ID", "de": "Mandanten-ID", "fr": "ID du mandat"},
- "sttLanguage": {"en": "STT Language", "de": "STT-Sprache", "fr": "Langue STT"},
- "ttsLanguage": {"en": "TTS Language", "de": "TTS-Sprache", "fr": "Langue TTS"},
- "ttsVoice": {"en": "TTS Voice", "de": "TTS-Stimme", "fr": "Voix TTS"},
- "ttsVoiceMap": {"en": "Voice Map", "de": "Stimmen-Zuordnung", "fr": "Carte des voix"},
- "translationSourceLanguage": {"en": "Translation Source", "de": "Übersetzung Quelle", "fr": "Langue source"},
- "translationTargetLanguage": {"en": "Translation Target", "de": "Übersetzung Ziel", "fr": "Langue cible"},
- },
-)
diff --git a/modules/datamodels/datamodelUiLanguage.py b/modules/datamodels/datamodelUiLanguage.py
new file mode 100644
index 00000000..4c589bb3
--- /dev/null
+++ b/modules/datamodels/datamodelUiLanguage.py
@@ -0,0 +1,98 @@
+# Copyright (c) 2025 Patrick Motsch
+# All rights reserved.
+"""UI language sets: structured i18n entries (context, key, value)."""
+
+from typing import List, Literal
+
+from pydantic import BaseModel, Field
+
+from modules.datamodels.datamodelBase import PowerOnModel
+from modules.shared.i18nRegistry import i18nModel
+
+
+UiLanguageStatus = Literal["complete", "incomplete", "generating"]
+
+
+class I18nEntry(BaseModel):
+ """Single translation entry within a language set.
+
+ context: origin of the key, e.g. "ui" for frontend elements,
+ "db.management.files.name" for backend data objects.
+ key: German plaintext (the canonical identifier across all sets).
+ value: For xx (base set): UI context description for AI translation.
+ For language sets (de, en, ...): the translated text.
+ """
+
+ context: str = Field(
+ ...,
+ description="Origin: 'ui' for frontend, 'db...' for backend objects",
+ )
+ key: str = Field(
+ ...,
+ description="German plaintext key (canonical identifier)",
+ )
+ value: str = Field(
+ default="",
+ description="Translation (language sets) or context description (xx base set)",
+ )
+
+
+@i18nModel("UI-Sprachset")
+class UiLanguageSet(PowerOnModel):
+ """Ein Sprachset pro Sprache. id = ISO 639-1 Code oder 'xx' (Basisset). Enthaelt alle Uebersetzungen."""
+
+ id: str = Field(
+ ...,
+ description="ISO 639-1 language code or 'xx' for the base set",
+ json_schema_extra={
+ "label": "Code",
+ "frontend_type": "text",
+ "frontend_readonly": False,
+ "frontend_required": True,
+ },
+ )
+ label: str = Field(
+ ...,
+ description="Human-readable language name",
+ json_schema_extra={
+ "label": "Bezeichnung",
+ "frontend_type": "text",
+ "frontend_readonly": False,
+ "frontend_required": True,
+ },
+ )
+ entries: List[I18nEntry] = Field(
+ default_factory=list,
+ description="Translation entries: list of {context, key, value}",
+ json_schema_extra={
+ "label": "Eintraege",
+ "frontend_type": "textarea",
+ "frontend_readonly": False,
+ "frontend_required": False,
+ },
+ )
+ status: UiLanguageStatus = Field(
+ default="complete",
+ description="complete | incomplete | generating",
+ json_schema_extra={
+ "label": "Status",
+ "frontend_type": "select",
+ "frontend_readonly": False,
+ "frontend_required": True,
+ "frontend_options": [
+ {"value": "complete", "label": "Vollständig"},
+ {"value": "incomplete", "label": "Unvollständig"},
+ {"value": "generating", "label": "Wird erzeugt"},
+ ],
+ },
+ )
+ isDefault: bool = Field(
+ default=False,
+ description="True only for the xx base set",
+ json_schema_extra={
+ "label": "Standard",
+ "frontend_type": "boolean",
+ "frontend_readonly": False,
+ "frontend_required": False,
+ },
+ )
diff --git a/modules/datamodels/datamodelUtils.py b/modules/datamodels/datamodelUtils.py
index 1088cb31..0c134ed2 100644
--- a/modules/datamodels/datamodelUtils.py
+++ b/modules/datamodels/datamodelUtils.py
@@ -2,83 +2,139 @@
# All rights reserved.
"""Utility datamodels: Prompt, TextMultilingual."""
-from typing import Dict, Optional
-from pydantic import BaseModel, Field, field_validator
+import json
+from typing import Any, Dict
+
+from pydantic import BaseModel, Field, field_validator, model_validator
from modules.datamodels.datamodelBase import PowerOnModel
-from modules.shared.attributeUtils import registerModelLabels
+from modules.shared.i18nRegistry import i18nModel
import uuid
+@i18nModel("Prompt")
class Prompt(PowerOnModel):
- id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Primary key", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False})
- mandateId: str = Field(default="", description="ID of the mandate this prompt belongs to", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False})
- isSystem: bool = Field(default=False, description="System prompt visible to all users (read-only for non-SysAdmin)", json_schema_extra={"frontend_type": "boolean", "frontend_readonly": True, "frontend_required": False})
- content: str = Field(description="Content of the prompt", json_schema_extra={"frontend_type": "textarea", "frontend_readonly": False, "frontend_required": True})
- name: str = Field(description="Name of the prompt", json_schema_extra={"frontend_type": "text", "frontend_readonly": False, "frontend_required": True})
-
+ """Benutzer- oder System-Prompt fuer die KI."""
+ id: str = Field(
+ default_factory=lambda: str(uuid.uuid4()),
+ description="Primary key",
+ json_schema_extra={"label": "ID", "frontend_type": "text", "frontend_readonly": True, "frontend_required": False},
+ )
+ mandateId: str = Field(
+ default="",
+ description="ID of the mandate this prompt belongs to",
+ json_schema_extra={"label": "Mandanten-ID", "frontend_type": "text", "frontend_readonly": True, "frontend_required": False},
+ )
+ isSystem: bool = Field(
+ default=False,
+ description="System prompt visible to all users (read-only for non-SysAdmin)",
+ json_schema_extra={"label": "System", "frontend_type": "boolean", "frontend_readonly": True, "frontend_required": False},
+ )
+ content: str = Field(
+ description="Content of the prompt",
+ json_schema_extra={"label": "Inhalt", "frontend_type": "textarea", "frontend_readonly": False, "frontend_required": True},
+ )
+ name: str = Field(
+ description="Name of the prompt",
+ json_schema_extra={"label": "Name", "frontend_type": "text", "frontend_readonly": False, "frontend_required": True},
+ )
+
@field_validator('isSystem', mode='before')
@classmethod
def _coerceIsSystem(cls, v):
- """Existing records may have isSystem=None (field didn't exist). Treat None as False."""
if v is None:
return False
return v
-registerModelLabels(
- "Prompt",
- {"en": "Prompt", "fr": "Invite"},
- {
- "id": {"en": "ID", "fr": "ID"},
- "mandateId": {"en": "Mandate ID", "fr": "ID du mandat"},
- "isSystem": {"en": "System", "fr": "Système"},
- "content": {"en": "Content", "fr": "Contenu"},
- "name": {"en": "Name", "fr": "Nom"},
- },
-)
class TextMultilingual(BaseModel):
+ """Multilingual text field stored as JSONB: {"xx": "source text", "de": "...", "en": "...", ...}.
+
+ - xx = source/default text (required). Same role as xx in the UI i18n system.
+ - All language codes (de, en, fr, ...) are dynamic, populated via batch translation.
+ - No hardcoded language fields. The DB column is JSONB with arbitrary keys.
"""
- Multilingual text field supporting multiple languages.
- Default languages: en (English), ge (German), fr (French), it (Italian)
- English (en) is the default/required language.
- """
- en: str = Field(description="English text (default language, required)")
- ge: Optional[str] = Field(None, description="German text")
- fr: Optional[str] = Field(None, description="French text")
- it: Optional[str] = Field(None, description="Italian text")
-
- @field_validator('en')
+
+ model_config = {"extra": "allow"}
+
+ xx: str = Field(description="Source/default text (required)")
+
+ @model_validator(mode='before')
@classmethod
- def validate_en_required(cls, v):
- """Ensure English text is not empty"""
+ def _ensureXx(cls, data: Any) -> Any:
+ """Derive xx from existing language keys when missing (legacy DB rows)."""
+ if not isinstance(data, dict):
+ return data
+ if data.get('xx') and isinstance(data['xx'], str) and data['xx'].strip():
+ return data
+ fallback = data.get('de') or data.get('en')
+ if not fallback or not isinstance(fallback, str) or not fallback.strip():
+ for v in data.values():
+ if v and isinstance(v, str) and v.strip():
+ fallback = v
+ break
+ data['xx'] = fallback.strip() if fallback and isinstance(fallback, str) else '—'
+ return data
+
+ @field_validator('xx')
+ @classmethod
+ def _validateXxRequired(cls, v):
if not v or not v.strip():
- raise ValueError("English text (en) is required and cannot be empty")
+ raise ValueError("Source text (xx) is required and cannot be empty")
return v
-
+
def model_dump(self, **kwargs) -> Dict[str, str]:
- """Return as dictionary, filtering out None values"""
- result = {}
- for lang in ['en', 'ge', 'fr', 'it']:
- value = getattr(self, lang, None)
- if value is not None:
- result[lang] = value
+ result = {"xx": self.xx}
+ if self.__pydantic_extra__:
+ for k, v in self.__pydantic_extra__.items():
+ if v is not None and isinstance(v, str):
+ result[k] = v
return result
-
+
@classmethod
def from_dict(cls, data: Dict[str, str]) -> 'TextMultilingual':
- """Create TextMultilingual from dictionary"""
- return cls(
- en=data.get('en', ''),
- ge=data.get('ge'),
- fr=data.get('fr'),
- it=data.get('it')
- )
-
- def get_text(self, lang: str = 'en') -> str:
- """Get text for a specific language, fallback to English if not available"""
- value = getattr(self, lang, None)
- if value:
+ cleaned = {k: v for k, v in data.items() if v is not None and isinstance(v, str)}
+ if not cleaned.get('xx'):
+ cleaned['xx'] = cleaned.get('de') or next((v for v in cleaned.values() if v), '—')
+ return cls(**cleaned)
+
+ def get_text(self, lang: str = 'de') -> str:
+ """Get text for a language. Falls back to xx (source text)."""
+ if lang == 'xx':
+ return self.xx
+ extra = self.__pydantic_extra__ or {}
+ value = extra.get(lang)
+ if value and isinstance(value, str):
return value
- return self.en # Fallback to English
+ return self.xx
+
+ @classmethod
+ def fromUniform(cls, text: str) -> "TextMultilingual":
+ """Create with source text only. Languages are populated by batch translation."""
+ t = text.strip()
+ if not t:
+ raise ValueError("Text must be non-empty")
+ return cls(xx=t)
+def coerce_text_multilingual(val: Any) -> TextMultilingual:
+ """Normalize str, dict, or TextMultilingual into a valid TextMultilingual instance."""
+ if isinstance(val, TextMultilingual):
+ return val
+ if isinstance(val, dict):
+ if not val:
+ return TextMultilingual.fromUniform("—")
+ cleaned = {k: v for k, v in val.items() if v is not None and isinstance(v, str)}
+ if not cleaned.get("xx"):
+ cleaned["xx"] = cleaned.get("de") or next((v for v in cleaned.values() if v), "—")
+ return TextMultilingual(**cleaned)
+ if isinstance(val, str) and val.strip():
+ s = val.strip()
+ if s.startswith("{") and s.endswith("}"):
+ try:
+ parsed = json.loads(s)
+ if isinstance(parsed, dict):
+ return coerce_text_multilingual(parsed)
+ except json.JSONDecodeError:
+ pass
+ return TextMultilingual.fromUniform(s)
+ return TextMultilingual.fromUniform("—")
diff --git a/modules/datamodels/datamodelWorkflow.py b/modules/datamodels/datamodelWorkflow.py
index 1a1e49e8..490d9fb0 100644
--- a/modules/datamodels/datamodelWorkflow.py
+++ b/modules/datamodels/datamodelWorkflow.py
@@ -6,45 +6,52 @@ Workflow execution models for action definitions, AI responses, and workflow-lev
from typing import Dict, Any, List, Optional, TYPE_CHECKING
from pydantic import BaseModel, Field
-from modules.shared.attributeUtils import registerModelLabels
+from modules.shared.i18nRegistry import i18nModel
from modules.shared.jsonUtils import extractJsonString, tryParseJson, repairBrokenJson
# Import DocumentReferenceList at runtime (needed for ActionDefinition)
from modules.datamodels.datamodelDocref import DocumentReferenceList
+@i18nModel("Aktionsdefinition")
class ActionDefinition(BaseModel):
"""Action definition with selection and parameters from planning phase"""
# Core action selection (Stage 1)
- action: str = Field(description="Compound action name (method.action)")
- actionObjective: str = Field(description="Objective for this action")
+ action: str = Field(description="Compound action name (method.action)", json_schema_extra={"label": "Aktion"})
+ actionObjective: str = Field(description="Objective for this action", json_schema_extra={"label": "Aktionsziel"})
userMessage: Optional[str] = Field(
None,
- description="User-friendly message in user's language explaining what this action will do (generated by AI in prompts)"
+ description="User-friendly message in user's language explaining what this action will do (generated by AI in prompts)",
+ json_schema_extra={"label": "Benutzernachricht"},
)
parametersContext: Optional[str] = Field(
None,
- description="Context for parameter generation"
+ description="Context for parameter generation",
+ json_schema_extra={"label": "Parameter-Kontext"},
)
learnings: List[str] = Field(
default_factory=list,
- description="Learnings from previous actions"
+ description="Learnings from previous actions",
+ json_schema_extra={"label": "Erkenntnisse"},
)
# Resources (ALWAYS defined in Stage 1 if action needs them)
documentList: Optional[DocumentReferenceList] = Field(
None,
- description="Document references (ALWAYS defined in Stage 1 if action needs documents)"
+ description="Document references (ALWAYS defined in Stage 1 if action needs documents)",
+ json_schema_extra={"label": "Dokumentenliste"},
)
connectionReference: Optional[str] = Field(
None,
- description="Connection reference (ALWAYS defined in Stage 1 if action needs connection)"
+ description="Connection reference (ALWAYS defined in Stage 1 if action needs connection)",
+ json_schema_extra={"label": "Verbindungsreferenz"},
)
# Parameters (may be defined in Stage 1 OR Stage 2, depending on action and actionObjective)
parameters: Optional[Dict[str, Any]] = Field(
None,
- description="Action-specific parameters (generated in Stage 2 for complex actions, or inferred from actionObjective for simple actions)"
+ description="Action-specific parameters (generated in Stage 2 for complex actions, or inferred from actionObjective for simple actions)",
+ json_schema_extra={"label": "Parameter"},
)
def hasParameters(self) -> bool:
@@ -75,34 +82,47 @@ class ActionDefinition(BaseModel):
self.connectionReference = connectionRef
+@i18nModel("KI-Antwort-Metadaten")
class AiResponseMetadata(BaseModel):
"""Metadata for AI response (varies by operation type)."""
# Document Generation Metadata
- title: Optional[str] = Field(None, description="Document title")
- filename: Optional[str] = Field(None, description="Document filename")
+ title: Optional[str] = Field(None, description="Document title", json_schema_extra={"label": "Titel"})
+ filename: Optional[str] = Field(None, description="Document filename", json_schema_extra={"label": "Dateiname"})
# Operation-Specific Metadata
- operationType: Optional[str] = Field(None, description="Type of operation performed")
- schemaVersion: Optional[str] = Field(None, description="Schema version (e.g., 'parameters_v1')", alias="schema")
- extractionMethod: Optional[str] = Field(None, description="Method used for extraction")
- sourceDocuments: Optional[List[str]] = Field(None, description="Source document references")
+ operationType: Optional[str] = Field(None, description="Type of operation performed", json_schema_extra={"label": "Vorgangstyp"})
+ schemaVersion: Optional[str] = Field(
+ None,
+ description="Schema version (e.g., 'parameters_v1')",
+ alias="schema",
+ json_schema_extra={"label": "Schema-Version"},
+ )
+ extractionMethod: Optional[str] = Field(None, description="Method used for extraction", json_schema_extra={"label": "Extraktionsmethode"})
+ sourceDocuments: Optional[List[str]] = Field(None, description="Source document references", json_schema_extra={"label": "Quelldokumente"})
# Additional metadata (for extensibility)
- additionalData: Optional[Dict[str, Any]] = Field(None, description="Additional operation-specific metadata")
-
-
-class DocumentData(BaseModel):
- """Single document in response"""
- documentName: str = Field(description="Document name")
- documentData: Any = Field(description="Document data (can be str, bytes, dict, etc.)")
- mimeType: str = Field(description="MIME type of the document")
- sourceJson: Optional[Dict[str, Any]] = Field(
+ additionalData: Optional[Dict[str, Any]] = Field(
None,
- description="Source JSON structure (preserved when rendering to xlsx/docx/pdf)"
+ description="Additional operation-specific metadata",
+ json_schema_extra={"label": "Zusätzliche Daten"},
)
+@i18nModel("Dokumentdaten")
+class DocumentData(BaseModel):
+ """Single document in response"""
+ documentName: str = Field(description="Document name", json_schema_extra={"label": "Dokumentname"})
+ documentData: Any = Field(description="Document data (can be str, bytes, dict, etc.)", json_schema_extra={"label": "Dokumentdaten"})
+ mimeType: str = Field(description="MIME type of the document", json_schema_extra={"label": "MIME-Typ"})
+ sourceJson: Optional[Dict[str, Any]] = Field(
+ None,
+ description="Source JSON structure (preserved when rendering to xlsx/docx/pdf)",
+ json_schema_extra={"label": "Quell-JSON"},
+ )
+
+
+@i18nModel("Extraktionsparameter")
class ExtractContentParameters(BaseModel):
"""Parameters for extraction action.
@@ -110,24 +130,34 @@ class ExtractContentParameters(BaseModel):
All action parameter models follow this pattern: defined in the same module as the action.
However, since this is a workflow-level model used across the system, it's defined here.
"""
- documentList: DocumentReferenceList = Field(description="Document references to extract content from")
+ documentList: DocumentReferenceList = Field(
+ description="Document references to extract content from",
+ json_schema_extra={"label": "Dokumentenliste"},
+ )
extractionOptions: Optional[Any] = Field( # ExtractionOptions - forward reference
None,
- description="Extraction options (determined dynamically based on task and document characteristics)"
+ description="Extraction options (determined dynamically based on task and document characteristics)",
+ json_schema_extra={"label": "Extraktionsoptionen"},
)
+@i18nModel("KI-Antwort")
class AiResponse(BaseModel):
"""Unified response from all AI calls (planning, text, documents)"""
- content: str = Field(description="Response content (JSON string for planning, text for analysis, unified JSON for documents)")
+ content: str = Field(
+ description="Response content (JSON string for planning, text for analysis, unified JSON for documents)",
+ json_schema_extra={"label": "Inhalt"},
+ )
metadata: Optional[AiResponseMetadata] = Field(
None,
- description="Response metadata (varies by operation type)"
+ description="Response metadata (varies by operation type)",
+ json_schema_extra={"label": "Metadaten"},
)
documents: Optional[List[DocumentData]] = Field(
None,
- description="Generated documents (only for document generation operations)"
+ description="Generated documents (only for document generation operations)",
+ json_schema_extra={"label": "Dokumente"},
)
def toJson(self) -> Dict[str, Any]:
@@ -186,278 +216,88 @@ class AiResponse(BaseModel):
# Workflow-level models
+@i18nModel("Anfragekontext")
class RequestContext(BaseModel):
"""Normalized request context from user input"""
- originalPrompt: str = Field(description="Original user prompt")
+ originalPrompt: str = Field(description="Original user prompt", json_schema_extra={"label": "Ursprüngliche Eingabe"})
documents: List[Any] = Field( # ChatDocument - forward reference
default_factory=list,
- description="Documents provided by user"
+ description="Documents provided by user",
+ json_schema_extra={"label": "Dokumente"},
)
- userLanguage: str = Field(description="User's language")
+ userLanguage: str = Field(description="User's language", json_schema_extra={"label": "Benutzersprache"})
detectedComplexity: str = Field(
- description="Complexity level: simple, moderate, complex"
+ description="Complexity level: simple, moderate, complex",
+ json_schema_extra={"label": "Erkannte Komplexität"},
)
- requiresDocuments: bool = Field(default=False, description="Whether request requires documents")
- requiresWebResearch: bool = Field(default=False, description="Whether request requires web research")
- requiresAnalysis: bool = Field(default=False, description="Whether request requires analysis")
- expectedOutputFormat: Optional[str] = Field(None, description="Expected output format")
- expectedOutputType: Optional[str] = Field(None, description="Expected output type: answer, document, analysis")
+ requiresDocuments: bool = Field(default=False, description="Whether request requires documents", json_schema_extra={"label": "Benötigt Dokumente"})
+ requiresWebResearch: bool = Field(default=False, description="Whether request requires web research", json_schema_extra={"label": "Benötigt Web-Recherche"})
+ requiresAnalysis: bool = Field(default=False, description="Whether request requires analysis", json_schema_extra={"label": "Benötigt Analyse"})
+ expectedOutputFormat: Optional[str] = Field(None, description="Expected output format", json_schema_extra={"label": "Erwartetes Ausgabeformat"})
+ expectedOutputType: Optional[str] = Field(None, description="Expected output type: answer, document, analysis", json_schema_extra={"label": "Erwarteter Ausgabetyp"})
+@i18nModel("Verständnis-Ergebnis")
class UnderstandingResult(BaseModel):
"""Result from initial understanding phase (combined AI call)"""
parameters: Dict[str, Any] = Field(
default_factory=dict,
- description="Basic parameters (language, format, detail level)"
+ description="Basic parameters (language, format, detail level)",
+ json_schema_extra={"label": "Parameter"},
)
intention: Dict[str, Any] = Field(
default_factory=dict,
- description="User intention (primaryGoal, secondaryGoals, intentionType)"
+ description="User intention (primaryGoal, secondaryGoals, intentionType)",
+ json_schema_extra={"label": "Absicht"},
)
context: Dict[str, Any] = Field(
default_factory=dict,
- description="Extracted context (topics, requirements, constraints)"
+ description="Extracted context (topics, requirements, constraints)",
+ json_schema_extra={"label": "Kontext"},
)
documentReferences: List[Dict[str, Any]] = Field(
default_factory=list,
- description="Document references with purpose and relevance"
+ description="Document references with purpose and relevance",
+ json_schema_extra={"label": "Dokumentenreferenzen"},
)
tasks: List["TaskDefinition"] = Field( # Forward reference
default_factory=list,
- description="Task definitions with deliverables"
+ description="Task definitions with deliverables",
+ json_schema_extra={"label": "Aufgaben"},
)
+@i18nModel("Aufgabenbeschreibung")
class TaskDefinition(BaseModel):
"""Task definition from understanding phase"""
- id: str = Field(description="Task identifier")
- objective: str = Field(description="Task objective")
+ id: str = Field(description="Task identifier", json_schema_extra={"label": "Aufgaben-ID"})
+ objective: str = Field(description="Task objective", json_schema_extra={"label": "Ziel"})
deliverable: Dict[str, Any] = Field(
- description="Deliverable specification (type, format, style, detailLevel)"
+ description="Deliverable specification (type, format, style, detailLevel)",
+ json_schema_extra={"label": "Lieferobjekt"},
)
- requiresWebResearch: bool = Field(default=False, description="Whether task requires web research")
- requiresDocumentAnalysis: bool = Field(default=False, description="Whether task requires document analysis")
- requiresContentGeneration: bool = Field(default=True, description="Whether task requires content generation")
+ requiresWebResearch: bool = Field(default=False, description="Whether task requires web research", json_schema_extra={"label": "Benötigt Web-Recherche"})
+ requiresDocumentAnalysis: bool = Field(default=False, description="Whether task requires document analysis", json_schema_extra={"label": "Benötigt Dokumentenanalyse"})
+ requiresContentGeneration: bool = Field(default=True, description="Whether task requires content generation", json_schema_extra={"label": "Benötigt Inhaltserstellung"})
requiredDocuments: List[str] = Field(
default_factory=list,
- description="Document references needed for this task"
+ description="Document references needed for this task",
+ json_schema_extra={"label": "Benötigte Dokumente"},
)
extractionOptions: Optional[Any] = Field( # ExtractionOptions - forward reference
None,
- description="Extraction options for document processing (determined dynamically based on task and document characteristics)"
+ description="Extraction options for document processing (determined dynamically based on task and document characteristics)",
+ json_schema_extra={"label": "Extraktionsoptionen"},
)
-class TaskResult(BaseModel):
+@i18nModel("Workflow-Aufgabenergebnis")
+class WorkflowTaskResult(BaseModel):
"""Result from task execution"""
- taskId: str = Field(description="Task identifier")
- actionResult: Any = Field(description="ActionResult from task execution") # ActionResult - forward reference
-
-
-# Register model labels for UI
-registerModelLabels(
- "RequestContext",
- {"en": "Request Context", "fr": "Contexte de la demande"},
- {
- "originalPrompt": {"en": "Original Prompt", "fr": "Invite originale"},
- "documents": {"en": "Documents", "fr": "Documents"},
- "userLanguage": {"en": "User Language", "fr": "Langue de l'utilisateur"},
- "detectedComplexity": {"en": "Detected Complexity", "fr": "Complexité détectée"},
- "requiresDocuments": {"en": "Requires Documents", "fr": "Nécessite des documents"},
- "requiresWebResearch": {"en": "Requires Web Research", "fr": "Nécessite une recherche web"},
- "requiresAnalysis": {"en": "Requires Analysis", "fr": "Nécessite une analyse"},
- "expectedOutputFormat": {"en": "Expected Output Format", "fr": "Format de sortie attendu"},
- "expectedOutputType": {"en": "Expected Output Type", "fr": "Type de sortie attendu"},
- },
-)
-
-registerModelLabels(
- "UnderstandingResult",
- {"en": "Understanding Result", "fr": "Résultat de compréhension"},
- {
- "parameters": {"en": "Parameters", "fr": "Paramètres"},
- "intention": {"en": "Intention", "fr": "Intention"},
- "context": {"en": "Context", "fr": "Contexte"},
- "documentReferences": {"en": "Document References", "fr": "Références de documents"},
- "tasks": {"en": "Tasks", "fr": "Tâches"},
- },
-)
-
-registerModelLabels(
- "TaskDefinition",
- {"en": "Task Definition", "fr": "Définition de tâche"},
- {
- "id": {"en": "Task ID", "fr": "ID de la tâche"},
- "objective": {"en": "Objective", "fr": "Objectif"},
- "deliverable": {"en": "Deliverable", "fr": "Livrable"},
- "requiresWebResearch": {"en": "Requires Web Research", "fr": "Nécessite une recherche web"},
- "requiresDocumentAnalysis": {"en": "Requires Document Analysis", "fr": "Nécessite une analyse de documents"},
- "requiresContentGeneration": {"en": "Requires Content Generation", "fr": "Nécessite une génération de contenu"},
- "requiredDocuments": {"en": "Required Documents", "fr": "Documents requis"},
- "extractionOptions": {"en": "Extraction Options", "fr": "Options d'extraction"},
- },
-)
-
-registerModelLabels(
- "TaskResult",
- {"en": "Task Result", "fr": "Résultat de tâche"},
- {
- "taskId": {"en": "Task ID", "fr": "ID de la tâche"},
- "actionResult": {"en": "Action Result", "fr": "Résultat de l'action"},
- },
-)
-
-registerModelLabels(
- "RequestContext",
- {"en": "Request Context", "fr": "Contexte de la demande"},
- {
- "originalPrompt": {"en": "Original Prompt", "fr": "Invite originale"},
- "documents": {"en": "Documents", "fr": "Documents"},
- "userLanguage": {"en": "User Language", "fr": "Langue de l'utilisateur"},
- "detectedComplexity": {"en": "Detected Complexity", "fr": "Complexité détectée"},
- "requiresDocuments": {"en": "Requires Documents", "fr": "Nécessite des documents"},
- "requiresWebResearch": {"en": "Requires Web Research", "fr": "Nécessite une recherche web"},
- "requiresAnalysis": {"en": "Requires Analysis", "fr": "Nécessite une analyse"},
- "expectedOutputFormat": {"en": "Expected Output Format", "fr": "Format de sortie attendu"},
- "expectedOutputType": {"en": "Expected Output Type", "fr": "Type de sortie attendu"},
- },
-)
-
-registerModelLabels(
- "UnderstandingResult",
- {"en": "Understanding Result", "fr": "Résultat de compréhension"},
- {
- "parameters": {"en": "Parameters", "fr": "Paramètres"},
- "intention": {"en": "Intention", "fr": "Intention"},
- "context": {"en": "Context", "fr": "Contexte"},
- "documentReferences": {"en": "Document References", "fr": "Références de documents"},
- "tasks": {"en": "Tasks", "fr": "Tâches"},
- },
-)
-
-registerModelLabels(
- "TaskDefinition",
- {"en": "Task Definition", "fr": "Définition de tâche"},
- {
- "id": {"en": "Task ID", "fr": "ID de la tâche"},
- "objective": {"en": "Objective", "fr": "Objectif"},
- "deliverable": {"en": "Deliverable", "fr": "Livrable"},
- "requiresWebResearch": {"en": "Requires Web Research", "fr": "Nécessite une recherche web"},
- "requiresDocumentAnalysis": {"en": "Requires Document Analysis", "fr": "Nécessite une analyse de documents"},
- "requiresContentGeneration": {"en": "Requires Content Generation", "fr": "Nécessite une génération de contenu"},
- "requiredDocuments": {"en": "Required Documents", "fr": "Documents requis"},
- "extractionOptions": {"en": "Extraction Options", "fr": "Options d'extraction"},
- },
-)
-
-registerModelLabels(
- "TaskResult",
- {"en": "Task Result", "fr": "Résultat de tâche"},
- {
- "taskId": {"en": "Task ID", "fr": "ID de la tâche"},
- "actionResult": {"en": "Action Result", "fr": "Résultat de l'action"},
- },
-)
-
-# Register model labels for UI
-registerModelLabels(
- "ActionDefinition",
- {"en": "Action Definition", "fr": "Définition d'action"},
- {
- "action": {"en": "Action", "fr": "Action"},
- "actionObjective": {"en": "Action Objective", "fr": "Objectif de l'action"},
- "parametersContext": {"en": "Parameters Context", "fr": "Contexte des paramètres"},
- "learnings": {"en": "Learnings", "fr": "Apprentissages"},
- "documentList": {"en": "Document List", "fr": "Liste de documents"},
- "connectionReference": {"en": "Connection Reference", "fr": "Référence de connexion"},
- "parameters": {"en": "Parameters", "fr": "Paramètres"},
- },
-)
-
-registerModelLabels(
- "AiResponse",
- {"en": "AI Response", "fr": "Réponse IA"},
- {
- "content": {"en": "Content", "fr": "Contenu"},
- "metadata": {"en": "Metadata", "fr": "Métadonnées"},
- "documents": {"en": "Documents", "fr": "Documents"},
- },
-)
-
-registerModelLabels(
- "AiResponseMetadata",
- {"en": "AI Response Metadata", "fr": "Métadonnées de réponse IA"},
- {
- "title": {"en": "Title", "fr": "Titre"},
- "filename": {"en": "Filename", "fr": "Nom de fichier"},
- "operationType": {"en": "Operation Type", "fr": "Type d'opération"},
- "schemaVersion": {"en": "Schema Version", "fr": "Version du schéma"},
- "extractionMethod": {"en": "Extraction Method", "fr": "Méthode d'extraction"},
- "sourceDocuments": {"en": "Source Documents", "fr": "Documents sources"},
- },
-)
-
-registerModelLabels(
- "DocumentData",
- {"en": "Document Data", "fr": "Données de document"},
- {
- "documentName": {"en": "Document Name", "fr": "Nom du document"},
- "documentData": {"en": "Document Data", "fr": "Données du document"},
- "mimeType": {"en": "MIME Type", "fr": "Type MIME"},
- },
-)
-
-registerModelLabels(
- "RequestContext",
- {"en": "Request Context", "fr": "Contexte de requête"},
- {
- "originalPrompt": {"en": "Original Prompt", "fr": "Invite originale"},
- "documents": {"en": "Documents", "fr": "Documents"},
- "userLanguage": {"en": "User Language", "fr": "Langue de l'utilisateur"},
- "detectedComplexity": {"en": "Detected Complexity", "fr": "Complexité détectée"},
- "requiresDocuments": {"en": "Requires Documents", "fr": "Nécessite des documents"},
- "requiresWebResearch": {"en": "Requires Web Research", "fr": "Nécessite une recherche web"},
- "requiresAnalysis": {"en": "Requires Analysis", "fr": "Nécessite une analyse"},
- },
-)
-
-registerModelLabels(
- "UnderstandingResult",
- {"en": "Understanding Result", "fr": "Résultat de compréhension"},
- {
- "parameters": {"en": "Parameters", "fr": "Paramètres"},
- "intention": {"en": "Intention", "fr": "Intention"},
- "context": {"en": "Context", "fr": "Contexte"},
- "documentReferences": {"en": "Document References", "fr": "Références de documents"},
- "tasks": {"en": "Tasks", "fr": "Tâches"},
- },
-)
-
-registerModelLabels(
- "TaskDefinition",
- {"en": "Task Definition", "fr": "Définition de tâche"},
- {
- "id": {"en": "ID", "fr": "ID"},
- "objective": {"en": "Objective", "fr": "Objectif"},
- "deliverable": {"en": "Deliverable", "fr": "Livrable"},
- "requiresWebResearch": {"en": "Requires Web Research", "fr": "Nécessite une recherche web"},
- "requiresDocumentAnalysis": {"en": "Requires Document Analysis", "fr": "Nécessite une analyse de document"},
- "requiresContentGeneration": {"en": "Requires Content Generation", "fr": "Nécessite une génération de contenu"},
- "requiredDocuments": {"en": "Required Documents", "fr": "Documents requis"},
- "extractionOptions": {"en": "Extraction Options", "fr": "Options d'extraction"},
- },
-)
-
-registerModelLabels(
- "TaskResult",
- {"en": "Task Result", "fr": "Résultat de tâche"},
- {
- "taskId": {"en": "Task ID", "fr": "ID de tâche"},
- "actionResult": {"en": "Action Result", "fr": "Résultat d'action"},
- },
-)
+ taskId: str = Field(description="Task identifier", json_schema_extra={"label": "Aufgaben-ID"})
+ actionResult: Any = Field(description="ActionResult from task execution", json_schema_extra={"label": "Aktionsergebnis"}) # ActionResult - forward reference
diff --git a/modules/datamodels/datamodelWorkflowActions.py b/modules/datamodels/datamodelWorkflowActions.py
index 8bac1fd5..09c07c14 100644
--- a/modules/datamodels/datamodelWorkflowActions.py
+++ b/modules/datamodels/datamodelWorkflowActions.py
@@ -6,85 +6,97 @@ from typing import Optional, Any, Union, List, Dict, Callable, Awaitable
from pydantic import BaseModel, Field
from modules.datamodels.datamodelChat import ActionResult
from modules.shared.frontendTypes import FrontendType
-from modules.shared.attributeUtils import registerModelLabels
+from modules.shared.i18nRegistry import i18nModel
+@i18nModel("Workflow-Aktionsparameter")
class WorkflowActionParameter(BaseModel):
"""
Parameter schema definition for a workflow action.
-
+
This defines the structure and UI rendering for a single action parameter,
NOT the actual parameter values (those are in ActionDefinition.parameters).
"""
- name: str = Field(description="Parameter name")
- type: str = Field(description="Python type as string: 'str', 'int', 'bool', 'List[str]', etc.")
- frontendType: FrontendType = Field(description="UI rendering type (from global FrontendType enum)")
+ name: str = Field(
+ description="Parameter name",
+ json_schema_extra={"label": "Name"},
+ )
+ type: str = Field(
+ description="Python type as string: 'str', 'int', 'bool', 'List[str]', etc.",
+ json_schema_extra={"label": "Typ"},
+ )
+ frontendType: FrontendType = Field(
+ description="UI rendering type (from global FrontendType enum)",
+ json_schema_extra={"label": "Frontend-Typ"},
+ )
frontendOptions: Optional[Union[str, List[str]]] = Field(
None,
- description="Options for select/multiselect/custom types. String reference (e.g., 'user.connection') or list of strings (e.g., ['txt', 'json']). For custom types, this is automatically set to the API endpoint."
+ description="Options for select/multiselect/custom types. String reference (e.g., 'user.connection') or list of strings (e.g., ['txt', 'json']). For custom types, this is automatically set to the API endpoint.",
+ json_schema_extra={"label": "Frontend-Optionen"},
+ )
+ required: bool = Field(
+ False,
+ description="Whether parameter is required",
+ json_schema_extra={"label": "Pflichtfeld"},
+ )
+ default: Optional[Any] = Field(
+ None,
+ description="Default value",
+ json_schema_extra={"label": "Standard"},
+ )
+ description: str = Field(
+ "",
+ description="Parameter description",
+ json_schema_extra={"label": "Beschreibung"},
)
- required: bool = Field(False, description="Whether parameter is required")
- default: Optional[Any] = Field(None, description="Default value")
- description: str = Field("", description="Parameter description")
validation: Optional[Dict[str, Any]] = Field(
None,
- description="Validation rules (e.g., {'min': 1, 'max': 100})"
+ description="Validation rules (e.g., {'min': 1, 'max': 100})",
+ json_schema_extra={"label": "Validierung"},
)
+@i18nModel("Workflow-Aktionsdefinition")
class WorkflowActionDefinition(BaseModel):
"""
Complete schema definition of a workflow action.
-
+
This defines the metadata, parameters, and execution function for an action.
This is different from datamodelWorkflow.ActionDefinition which contains
actual execution values (action, actionObjective, parameters with values).
-
+
This class defines the ACTION SCHEMA, not the execution plan.
"""
actionId: str = Field(
- description="Unique action identifier for RBAC (format: 'module.actionName', e.g., 'outlook.readEmails')"
+ description="Unique action identifier for RBAC (format: 'module.actionName', e.g., 'outlook.readEmails')",
+ json_schema_extra={"label": "Aktions-ID"},
+ )
+ description: str = Field(
+ description="Action description",
+ json_schema_extra={"label": "Beschreibung"},
)
- description: str = Field(description="Action description")
parameters: Dict[str, WorkflowActionParameter] = Field(
default_factory=dict,
- description="Parameter schema definitions"
+ description="Parameter schema definitions",
+ json_schema_extra={"label": "Parameter"},
)
execute: Optional[Callable] = Field(
None,
- description="Execution function - async function that takes parameters dict and returns ActionResult. Set dynamically."
+ description="Execution function - async function that takes parameters dict and returns ActionResult. Set dynamically.",
+ json_schema_extra={"label": "Ausführung"},
+ )
+ category: Optional[str] = Field(
+ None,
+ description="Action category for grouping",
+ json_schema_extra={"label": "Kategorie"},
+ )
+ tags: List[str] = Field(
+ default_factory=list,
+ description="Tags for search/filtering",
+ json_schema_extra={"label": "Tags"},
+ )
+ dynamicMode: bool = Field(
+ False,
+ description="Whether this action is available in dynamic workflow mode (only tagged actions are visible in action planning and refinement prompts)",
+ json_schema_extra={"label": "Dynamischer Modus"},
)
- category: Optional[str] = Field(None, description="Action category for grouping")
- tags: List[str] = Field(default_factory=list, description="Tags for search/filtering")
- dynamicMode: bool = Field(False, description="Whether this action is available in dynamic workflow mode (only tagged actions are visible in action planning and refinement prompts)")
-
-
-# Register model labels for UI
-registerModelLabels(
- "WorkflowActionDefinition",
- {"en": "Workflow Action Definition", "fr": "Définition d'action de workflow"},
- {
- "actionId": {"en": "Action ID", "fr": "ID d'action"},
- "description": {"en": "Description", "fr": "Description"},
- "parameters": {"en": "Parameters", "fr": "Paramètres"},
- "category": {"en": "Category", "fr": "Catégorie"},
- "tags": {"en": "Tags", "fr": "Étiquettes"},
- "dynamicMode": {"en": "Dynamic Mode", "fr": "Mode dynamique"},
- },
-)
-
-registerModelLabels(
- "WorkflowActionParameter",
- {"en": "Workflow Action Parameter", "fr": "Paramètre d'action de workflow"},
- {
- "name": {"en": "Name", "fr": "Nom"},
- "type": {"en": "Type", "fr": "Type"},
- "frontendType": {"en": "Frontend Type", "fr": "Type frontend"},
- "frontendOptions": {"en": "Frontend Options", "fr": "Options frontend"},
- "required": {"en": "Required", "fr": "Requis"},
- "default": {"en": "Default", "fr": "Par défaut"},
- "description": {"en": "Description", "fr": "Description"},
- "validation": {"en": "Validation", "fr": "Validation"},
- },
-)
-
diff --git a/modules/demoConfigs/__init__.py b/modules/demoConfigs/__init__.py
new file mode 100644
index 00000000..9e64cf96
--- /dev/null
+++ b/modules/demoConfigs/__init__.py
@@ -0,0 +1,49 @@
+"""
+Demo Configs — Auto-Discovery Module
+
+Scans this folder for Python files that contain subclasses of _BaseDemoConfig
+and exposes them via _getAvailableDemoConfigs().
+"""
+
+import importlib
+import inspect
+import logging
+import pkgutil
+from typing import Dict
+
+from modules.demoConfigs._baseDemoConfig import _BaseDemoConfig
+
+logger = logging.getLogger(__name__)
+
+_configCache: Dict[str, _BaseDemoConfig] = {}
+
+
+def _getAvailableDemoConfigs() -> Dict[str, _BaseDemoConfig]:
+ """Return a dict of code -> instance for every discovered demo config."""
+ if _configCache:
+ return _configCache
+
+ package = __name__
+ packagePath = __path__
+
+ for importer, moduleName, isPkg in pkgutil.iter_modules(packagePath):
+ if moduleName.startswith("_"):
+ continue
+ try:
+ module = importlib.import_module(f"{package}.{moduleName}")
+ for name, obj in inspect.getmembers(module, inspect.isclass):
+ if issubclass(obj, _BaseDemoConfig) and obj is not _BaseDemoConfig:
+ instance = obj()
+ if instance.code:
+ _configCache[instance.code] = instance
+ logger.info(f"Discovered demo config: {instance.code} ({instance.label})")
+ except Exception as e:
+ logger.warning(f"Failed to load demo config module '{moduleName}': {e}")
+
+ return _configCache
+
+
+def _getDemoConfigByCode(code: str) -> _BaseDemoConfig | None:
+ """Get a specific demo config by its code."""
+ configs = _getAvailableDemoConfigs()
+ return configs.get(code)
diff --git a/modules/demoConfigs/_baseDemoConfig.py b/modules/demoConfigs/_baseDemoConfig.py
new file mode 100644
index 00000000..4d9bdd59
--- /dev/null
+++ b/modules/demoConfigs/_baseDemoConfig.py
@@ -0,0 +1,38 @@
+"""
+Base class for demo configurations.
+
+Each demo config file in this folder extends _BaseDemoConfig and provides
+idempotent load() and remove() methods for setting up / tearing down
+a complete demo environment (mandates, users, features, test data, etc.).
+"""
+
+import logging
+from abc import ABC, abstractmethod
+from typing import Dict, Any
+
+logger = logging.getLogger(__name__)
+
+
+class _BaseDemoConfig(ABC):
+ """Abstract base for demo configurations."""
+
+ code: str = ""
+ label: str = ""
+ description: str = ""
+
+ @abstractmethod
+ def load(self, db) -> Dict[str, Any]:
+ """Create all demo data (idempotent). Returns summary dict."""
+ raise NotImplementedError
+
+ @abstractmethod
+ def remove(self, db) -> Dict[str, Any]:
+ """Remove all demo data. Returns summary dict."""
+ raise NotImplementedError
+
+ def toDict(self) -> Dict[str, Any]:
+ return {
+ "code": self.code,
+ "label": self.label,
+ "description": self.description,
+ }
diff --git a/modules/demoConfigs/investorDemo2026.py b/modules/demoConfigs/investorDemo2026.py
new file mode 100644
index 00000000..bac3d4fe
--- /dev/null
+++ b/modules/demoConfigs/investorDemo2026.py
@@ -0,0 +1,350 @@
+"""
+Investor Demo April 2026
+
+Creates a complete demo environment with two mandates, one user,
+and all feature instances needed for the investor live demo.
+
+Mandates:
+  - HappyLife AG (happylife) — workspace, trustee(RMA), graphicalEditor, chatbot, neutralization
+  - Alpina Treuhand AG (alpina-treuhand) — workspace, trustee(RMA), graphicalEditor, neutralization
+
+User:
+ - Patrick Helvetia (p.motsch@poweron.swiss) — SysAdmin, member of both mandates
+"""
+
+import json
+import logging
+import uuid
+from typing import Dict, Any, Optional, List
+
+from modules.demoConfigs._baseDemoConfig import _BaseDemoConfig
+
+logger = logging.getLogger(__name__)
+
+_DEMO_PREFIX = "demo-inv2026"
+
+_MANDATE_HAPPYLIFE = {
+ "name": "happylife",
+ "label": "HappyLife AG",
+}
+
+_MANDATE_ALPINA = {
+ "name": "alpina-treuhand",
+ "label": "Alpina Treuhand AG",
+}
+
+_USER = {
+ "username": "patrick.helvetia",
+ "email": "p.motsch@poweron.swiss",
+ "fullName": "Patrick Helvetia",
+ "password": "patrick.helvetia",
+ "language": "en",
+}
+
+_FEATURES_HAPPYLIFE = ["workspace", "trustee", "graphicalEditor", "chatbot", "neutralization"]
+_FEATURES_ALPINA = ["workspace", "trustee", "graphicalEditor", "neutralization"]
+
+
+class InvestorDemo2026(_BaseDemoConfig):
+ code = "investor-demo-2026"
+ label = "Investor Demo April 2026"
+ description = (
+ "Two mandates (HappyLife AG + Alpina Treuhand AG), one SysAdmin user, "
+ "trustee with RMA, workspace, graph editor, chatbot, and neutralization."
+ )
+
+ # ------------------------------------------------------------------
+ # load
+ # ------------------------------------------------------------------
+ def load(self, db) -> Dict[str, Any]:
+ summary: Dict[str, Any] = {"created": [], "skipped": [], "errors": []}
+
+ try:
+ mandateIdHappy = self._ensureMandate(db, _MANDATE_HAPPYLIFE, summary)
+ mandateIdAlpina = self._ensureMandate(db, _MANDATE_ALPINA, summary)
+
+ userId = self._ensureUser(db, summary)
+
+ if mandateIdHappy:
+ self._ensureMembership(db, userId, mandateIdHappy, _MANDATE_HAPPYLIFE["label"], summary)
+ self._ensureFeatures(db, mandateIdHappy, _MANDATE_HAPPYLIFE["label"], _FEATURES_HAPPYLIFE, summary)
+
+ if mandateIdAlpina:
+ self._ensureMembership(db, userId, mandateIdAlpina, _MANDATE_ALPINA["label"], summary)
+ self._ensureFeatures(db, mandateIdAlpina, _MANDATE_ALPINA["label"], _FEATURES_ALPINA, summary)
+
+ self._ensureTrusteeRmaConfig(db, mandateIdHappy, _MANDATE_HAPPYLIFE["label"], summary)
+ self._ensureTrusteeRmaConfig(db, mandateIdAlpina, _MANDATE_ALPINA["label"], summary)
+
+ self._ensureNeutralizationConfig(db, mandateIdHappy, userId, summary)
+ self._ensureNeutralizationConfig(db, mandateIdAlpina, userId, summary)
+
+ self._ensureBilling(db, mandateIdHappy, _MANDATE_HAPPYLIFE["label"], summary)
+ self._ensureBilling(db, mandateIdAlpina, _MANDATE_ALPINA["label"], summary)
+
+ except Exception as e:
+ logger.error(f"Demo load failed: {e}", exc_info=True)
+ summary["errors"].append(str(e))
+
+ return summary
+
+ # ------------------------------------------------------------------
+ # remove
+ # ------------------------------------------------------------------
+ def remove(self, db) -> Dict[str, Any]:
+ summary: Dict[str, Any] = {"removed": [], "errors": []}
+
+ from modules.datamodels.datamodelUam import Mandate, UserInDB
+ from modules.datamodels.datamodelMembership import UserMandate
+
+ for mandateDef in [_MANDATE_HAPPYLIFE, _MANDATE_ALPINA]:
+ try:
+ existing = db.getRecordset(Mandate, recordFilter={"name": mandateDef["name"]})
+ for m in existing:
+ mid = m.get("id")
+ db.recordDelete(Mandate, mid)
+ summary["removed"].append(f"Mandate {mandateDef['label']} ({mid})")
+ logger.info(f"Removed mandate {mandateDef['label']} ({mid})")
+ except Exception as e:
+ summary["errors"].append(f"Remove mandate {mandateDef['label']}: {e}")
+
+ try:
+ existing = db.getRecordset(UserInDB, recordFilter={"username": _USER["username"]})
+ for u in existing:
+ uid = u.get("id")
+ memberships = db.getRecordset(UserMandate, recordFilter={"userId": uid})
+ for mem in memberships:
+ try:
+ db.recordDelete(UserMandate, mem.get("id"))
+ except Exception:
+ pass
+ db.recordDelete(UserInDB, uid)
+ summary["removed"].append(f"User {_USER['username']} ({uid})")
+ logger.info(f"Removed user {_USER['username']} ({uid})")
+ except Exception as e:
+ summary["errors"].append(f"Remove user: {e}")
+
+ self._removeLanguageSet(db, "es", summary)
+
+ return summary
+
+ # ------------------------------------------------------------------
+ # helpers
+ # ------------------------------------------------------------------
+
+ def _ensureMandate(self, db, mandateDef: Dict, summary: Dict) -> Optional[str]:
+ from modules.datamodels.datamodelUam import Mandate
+ from modules.interfaces.interfaceBootstrap import copySystemRolesToMandate
+
+ existing = db.getRecordset(Mandate, recordFilter={"name": mandateDef["name"]})
+ if existing:
+ mid = existing[0].get("id")
+ summary["skipped"].append(f"Mandate {mandateDef['label']} exists ({mid})")
+ return mid
+
+ mandate = Mandate(name=mandateDef["name"], label=mandateDef["label"], enabled=True)
+ created = db.recordCreate(Mandate, mandate)
+ mid = created.get("id")
+ logger.info(f"Created mandate {mandateDef['label']} ({mid})")
+ summary["created"].append(f"Mandate {mandateDef['label']}")
+
+ copySystemRolesToMandate(db, mid)
+ return mid
+
+ def _ensureUser(self, db, summary: Dict) -> Optional[str]:
+ from modules.datamodels.datamodelUam import UserInDB, AuthAuthority
+ from passlib.context import CryptContext
+
+ existing = db.getRecordset(UserInDB, recordFilter={"username": _USER["username"]})
+ if existing:
+ uid = existing[0].get("id")
+ summary["skipped"].append(f"User {_USER['username']} exists ({uid})")
+ return uid
+
+ pwdContext = CryptContext(schemes=["argon2"], deprecated="auto")
+ user = UserInDB(
+ username=_USER["username"],
+ email=_USER["email"],
+ fullName=_USER["fullName"],
+ enabled=True,
+ language=_USER["language"],
+ isSysAdmin=True,
+ authenticationAuthority=AuthAuthority.LOCAL,
+ hashedPassword=pwdContext.hash(_USER["password"]),
+ )
+ created = db.recordCreate(UserInDB, user)
+ uid = created.get("id")
+ logger.info(f"Created user {_USER['username']} ({uid})")
+ summary["created"].append(f"User {_USER['fullName']}")
+ return uid
+
+ def _ensureMembership(self, db, userId: str, mandateId: str, mandateLabel: str, summary: Dict):
+ from modules.datamodels.datamodelMembership import UserMandate, UserMandateRole
+ from modules.datamodels.datamodelRbac import Role
+
+ existing = db.getRecordset(UserMandate, recordFilter={"userId": userId, "mandateId": mandateId})
+ if existing:
+ userMandateId = existing[0].get("id")
+ summary["skipped"].append(f"Membership {_USER['username']} -> {mandateLabel} exists")
+ else:
+ um = UserMandate(userId=userId, mandateId=mandateId, enabled=True)
+ created = db.recordCreate(UserMandate, um)
+ userMandateId = created.get("id")
+ summary["created"].append(f"Membership {_USER['username']} -> {mandateLabel}")
+ logger.info(f"Created membership {_USER['username']} -> {mandateLabel}")
+
+ adminRoles = db.getRecordset(Role, recordFilter={"mandateId": mandateId, "roleLabel": "admin"})
+ if adminRoles:
+ adminRoleId = adminRoles[0].get("id")
+ existingRole = db.getRecordset(UserMandateRole, recordFilter={"userMandateId": userMandateId, "roleId": adminRoleId})
+ if not existingRole:
+ umr = UserMandateRole(userMandateId=userMandateId, roleId=adminRoleId)
+ db.recordCreate(UserMandateRole, umr)
+ logger.info(f"Assigned admin role in {mandateLabel}")
+
+ def _ensureFeatures(self, db, mandateId: str, mandateLabel: str, featureCodes: List[str], summary: Dict):
+ from modules.interfaces.interfaceFeatures import getFeatureInterface
+
+ fi = getFeatureInterface(db)
+ existingInstances = fi.getFeatureInstancesForMandate(mandateId)
+ existingCodes = {
+ (inst.featureCode if hasattr(inst, "featureCode") else inst.get("featureCode", ""))
+ for inst in existingInstances
+ }
+
+ for code in featureCodes:
+ if code in existingCodes:
+ summary["skipped"].append(f"Feature {code} in {mandateLabel} exists")
+ continue
+ try:
+ fi.createFeatureInstance(
+ featureCode=code,
+ mandateId=mandateId,
+ label=f"{code} ({mandateLabel})",
+ enabled=True,
+ copyTemplateRoles=True,
+ )
+ summary["created"].append(f"Feature {code} in {mandateLabel}")
+ logger.info(f"Created feature instance {code} in {mandateLabel}")
+ except Exception as e:
+ summary["errors"].append(f"Feature {code} in {mandateLabel}: {e}")
+ logger.error(f"Failed to create feature {code} in {mandateLabel}: {e}")
+
+ def _ensureTrusteeRmaConfig(self, db, mandateId: Optional[str], mandateLabel: str, summary: Dict):
+ if not mandateId:
+ return
+
+ from modules.datamodels.datamodelFeatures import FeatureInstance
+ from modules.features.trustee.datamodelFeatureTrustee import TrusteeAccountingConfig
+ from modules.shared.configuration import APP_CONFIG, encryptValue
+
+ instances = db.getRecordset(FeatureInstance, recordFilter={"mandateId": mandateId, "featureCode": "trustee"})
+ if not instances:
+ summary["skipped"].append(f"No trustee instance in {mandateLabel} for RMA config")
+ return
+
+ instanceId = instances[0].get("id")
+
+ existing = db.getRecordset(TrusteeAccountingConfig, recordFilter={"featureInstanceId": instanceId})
+ if existing:
+ summary["skipped"].append(f"RMA config for {mandateLabel} exists")
+ return
+
+ apiBaseUrl = APP_CONFIG.get("Demo_RMA_ApiBaseUrl", "")
+ clientName = APP_CONFIG.get("Demo_RMA_ClientName", "")
+ apiKey = APP_CONFIG.get("Demo_RMA_ApiKey", "")
+
+ if not apiBaseUrl or not apiKey:
+ summary["errors"].append(
+ f"RMA credentials missing in config.ini (Demo_RMA_ApiBaseUrl, Demo_RMA_ClientName, Demo_RMA_ApiKey) for {mandateLabel}"
+ )
+ return
+
+ plainConfig = {
+ "apiBaseUrl": apiBaseUrl,
+ "clientName": clientName,
+ "apiKey": apiKey,
+ }
+
+ configRecord = {
+ "id": str(uuid.uuid4()),
+ "featureInstanceId": instanceId,
+ "connectorType": "rma",
+ "displayLabel": "Run My Accounts",
+ "encryptedConfig": encryptValue(json.dumps(plainConfig), keyName="accountingConfig"),
+ "isActive": True,
+ "mandateId": mandateId,
+ }
+ db.recordCreate(TrusteeAccountingConfig, configRecord)
+ summary["created"].append(f"RMA accounting config for {mandateLabel}")
+ logger.info(f"Created RMA accounting config for {mandateLabel}")
+
+ def _ensureNeutralizationConfig(self, db, mandateId: Optional[str], userId: Optional[str], summary: Dict):
+ if not mandateId or not userId:
+ return
+
+ from modules.datamodels.datamodelFeatures import FeatureInstance
+
+ instances = db.getRecordset(FeatureInstance, recordFilter={"mandateId": mandateId, "featureCode": "neutralization"})
+ if not instances:
+ return
+
+ instanceId = instances[0].get("id")
+
+ try:
+ from modules.features.neutralization.datamodelFeatureNeutralizer import DataNeutraliserConfig
+
+ existing = db.getRecordset(DataNeutraliserConfig, recordFilter={"featureInstanceId": instanceId})
+ if existing:
+ summary["skipped"].append(f"Neutralization config for mandate {mandateId} exists")
+ return
+
+ config = DataNeutraliserConfig(
+ featureInstanceId=instanceId,
+ mandateId=mandateId,
+ userId=userId,
+ enabled=True,
+ scope="featureInstance",
+ )
+ db.recordCreate(DataNeutraliserConfig, config)
+ summary["created"].append(f"Neutralization config for mandate {mandateId}")
+ logger.info(f"Created neutralization config for mandate {mandateId}")
+ except Exception as e:
+ summary["errors"].append(f"Neutralization config: {e}")
+
+ def _ensureBilling(self, db, mandateId: Optional[str], mandateLabel: str, summary: Dict):
+ if not mandateId:
+ return
+ try:
+ from modules.interfaces.interfaceDbBilling import _getRootInterface
+ from modules.datamodels.datamodelBilling import BillingSettings
+
+ billingInterface = _getRootInterface()
+ existingSettings = billingInterface.getSettings(mandateId)
+ if existingSettings:
+ summary["skipped"].append(f"Billing for {mandateLabel} exists")
+ return
+
+ settings = BillingSettings(
+ mandateId=mandateId,
+ warningThresholdPercent=10.0,
+ notifyOnWarning=True,
+ )
+ billingInterface.db.recordCreate(BillingSettings, settings)
+ summary["created"].append(f"Billing settings for {mandateLabel}")
+ logger.info(f"Created billing settings for {mandateLabel}")
+ except Exception as e:
+ summary["errors"].append(f"Billing for {mandateLabel}: {e}")
+
+ def _removeLanguageSet(self, db, code: str, summary: Dict):
+ """Remove a language set if it was created during demo (e.g. 'es' from UC4)."""
+ try:
+ from modules.datamodels.datamodelUiLanguage import UiLanguageSet
+
+ existing = db.getRecordset(UiLanguageSet, recordFilter={"id": code})
+ if existing:
+ db.recordDelete(UiLanguageSet, code)
+ summary["removed"].append(f"Language set '{code}'")
+ logger.info(f"Removed language set '{code}'")
+ except Exception as e:
+ logger.debug(f"Could not remove language set '{code}': {e}")
diff --git a/modules/features/automation/datamodelFeatureAutomation.py b/modules/features/automation/datamodelFeatureAutomation.py
deleted file mode 100644
index 8ea4a300..00000000
--- a/modules/features/automation/datamodelFeatureAutomation.py
+++ /dev/null
@@ -1,97 +0,0 @@
-# Copyright (c) 2025 Patrick Motsch
-# All rights reserved.
-"""Automation models: AutomationDefinition, AutomationTemplate."""
-
-from typing import List, Dict, Any, Optional
-from pydantic import BaseModel, Field
-from modules.datamodels.datamodelBase import PowerOnModel
-from modules.shared.attributeUtils import registerModelLabels
-from modules.datamodels.datamodelUtils import TextMultilingual
-import uuid
-
-
-class AutomationDefinition(BaseModel):
- id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Primary key", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False})
- mandateId: str = Field(description="Mandate ID", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False})
- featureInstanceId: str = Field(description="ID of the feature instance this automation belongs to", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False})
- label: str = Field(description="User-friendly name", json_schema_extra={"frontend_type": "text", "frontend_required": True})
- schedule: str = Field(description="Cron schedule pattern", json_schema_extra={"frontend_type": "select", "frontend_required": True, "frontend_options": [
- {"value": "0 */4 * * *", "label": {"en": "Every 4 hours", "fr": "Toutes les 4 heures"}},
- {"value": "0 22 * * *", "label": {"en": "Daily at 22:00", "fr": "Quotidien à 22:00"}},
- {"value": "0 10 * * 1", "label": {"en": "Weekly Monday 10:00", "fr": "Hebdomadaire lundi 10:00"}}
- ]})
- template: str = Field(description="JSON template with placeholders (format: {{KEY:PLACEHOLDER_NAME}})", json_schema_extra={"frontend_type": "textarea", "frontend_required": True})
- placeholders: Dict[str, str] = Field(default_factory=dict, description="Dictionary of placeholder key/value pairs (e.g., {'connectionName': 'MyConnection', 'sharepointFolderNameSource': '/folder/path', 'webResearchUrl': 'https://...', 'webResearchPrompt': '...', 'documentPrompt': '...'})", json_schema_extra={"frontend_type": "textarea"})
- active: bool = Field(default=False, description="Whether automation should be launched in event handler", json_schema_extra={"frontend_type": "checkbox", "frontend_required": False})
- eventId: Optional[str] = Field(None, description="Event ID from event management (None if not registered)", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False})
- status: Optional[str] = Field(None, description="Status: 'active' if event is registered, 'inactive' if not (computed, readonly)", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False})
- executionLogs: List[Dict[str, Any]] = Field(default_factory=list, description="List of execution logs, each containing timestamp, workflowId, status, and messages", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False})
- allowedProviders: List[str] = Field(default_factory=list, description="List of allowed AICore providers (e.g., 'anthropic', 'openai'). Empty means all RBAC-permitted providers are allowed.", json_schema_extra={"frontend_type": "multiselect", "frontend_readonly": False, "frontend_required": False})
-
-
-registerModelLabels(
- "AutomationDefinition",
- {"en": "Automation Definition", "ge": "Automatisierungs-Definition", "fr": "Définition d'automatisation"},
- {
- "id": {"en": "ID", "ge": "ID", "fr": "ID"},
- "mandateId": {"en": "Mandate ID", "ge": "Mandanten-ID", "fr": "ID du mandat"},
- "featureInstanceId": {"en": "Feature Instance ID", "ge": "Feature-Instanz-ID", "fr": "ID de l'instance de fonctionnalité"},
- "label": {"en": "Label", "ge": "Bezeichnung", "fr": "Libellé"},
- "schedule": {"en": "Schedule", "ge": "Zeitplan", "fr": "Planification"},
- "template": {"en": "Template", "ge": "Vorlage", "fr": "Modèle"},
- "placeholders": {"en": "Placeholders", "ge": "Platzhalter", "fr": "Espaces réservés"},
- "active": {"en": "Active", "ge": "Aktiv", "fr": "Actif"},
- "eventId": {"en": "Event ID", "ge": "Event-ID", "fr": "ID de l'événement"},
- "status": {"en": "Status", "ge": "Status", "fr": "Statut"},
- "executionLogs": {"en": "Execution Logs", "ge": "Ausführungsprotokolle", "fr": "Journaux d'exécution"},
- "allowedProviders": {"en": "Allowed Providers", "ge": "Erlaubte Provider", "fr": "Fournisseurs autorisés"},
- },
-)
-
-
-class AutomationTemplate(PowerOnModel):
- """Automation-Vorlage ohne scharfe Placeholder-Werte (DB-persistiert).
-
- System-Templates (isSystem=True): Nur durch SysAdmin aenderbar. Alle User koennen lesen.
- Instance-Templates (isSystem=False, featureInstanceId gesetzt): CRUD durch Instance-Admin/Editor.
- """
- id: str = Field(
- default_factory=lambda: str(uuid.uuid4()),
- description="Primary key",
- json_schema_extra={"frontend_type": "text", "frontend_readonly": True}
- )
- label: TextMultilingual = Field(
- description="Template name (multilingual)",
- json_schema_extra={"frontend_type": "multilingual", "frontend_required": True}
- )
- overview: Optional[TextMultilingual] = Field(
- None,
- description="Short description (multilingual)",
- json_schema_extra={"frontend_type": "multilingual", "frontend_required": False}
- )
- template: str = Field(
- description="JSON workflow structure with {{KEY:...}} placeholders",
- json_schema_extra={"frontend_type": "textarea", "frontend_required": True}
- )
- isSystem: bool = Field(
- default=False,
- description="System template (only SysAdmin can modify, all users can read)",
- json_schema_extra={"frontend_type": "checkbox", "frontend_readonly": True, "frontend_required": False}
- )
- featureInstanceId: Optional[str] = Field(
- None,
- description="Feature instance ID (null for system templates, set for instance-scoped templates)",
- json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False}
- )
-registerModelLabels(
- "AutomationTemplate",
- {"en": "Automation Template", "ge": "Automation-Vorlage", "fr": "Modèle d'automatisation"},
- {
- "id": {"en": "ID", "ge": "ID", "fr": "ID"},
- "label": {"en": "Label", "ge": "Bezeichnung", "fr": "Libellé"},
- "overview": {"en": "Overview", "ge": "Übersicht", "fr": "Aperçu"},
- "template": {"en": "Template", "ge": "Vorlage", "fr": "Modèle"},
- "isSystem": {"en": "System Template", "ge": "System-Vorlage", "fr": "Modèle système"},
- "featureInstanceId": {"en": "Feature Instance", "ge": "Feature-Instanz", "fr": "Instance de fonctionnalité"},
- },
-)
diff --git a/modules/features/automation/interfaceFeatureAutomation.py b/modules/features/automation/interfaceFeatureAutomation.py
deleted file mode 100644
index a4f90a51..00000000
--- a/modules/features/automation/interfaceFeatureAutomation.py
+++ /dev/null
@@ -1,872 +0,0 @@
-# Copyright (c) 2025 Patrick Motsch
-# All rights reserved.
-"""
-Interface for Automation feature - manages AutomationDefinition and AutomationTemplate.
-Uses the PostgreSQL connector for data access with user/mandate filtering.
-"""
-
-import logging
-import uuid
-import math
-from typing import Dict, Any, List, Optional, Union
-
-from modules.security.rbac import RbacClass
-from modules.datamodels.datamodelRbac import AccessRuleContext
-from modules.datamodels.datamodelUam import AccessLevel, User
-from modules.features.automation.datamodelFeatureAutomation import AutomationDefinition, AutomationTemplate
-from modules.connectors.connectorDbPostgre import DatabaseConnector
-from modules.datamodels.datamodelPagination import PaginationParams, PaginatedResult
-from modules.interfaces.interfaceRbac import getRecordsetWithRBAC, buildDataObjectKey
-
-from modules.shared.configuration import APP_CONFIG
-
-logger = logging.getLogger(__name__)
-
-
-def _automationDefinitionPayload(data: Dict[str, Any]) -> Dict[str, Any]:
- """Strip connector/enrichment keys; only fields defined on AutomationDefinition."""
- allowed = AutomationDefinition.model_fields.keys()
- return {k: v for k, v in (data or {}).items() if k in allowed}
-
-
-# Singleton factory for Automation instances
-_automationInterfaces = {}
-
-
-class AutomationObjects:
- """
- Interface for Automation database operations.
- Manages AutomationDefinition and AutomationTemplate with RBAC support.
- """
-
- def __init__(self, currentUser: User, mandateId: Optional[str] = None, featureInstanceId: Optional[str] = None):
- self.currentUser = currentUser
- self.mandateId = mandateId
- self.featureInstanceId = featureInstanceId
- self.userId = currentUser.id if currentUser else None
-
- # Initialize database with proper configuration
- self._initializeDatabase()
-
- # Initialize RBAC - AccessRules are in poweron_app, not poweron_automation!
- from modules.security.rootAccess import getRootDbAppConnector
- dbApp = getRootDbAppConnector()
- self.rbac = RbacClass(self.db, dbApp=dbApp)
-
- # Update database context
- self.db.updateContext(self.userId)
-
- def _initializeDatabase(self):
- """Initializes the database connection with proper configuration."""
- # Get configuration values
- dbHost = APP_CONFIG.get("DB_HOST", "_no_config_default_data")
- dbDatabase = "poweron_automation"
- dbUser = APP_CONFIG.get("DB_USER")
- dbPassword = APP_CONFIG.get("DB_PASSWORD_SECRET")
- dbPort = int(APP_CONFIG.get("DB_PORT", 5432))
-
- # Create database connector with full configuration
- self.db = DatabaseConnector(
- dbHost=dbHost,
- dbDatabase=dbDatabase,
- dbUser=dbUser,
- dbPassword=dbPassword,
- dbPort=dbPort,
- userId=self.userId,
- )
-
- logger.debug(f"Automation database initialized for user {self.userId}")
-
- def setUserContext(self, currentUser: User, mandateId: Optional[str] = None, featureInstanceId: Optional[str] = None):
- """Update user context for the interface."""
- self.currentUser = currentUser
- self.mandateId = mandateId
- self.featureInstanceId = featureInstanceId
- self.userId = currentUser.id if currentUser else None
- if hasattr(self.db, 'updateContext'):
- self.db.updateContext(self.userId)
-
- def checkRbacPermission(self, model, action: str, recordId: str = None) -> bool:
- """Check RBAC permission for a specific action on a model."""
- objectKey = buildDataObjectKey(model.__name__)
- permissions = self.rbac.getUserPermissions(
- user=self.currentUser,
- context=AccessRuleContext.DATA,
- item=objectKey,
- mandateId=self.mandateId,
- featureInstanceId=self.featureInstanceId
- )
-
- accessLevel = getattr(permissions, action, AccessLevel.NONE)
-
- if accessLevel == AccessLevel.ALL:
- return True
- elif accessLevel == AccessLevel.GROUP:
- return True
- elif accessLevel == AccessLevel.MY:
- if recordId:
- record = self.db.getRecordset(model, recordFilter={"id": recordId})
- if record:
- return record[0].get("sysCreatedBy") == self.userId
- else:
- return False # Record not found = no access
- return True # No recordId needed (e.g., for CREATE)
- return False
-
- # =========================================================================
- # AutomationDefinition CRUD methods
- # =========================================================================
-
- def _computeAutomationStatus(self, automation: Dict[str, Any]) -> str:
- """Compute status field based on eventId presence"""
- eventId = automation.get("eventId")
- return "Running" if eventId else "Idle"
-
- def _enrichAutomationsWithUserAndMandate(self, automations: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
- """
- Batch enrich automations with user names, mandate names and feature instance labels.
- Uses direct DB lookup (no RBAC) because this is purely cosmetic enrichment —
- the user already has RBAC-verified access to the automations themselves.
- """
- if not automations:
- return automations
-
- # Collect all unique IDs
- userIds = set()
- mandateIds = set()
- featureInstanceIds = set()
-
- for automation in automations:
- createdBy = automation.get("sysCreatedBy")
- if createdBy:
- userIds.add(createdBy)
-
- mandateId = automation.get("mandateId")
- if mandateId:
- mandateIds.add(mandateId)
-
- featureInstanceId = automation.get("featureInstanceId")
- if featureInstanceId:
- featureInstanceIds.add(featureInstanceId)
-
- # Use root DB connector for display-only lookups (no RBAC needed)
- usersMap = {}
- mandatesMap = {}
- featureInstancesMap = {}
- try:
- from modules.datamodels.datamodelUam import UserInDB, Mandate
- from modules.datamodels.datamodelFeatures import FeatureInstance
- from modules.security.rootAccess import getRootDbAppConnector
- dbAppConn = getRootDbAppConnector()
-
- # Batch fetch user display names
- if userIds:
- for userId in userIds:
- users = dbAppConn.getRecordset(UserInDB, recordFilter={"id": userId})
- if users:
- user = users[0]
- displayName = user.get("fullName") or user.get("username") or user.get("email") or None
- if displayName:
- usersMap[userId] = displayName
-
- # Batch fetch mandate display names
- if mandateIds:
- for mandateId in mandateIds:
- mandates = dbAppConn.getRecordset(Mandate, recordFilter={"id": mandateId})
- if mandates:
- label = mandates[0].get("label") or mandates[0].get("name") or None
- if label:
- mandatesMap[mandateId] = label
-
- # Batch fetch feature instance labels
- if featureInstanceIds:
- for fiId in featureInstanceIds:
- instances = dbAppConn.getRecordset(FeatureInstance, recordFilter={"id": fiId})
- if instances:
- fi = instances[0]
- label = fi.get("label") or fi.get("featureCode") or None
- if label:
- featureInstancesMap[fiId] = label
- except Exception as e:
- logger.warning(f"Could not enrich automations with display names: {e}")
-
- # Enrich each automation with the fetched data
- # SECURITY: Never show a fallback name — if lookup fails, show empty string
- for automation in automations:
- createdBy = automation.get("sysCreatedBy")
- automation["sysCreatedByUserName"] = usersMap.get(createdBy, "") if createdBy else ""
-
- mandateId = automation.get("mandateId")
- automation["mandateName"] = mandatesMap.get(mandateId, "") if mandateId else ""
-
- featureInstanceId = automation.get("featureInstanceId")
- automation["featureInstanceName"] = featureInstancesMap.get(featureInstanceId, "") if featureInstanceId else ""
-
- return automations
-
- def _enrichAutomationWithUserAndMandate(self, automation: Dict[str, Any]) -> Dict[str, Any]:
- """
- Enrich a single automation with user name and mandate name for display.
- For multiple automations, use _enrichAutomationsWithUserAndMandate for better performance.
- """
- return self._enrichAutomationsWithUserAndMandate([automation])[0]
-
- def getAllAutomationDefinitions(self, pagination: Optional[PaginationParams] = None) -> Union[List[Dict[str, Any]], PaginatedResult]:
- """
- Returns automation definitions based on user access level.
- Supports optional pagination, sorting, and filtering.
- Computes status field for each automation.
- """
- # AutomationDefinitions can belong to any feature instance within a mandate.
- # Filter by mandateId only — not by featureInstanceId — to show all definitions across features.
- filteredAutomations = getRecordsetWithRBAC(
- self.db,
- AutomationDefinition,
- self.currentUser,
- mandateId=self.mandateId
- )
-
- # Compute status for each automation and normalize executionLogs
- for automation in filteredAutomations:
- automation["status"] = self._computeAutomationStatus(automation)
- # Ensure executionLogs is always a list, not None
- if automation.get("executionLogs") is None:
- automation["executionLogs"] = []
-
- # Batch enrich with user and mandate names
- self._enrichAutomationsWithUserAndMandate(filteredAutomations)
-
- # If no pagination requested, return all items
- if pagination is None:
- return filteredAutomations
-
- # Apply filtering (if filters provided)
- if pagination.filters:
- filteredAutomations = self._applyFilters(filteredAutomations, pagination.filters)
-
- # Apply sorting (in order of sortFields)
- if pagination.sort:
- filteredAutomations = self._applySorting(filteredAutomations, pagination.sort)
-
- # Count total items after filters
- totalItems = len(filteredAutomations)
- totalPages = math.ceil(totalItems / pagination.pageSize) if totalItems > 0 else 0
-
- # Apply pagination (skip/limit)
- startIdx = (pagination.page - 1) * pagination.pageSize
- endIdx = startIdx + pagination.pageSize
- pagedAutomations = filteredAutomations[startIdx:endIdx]
-
- return PaginatedResult(
- items=pagedAutomations,
- totalItems=totalItems,
- totalPages=totalPages
- )
-
- def _applyFilters(self, items: List[Dict], filters: Dict[str, Any]) -> List[Dict]:
- """Apply filters to a list of items."""
- if not filters:
- return items
-
- filtered = []
- for item in items:
- match = True
- for key, value in filters.items():
- itemValue = item.get(key)
- if isinstance(value, str) and isinstance(itemValue, str):
- if value.lower() not in itemValue.lower():
- match = False
- break
- elif str(itemValue).lower() != str(value).lower():
- match = False
- break
- if match:
- filtered.append(item)
- return filtered
-
- def _applySorting(self, items: List[Dict], sortFields: List[Dict]) -> List[Dict]:
- """Apply sorting to a list of items."""
- if not sortFields:
- return items
-
- for sortField in reversed(sortFields):
- field = sortField.get("field", "")
- direction = sortField.get("direction", "asc")
- reverse = direction.lower() == "desc"
- items = sorted(items, key=lambda x: x.get(field, ""), reverse=reverse)
-
- return items
-
- def getAutomationDefinition(self, automationId: str, includeSystemFields: bool = False) -> Optional[AutomationDefinition]:
- """Returns an automation definition by ID if user has access, with computed status.
-
- Args:
- automationId: ID of the automation to get
- includeSystemFields: If True, returns raw dict with system fields (sysCreatedBy, etc).
- If False (default), returns Pydantic model without system fields.
- """
- try:
- # AutomationDefinitions can belong to any feature instance within a mandate.
- # Filter by mandateId only — not by featureInstanceId.
- filtered = getRecordsetWithRBAC(
- self.db,
- AutomationDefinition,
- self.currentUser,
- recordFilter={"id": automationId},
- mandateId=self.mandateId
- )
-
- if not filtered:
- return None
-
- automation = filtered[0]
- automation["status"] = self._computeAutomationStatus(automation)
- # Ensure executionLogs is always a list, not None
- if automation.get("executionLogs") is None:
- automation["executionLogs"] = []
- # Enrich with user and mandate names
- self._enrichAutomationWithUserAndMandate(automation)
-
- # For internal use (execution), return raw dict with system fields
- if includeSystemFields:
- # Return as simple namespace object so getattr works
- class AutomationWithSystemFields:
- def __init__(self, data):
- for key, value in data.items():
- setattr(self, key, value)
- return AutomationWithSystemFields(automation)
-
- # Clean metadata fields and return Pydantic model
- cleanedRecord = _automationDefinitionPayload(automation)
- return AutomationDefinition(**cleanedRecord)
- except Exception as e:
- logger.error(f"Error getting automation definition: {str(e)}")
- return None
-
- def createAutomationDefinition(self, automationData: Dict[str, Any]) -> AutomationDefinition:
- """Creates a new automation definition, then triggers sync."""
- try:
- # Ensure ID is present
- if "id" not in automationData or not automationData["id"]:
- automationData["id"] = str(uuid.uuid4())
-
- # Ensure mandateId and featureInstanceId are set for proper data isolation
- if "mandateId" not in automationData or not automationData.get("mandateId"):
- # Use request context mandateId, or fall back to Root mandate
- effectiveMandateId = self.mandateId
- if not effectiveMandateId:
- # Fall back to Root mandate (first mandate in system)
- try:
- from modules.datamodels.datamodelUam import Mandate
- from modules.security.rootAccess import getRootDbAppConnector
- dbAppConn = getRootDbAppConnector()
- allMandates = dbAppConn.getRecordset(Mandate)
- if allMandates:
- effectiveMandateId = allMandates[0].get("id")
- logger.debug(f"createAutomationDefinition: Using Root mandate {effectiveMandateId}")
- except Exception as e:
- logger.warning(f"Could not get Root mandate: {e}")
- automationData["mandateId"] = effectiveMandateId
- if "featureInstanceId" not in automationData:
- automationData["featureInstanceId"] = self.featureInstanceId
-
- # Ensure database connector has correct userId context
- if not self.userId:
- logger.error(f"createAutomationDefinition: userId is not set! Cannot set sysCreatedBy. currentUser={self.currentUser}")
- elif hasattr(self.db, 'updateContext'):
- try:
- self.db.updateContext(self.userId)
- logger.debug(f"createAutomationDefinition: Updated database context with userId={self.userId}")
- except Exception as e:
- logger.warning(f"Could not update database context: {e}")
-
- # Create automation in database
- createdAutomation = self.db.recordCreate(AutomationDefinition, automationData)
-
- # Compute status
- createdAutomation["status"] = self._computeAutomationStatus(createdAutomation)
- # Ensure executionLogs is always a list, not None
- if createdAutomation.get("executionLogs") is None:
- createdAutomation["executionLogs"] = []
-
- # Trigger automation change callback
- self._notifyAutomationChanged()
-
- # Clean metadata fields and return Pydantic model
- cleanedRecord = _automationDefinitionPayload(createdAutomation)
- return AutomationDefinition(**cleanedRecord)
- except Exception as e:
- logger.error(f"Error creating automation definition: {str(e)}")
- raise
-
- def _saveExecutionLog(self, automationId: str, executionLogs: List[Dict[str, Any]]) -> None:
- """
- Save execution logs to an automation definition WITHOUT RBAC check.
-
- This is a system-level operation: when a user executes an automation,
- the execution log must be saved regardless of whether the user has
- 'update' permission on the AutomationDefinition. The user already
- proved they have execute/read access by loading the automation.
- """
- try:
- self.db.recordModify(AutomationDefinition, automationId, {"executionLogs": executionLogs})
- logger.debug(f"Saved execution log for automation {automationId}")
- except Exception as e:
- logger.warning(f"Could not save execution log for automation {automationId}: {e}")
-
- def updateAutomationDefinition(self, automationId: str, automationData: Dict[str, Any]) -> AutomationDefinition:
- """Updates an automation definition, then triggers sync."""
- try:
- # Check access
- existing = self.getAutomationDefinition(automationId)
- if not existing:
- raise PermissionError(f"No access to automation {automationId}")
-
- if not self.checkRbacPermission(AutomationDefinition, "update", automationId):
- raise PermissionError(f"No permission to modify automation {automationId}")
-
- automationData.pop("executionLogs", None)
-
- # If deactivating: immediately remove scheduler job (don't rely on async callback)
- isBeingDeactivated = "active" in automationData and not automationData["active"]
- if isBeingDeactivated:
- existingEventId = getattr(existing, "eventId", None) if not isinstance(existing, dict) else existing.get("eventId")
- if existingEventId:
- try:
- from modules.shared.eventManagement import eventManager
- eventManager.remove(existingEventId)
- logger.info(f"Removed scheduler job {existingEventId} (automation deactivated)")
- except Exception as e:
- logger.warning(f"Could not remove scheduler job {existingEventId}: {e}")
- automationData["eventId"] = None
-
- # Update automation in database
- updatedAutomation = self.db.recordModify(AutomationDefinition, automationId, automationData)
-
- # Compute status
- updatedAutomation["status"] = self._computeAutomationStatus(updatedAutomation)
- # Ensure executionLogs is always a list, not None
- if updatedAutomation.get("executionLogs") is None:
- updatedAutomation["executionLogs"] = []
-
- # Trigger automation change callback
- self._notifyAutomationChanged()
-
- # Clean metadata fields and return Pydantic model
- cleanedRecord = _automationDefinitionPayload(updatedAutomation)
- return AutomationDefinition(**cleanedRecord)
- except Exception as e:
- logger.error(f"Error updating automation definition: {str(e)}")
- raise
-
- def deleteAutomationDefinition(self, automationId: str) -> bool:
- """Deletes an automation definition, then triggers sync."""
- try:
- # Check access
- existing = self.getAutomationDefinition(automationId)
- if not existing:
- raise PermissionError(f"No access to automation {automationId}")
-
- if not self.checkRbacPermission(AutomationDefinition, "delete", automationId):
- raise PermissionError(f"No permission to delete automation {automationId}")
-
- # Delete automation from database
- self.db.recordDelete(AutomationDefinition, automationId)
-
- # Trigger automation change callback
- self._notifyAutomationChanged()
-
- return True
- except Exception as e:
- logger.error(f"Error deleting automation definition: {str(e)}")
- raise
-
- def getAllAutomationDefinitionsWithRBAC(self, user: User) -> List[Dict[str, Any]]:
- """
- Get all automation definitions filtered by RBAC for a specific user.
- This method encapsulates getRecordsetWithRBAC() to avoid exposing the connector.
-
- Args:
- user: User object for RBAC filtering
-
- Returns:
- List of automation definition dictionaries filtered by RBAC
- """
- return getRecordsetWithRBAC(
- self.db,
- AutomationDefinition,
- user,
- mandateId=self.mandateId,
- featureInstanceId=self.featureInstanceId
- )
-
- # =========================================================================
- # AutomationTemplate CRUD methods
- # =========================================================================
-
- def getAllAutomationTemplates(self, pagination: Optional[PaginationParams] = None) -> Union[List[Dict[str, Any]], PaginatedResult]:
- """
- Returns automation templates: system templates + instance templates for current instance.
- System templates (isSystem=True) are always included (read-only for non-SysAdmin).
- Instance templates (featureInstanceId matches) are included with RBAC filtering.
- """
- # Load ALL templates and filter in Python.
- # Reason: seeded/legacy templates may have isSystem=NULL (not False/True),
- # which breaks SQL equality filters (NULL != True AND NULL != False).
- allTemplates = self.db.getRecordset(AutomationTemplate)
-
- filteredTemplates = []
- for t in allTemplates:
- isSystem = t.get("isSystem")
- fid = t.get("featureInstanceId")
-
- if isSystem is True:
- # System templates — always visible to all users
- filteredTemplates.append(t)
- elif fid and fid == self.featureInstanceId:
- # Instance templates — scoped to current feature instance
- filteredTemplates.append(t)
- elif not fid:
- # Global/legacy templates (no featureInstanceId) — visible to all users
- filteredTemplates.append(t)
-
- # Enrich with user names
- self._enrichTemplatesWithUserName(filteredTemplates)
-
- # If no pagination requested, return all items
- if pagination is None:
- return filteredTemplates
-
- # Apply filtering (if filters provided)
- if pagination.filters:
- filteredTemplates = self._applyFilters(filteredTemplates, pagination.filters)
-
- # Apply sorting (in order of sortFields)
- if pagination.sort:
- filteredTemplates = self._applySorting(filteredTemplates, pagination.sort)
-
- # Count total items after filters
- totalItems = len(filteredTemplates)
- totalPages = math.ceil(totalItems / pagination.pageSize) if totalItems > 0 else 0
-
- # Apply pagination (skip/limit)
- startIdx = (pagination.page - 1) * pagination.pageSize
- endIdx = startIdx + pagination.pageSize
- pagedTemplates = filteredTemplates[startIdx:endIdx]
-
- return PaginatedResult(
- items=pagedTemplates,
- totalItems=totalItems,
- totalPages=totalPages
- )
-
- def _enrichTemplatesWithUserName(self, templates: List[Dict[str, Any]]) -> None:
- """Batch enrich templates with creator user names."""
- if not templates:
- return
-
- # Collect unique user IDs
- userIds = set()
- for template in templates:
- createdBy = template.get("sysCreatedBy")
- if createdBy:
- userIds.add(createdBy)
-
- if not userIds:
- return
-
- # Batch fetch users
- try:
- from modules.datamodels.datamodelUam import UserInDB
- from modules.security.rootAccess import getRootDbAppConnector
- dbAppConn = getRootDbAppConnector()
-
- userNameMap = {}
- for userId in userIds:
- users = dbAppConn.getRecordset(UserInDB, recordFilter={"id": userId})
- if users:
- user = users[0]
- displayName = user.get("fullName") or user.get("username") or user.get("email") or None
- if displayName:
- userNameMap[userId] = displayName
-
- # Apply to templates — SECURITY: no fallback, empty if not found
- for template in templates:
- createdBy = template.get("sysCreatedBy")
- template["sysCreatedByUserName"] = userNameMap.get(createdBy, "") if createdBy else ""
- except Exception as e:
- logger.warning(f"Could not enrich templates with user names: {e}")
-
- def getAutomationTemplate(self, templateId: str) -> Optional[Dict[str, Any]]:
- """Returns an automation template by ID (system templates always accessible, instance templates scoped)."""
- try:
- records = self.db.getRecordset(
- AutomationTemplate,
- recordFilter={"id": templateId}
- )
-
- if not records:
- return None
-
- template = records[0]
-
- # System templates are readable by everyone
- if template.get("isSystem"):
- self._enrichTemplatesWithUserName([template])
- return template
-
- # Instance templates: must belong to current feature instance
- templateInstanceId = template.get("featureInstanceId")
- if templateInstanceId and self.featureInstanceId and str(templateInstanceId) != str(self.featureInstanceId):
- return None # Not in this instance
-
- self._enrichTemplatesWithUserName([template])
- return template
- except Exception as e:
- logger.error(f"Error getting automation template: {str(e)}")
- return None
-
- def createAutomationTemplate(self, templateData: Dict[str, Any], isSysAdmin: bool = False) -> Dict[str, Any]:
- """Creates a new automation template.
-
- System templates (isSystem=True) can only be created by SysAdmin.
- Instance templates get featureInstanceId from context.
- """
- try:
- # Ensure ID is present
- if "id" not in templateData or not templateData["id"]:
- templateData["id"] = str(uuid.uuid4())
-
- # System template protection
- if templateData.get("isSystem") and not isSysAdmin:
- raise PermissionError("Only SysAdmin can create system templates")
-
- # Set featureInstanceId for non-system templates
- if not templateData.get("isSystem"):
- templateData["featureInstanceId"] = self.featureInstanceId
- templateData["isSystem"] = False
-
- # RBAC check (for non-system templates)
- if not isSysAdmin and not self.checkRbacPermission(AutomationTemplate, "create"):
- raise PermissionError("No permission to create template")
-
- # Ensure database connector has correct userId context
- if self.userId and hasattr(self.db, 'updateContext'):
- try:
- self.db.updateContext(self.userId)
- except Exception as e:
- logger.warning(f"Could not update database context: {e}")
-
- # Convert template field to string if it's a dict (frontend may send parsed JSON)
- if "template" in templateData and isinstance(templateData["template"], dict):
- import json
- templateData["template"] = json.dumps(templateData["template"])
-
- # Validate through Pydantic model to ensure proper type conversion
- validatedTemplate = AutomationTemplate(**templateData)
-
- # Create template in database using model_dump for proper serialization
- createdTemplate = self.db.recordCreate(AutomationTemplate, validatedTemplate.model_dump())
-
- return createdTemplate
- except Exception as e:
- logger.error(f"Error creating automation template: {str(e)}")
- raise
-
- def updateAutomationTemplate(self, templateId: str, templateData: Dict[str, Any], isSysAdmin: bool = False) -> Dict[str, Any]:
- """Updates an automation template.
-
- System templates can only be updated by SysAdmin.
- """
- try:
- # Check access
- existing = self.getAutomationTemplate(templateId)
- if not existing:
- raise PermissionError(f"No access to template {templateId}")
-
- # System template protection
- if existing.get("isSystem") and not isSysAdmin:
- raise PermissionError("Only SysAdmin can modify system templates")
-
- if not isSysAdmin and not self.checkRbacPermission(AutomationTemplate, "update", templateId):
- raise PermissionError(f"No permission to modify template {templateId}")
-
- # Prevent changing isSystem/featureInstanceId
- templateData.pop("isSystem", None)
- templateData.pop("featureInstanceId", None)
-
- # Convert template field to string if it's a dict (frontend may send parsed JSON)
- if "template" in templateData and isinstance(templateData["template"], dict):
- import json
- templateData["template"] = json.dumps(templateData["template"])
-
- # Merge existing data with update data for partial updates
- mergedData = {**existing, **templateData}
- mergedData["id"] = templateId # Ensure ID is preserved
-
- # Validate through Pydantic model to ensure proper type conversion
- validatedTemplate = AutomationTemplate(**mergedData)
-
- # Update template in database using model_dump for proper serialization
- updatedTemplate = self.db.recordModify(AutomationTemplate, templateId, validatedTemplate.model_dump())
-
- return updatedTemplate
- except Exception as e:
- logger.error(f"Error updating automation template: {str(e)}")
- raise
-
- def deleteAutomationTemplate(self, templateId: str, isSysAdmin: bool = False) -> bool:
- """Deletes an automation template.
-
- System templates can only be deleted by SysAdmin.
- """
- try:
- # Check access
- existing = self.getAutomationTemplate(templateId)
- if not existing:
- return False
-
- # System template protection
- if existing.get("isSystem") and not isSysAdmin:
- raise PermissionError("Only SysAdmin can delete system templates")
-
- if not isSysAdmin and not self.checkRbacPermission(AutomationTemplate, "delete", templateId):
- raise PermissionError(f"No permission to delete template {templateId}")
-
- # Delete template from database
- self.db.recordDelete(AutomationTemplate, templateId)
-
- return True
- except Exception as e:
- logger.error(f"Error deleting automation template: {str(e)}")
- raise
-
- def duplicateAutomationTemplate(self, templateId: str) -> Dict[str, Any]:
- """Duplicates a template into the current feature instance.
-
- Creates a copy with new ID, isSystem=False, featureInstanceId from context.
- Works for both system and instance templates.
- """
- try:
- existing = self.getAutomationTemplate(templateId)
- if not existing:
- raise PermissionError(f"Template {templateId} not found")
-
- # RBAC check for creating templates
- if not self.checkRbacPermission(AutomationTemplate, "create"):
- raise PermissionError("No permission to create templates")
-
- # Build duplicate data
- duplicateData = {
- "id": str(uuid.uuid4()),
- "label": existing.get("label", {}),
- "overview": existing.get("overview"),
- "template": existing.get("template", ""),
- "isSystem": False,
- "featureInstanceId": self.featureInstanceId,
- }
-
- # Append "(Kopie)" to label
- label = duplicateData["label"]
- if isinstance(label, dict):
- for lang in label:
- if label[lang]:
- label[lang] = f"{label[lang]} (Kopie)"
-
- # Ensure database connector has correct userId context
- if self.userId and hasattr(self.db, 'updateContext'):
- self.db.updateContext(self.userId)
-
- validatedTemplate = AutomationTemplate(**duplicateData)
- createdTemplate = self.db.recordCreate(AutomationTemplate, validatedTemplate.model_dump())
-
- logger.info(f"Duplicated template {templateId} -> {duplicateData['id']}")
- return createdTemplate
- except Exception as e:
- logger.error(f"Error duplicating template: {str(e)}")
- raise
-
- def duplicateAutomationDefinition(self, definitionId: str) -> Dict[str, Any]:
- """Duplicates an automation definition within the same feature instance.
-
- Creates a copy with new ID, active=False, no eventId.
- """
- try:
- existing = self.getAutomationDefinition(definitionId)
- if not existing:
- raise PermissionError(f"Definition {definitionId} not found")
-
- # RBAC check for creating definitions
- if not self.checkRbacPermission(AutomationDefinition, "create"):
- raise PermissionError("No permission to create definitions")
-
- # getAutomationDefinition returns Pydantic model; convert to dict for .get() access
- existing_data = existing.model_dump() if hasattr(existing, "model_dump") else existing
-
- # Build duplicate data
- duplicateData = {
- "id": str(uuid.uuid4()),
- "mandateId": existing_data.get("mandateId"),
- "featureInstanceId": existing_data.get("featureInstanceId"),
- "label": f"{existing_data.get('label', '')} (Kopie)",
- "schedule": existing_data.get("schedule", ""),
- "template": existing_data.get("template", ""),
- "placeholders": existing_data.get("placeholders", {}),
- "active": False,
- "eventId": None,
- "status": None,
- "executionLogs": [],
- "allowedProviders": existing_data.get("allowedProviders", []),
- }
-
- # Ensure database connector has correct userId context
- if self.userId and hasattr(self.db, 'updateContext'):
- self.db.updateContext(self.userId)
-
- validatedDefinition = AutomationDefinition(**duplicateData)
- createdDefinition = self.db.recordCreate(AutomationDefinition, validatedDefinition.model_dump())
-
- logger.info(f"Duplicated definition {definitionId} -> {duplicateData['id']}")
- return createdDefinition
- except Exception as e:
- logger.error(f"Error duplicating definition: {str(e)}")
- raise
-
- def _notifyAutomationChanged(self):
- """Notify registered callbacks about automation changes (decoupled from features).
- Sync-safe: works from both sync and async contexts."""
- try:
- from modules.shared.callbackRegistry import callbackRegistry
- # Trigger callbacks without knowing which features are listening
- callbackRegistry.trigger('automation.changed', self)
- except Exception as e:
- logger.error(f"Error notifying automation change: {str(e)}")
-
-
-def getInterface(currentUser: Optional[User] = None, mandateId: Optional[str] = None, featureInstanceId: Optional[str] = None) -> 'AutomationObjects':
- """
- Returns an AutomationObjects instance for the current user.
- Handles initialization of database and records.
-
- Args:
- currentUser: The authenticated user
- mandateId: The mandate ID from RequestContext (X-Mandate-Id header).
- featureInstanceId: The feature instance ID from RequestContext (X-Feature-Instance-Id header).
- """
- if not currentUser:
- raise ValueError("Invalid user context: user is required")
-
- effectiveMandateId = str(mandateId) if mandateId else None
- effectiveFeatureInstanceId = str(featureInstanceId) if featureInstanceId else None
-
- # Create context key including featureInstanceId for proper isolation
- contextKey = f"automation_{effectiveMandateId}_{effectiveFeatureInstanceId}_{currentUser.id}"
-
- # Create new instance if not exists
- if contextKey not in _automationInterfaces:
- _automationInterfaces[contextKey] = AutomationObjects(currentUser, mandateId=effectiveMandateId, featureInstanceId=effectiveFeatureInstanceId)
- else:
- # Update user context if needed
- _automationInterfaces[contextKey].setUserContext(currentUser, mandateId=effectiveMandateId, featureInstanceId=effectiveFeatureInstanceId)
-
- return _automationInterfaces[contextKey]
diff --git a/modules/features/automation/mainAutomation.py b/modules/features/automation/mainAutomation.py
deleted file mode 100644
index d56804fd..00000000
--- a/modules/features/automation/mainAutomation.py
+++ /dev/null
@@ -1,446 +0,0 @@
-# Copyright (c) 2025 Patrick Motsch
-# All rights reserved.
-"""
-Automation Feature Container - Main Module.
-Handles feature initialization and RBAC catalog registration.
-"""
-
-import logging
-from typing import Dict, List, Any, Optional
-
-logger = logging.getLogger(__name__)
-
-# Feature metadata
-FEATURE_CODE = "automation"
-FEATURE_LABEL = {"en": "Automation", "de": "Automatisierung", "fr": "Automatisation"}
-FEATURE_ICON = "mdi-cog-clockwise"
-
-# UI Objects for RBAC catalog
-UI_OBJECTS = [
- {
- "objectKey": "ui.feature.automation.definitions",
- "label": {"en": "Automation Definitions", "de": "Automatisierungs-Definitionen", "fr": "Définitions d'automatisation"},
- "meta": {"area": "definitions"}
- },
- {
- "objectKey": "ui.feature.automation.templates",
- "label": {"en": "Templates", "de": "Vorlagen", "fr": "Modèles"},
- "meta": {"area": "templates"}
- },
-]
-
-# Resource Objects for RBAC catalog
-RESOURCE_OBJECTS = [
- {
- "objectKey": "resource.feature.automation.create",
- "label": {"en": "Create Automation", "de": "Automatisierung erstellen", "fr": "Créer automatisation"},
- "meta": {"endpoint": "/api/automations", "method": "POST"}
- },
- {
- "objectKey": "resource.feature.automation.update",
- "label": {"en": "Update Automation", "de": "Automatisierung aktualisieren", "fr": "Modifier automatisation"},
- "meta": {"endpoint": "/api/automations/{automationId}", "method": "PUT"}
- },
- {
- "objectKey": "resource.feature.automation.delete",
- "label": {"en": "Delete Automation", "de": "Automatisierung löschen", "fr": "Supprimer automatisation"},
- "meta": {"endpoint": "/api/automations/{automationId}", "method": "DELETE"}
- },
- {
- "objectKey": "resource.feature.automation.execute",
- "label": {"en": "Execute Automation", "de": "Automatisierung ausführen", "fr": "Exécuter automatisation"},
- "meta": {"endpoint": "/api/automations/{automationId}/execute", "method": "POST"}
- },
-]
-
-# Template roles for this feature
-TEMPLATE_ROLES = [
- {
- "roleLabel": "automation-admin",
- "description": {
- "en": "Automation Administrator - Full access to automation configuration and execution",
- "de": "Automatisierungs-Administrator - Vollzugriff auf Automatisierungs-Konfiguration und Ausführung",
- "fr": "Administrateur automatisation - Accès complet à la configuration et exécution"
- },
- "accessRules": [
- # Full UI access
- {"context": "UI", "item": None, "view": True},
- # Full DATA access
- {"context": "DATA", "item": None, "view": True, "read": "a", "create": "a", "update": "a", "delete": "a"},
- ]
- },
- {
- "roleLabel": "automation-editor",
- "description": {
- "en": "Automation Editor - Create and modify automations",
- "de": "Automatisierungs-Editor - Automatisierungen erstellen und bearbeiten",
- "fr": "Éditeur automatisation - Créer et modifier les automatisations"
- },
- "accessRules": [
- # UI access to definitions and templates - vollqualifizierte ObjectKeys
- {"context": "UI", "item": "ui.feature.automation.definitions", "view": True},
- {"context": "UI", "item": "ui.feature.automation.templates", "view": True},
- {"context": "UI", "item": "ui.feature.automation.logs", "view": True},
- # Group-level DATA access
- {"context": "DATA", "item": None, "view": True, "read": "g", "create": "g", "update": "g", "delete": "n"},
- ]
- },
- {
- "roleLabel": "automation-user",
- "description": {
- "en": "Automation User - Create and manage own automations",
- "de": "Automatisierungs-Benutzer - Eigene Automatisierungen erstellen und verwalten",
- "fr": "Utilisateur automatisation - Créer et gérer ses propres automatisations"
- },
- "accessRules": [
- {"context": "UI", "item": "ui.feature.automation.definitions", "view": True},
- {"context": "UI", "item": "ui.feature.automation.templates", "view": True},
- {"context": "UI", "item": "ui.feature.automation.logs", "view": True},
- {"context": "DATA", "item": None, "view": True, "read": "m", "create": "m", "update": "m", "delete": "m"},
- ]
- },
- {
- "roleLabel": "automation-viewer",
- "description": {
- "en": "Automation Viewer - View automations and execution results",
- "de": "Automatisierungs-Betrachter - Automatisierungen und Ausführungsergebnisse einsehen",
- "fr": "Visualiseur automatisation - Consulter les automatisations et résultats"
- },
- "accessRules": [
- # UI access to view only
- {"context": "UI", "item": "ui.feature.automation.definitions", "view": True},
- {"context": "UI", "item": "ui.feature.automation.logs", "view": True},
- # Read-only DATA access (my level)
- {"context": "DATA", "item": None, "view": True, "read": "m", "create": "n", "update": "n", "delete": "n"},
- ]
- },
-]
-
-# Service requirements - services this feature needs from the service center
-REQUIRED_SERVICES = [
- {"serviceKey": "chat", "meta": {"usage": "Workflow CRUD, messages, logs"}},
- {"serviceKey": "ai", "meta": {"usage": "AI planning for workflow execution"}},
- {"serviceKey": "utils", "meta": {"usage": "Timestamps, utilities"}},
- {"serviceKey": "billing", "meta": {"usage": "AI call billing"}},
- {"serviceKey": "extraction", "meta": {"usage": "Workflow method actions"}},
- {"serviceKey": "sharepoint", "meta": {"usage": "SharePoint actions (listDocuments, uploadDocument, etc.)"}},
- {"serviceKey": "generation", "meta": {"usage": "Action completion messages, document creation from results"}},
-]
-
-
-def getRequiredServiceKeys() -> List[str]:
- """Return list of service keys this feature requires."""
- return [s["serviceKey"] for s in REQUIRED_SERVICES]
-
-
-def getAutomationServices(
- user,
- mandateId: Optional[str] = None,
- featureInstanceId: Optional[str] = None,
- workflow=None,
-) -> "_AutomationServiceHub":
- """
- Get a service hub for the automation feature using the service center.
- Resolves only the services declared in REQUIRED_SERVICES.
- No legacy fallback - service center only.
-
- Returns a hub-like object with: chat, ai, utils, billing, extraction,
- sharepoint, rbac, interfaceDbApp, interfaceDbComponent, interfaceDbChat,
- interfaceDbAutomation.
- """
- from modules.serviceCenter import getService
- from modules.serviceCenter.context import ServiceCenterContext
- from modules.features.automation.interfaceFeatureAutomation import getInterface as getAutomationInterface
-
- _workflow = workflow
- if _workflow is None:
- # Placeholder must have 'id' and 'workflowMode' to avoid AttributeError when services use context.workflow
- _workflow = type("_Placeholder", (), {"featureCode": FEATURE_CODE, "id": None, "workflowMode": None})()
- ctx = ServiceCenterContext(
- user=user,
- mandate_id=mandateId,
- feature_instance_id=featureInstanceId,
- workflow=_workflow,
- )
-
- hub = _AutomationServiceHub()
- hub.user = user
- hub.mandateId = mandateId
- hub.featureInstanceId = featureInstanceId
- hub._service_context = ctx # Store context so workflow updates propagate to services
- hub.workflow = workflow
- hub.featureCode = FEATURE_CODE
- hub.allowedProviders = None
-
- for spec in REQUIRED_SERVICES:
- key = spec["serviceKey"]
- try:
- svc = getService(key, ctx)
- setattr(hub, key, svc)
- except Exception as e:
- logger.warning(f"Could not resolve service '{key}' for automation: {e}")
- setattr(hub, key, None)
-
- # Copy interfaces from chat service for WorkflowManager compatibility
- if hub.chat:
- hub.interfaceDbApp = getattr(hub.chat, "interfaceDbApp", None)
- hub.interfaceDbComponent = getattr(hub.chat, "interfaceDbComponent", None)
- hub.interfaceDbChat = getattr(hub.chat, "interfaceDbChat", None)
-
- # RBAC for MethodBase action permission checks (workflow methods)
- hub.rbac = getattr(hub.interfaceDbApp, "rbac", None) if hub.interfaceDbApp else None
-
- # Set interfaceDbAutomation from feature interface
- hub.interfaceDbAutomation = getAutomationInterface(
- user, mandateId=mandateId, featureInstanceId=featureInstanceId
- )
-
- return hub
-
-
-class _AutomationServiceHub:
- """Lightweight hub exposing only services required by the automation feature."""
-
- user = None
- mandateId = None
- featureInstanceId = None
- _service_context = None # ServiceCenterContext; when workflow is set, context.workflow is updated
- workflow = None
- featureCode = "automation"
- allowedProviders = None
- interfaceDbApp = None
- interfaceDbComponent = None
- interfaceDbChat = None
- interfaceDbAutomation = None
- rbac = None
- chat = None
- ai = None
- utils = None
- billing = None
- extraction = None
- sharepoint = None
-
-
-def getFeatureDefinition() -> Dict[str, Any]:
- """Return the feature definition for registration."""
- return {
- "code": FEATURE_CODE,
- "label": FEATURE_LABEL,
- "icon": FEATURE_ICON,
- "autoCreateInstance": False,
- }
-
-
-def getUiObjects() -> List[Dict[str, Any]]:
- """Return UI objects for RBAC catalog registration."""
- return UI_OBJECTS
-
-
-def getResourceObjects() -> List[Dict[str, Any]]:
- """Return resource objects for RBAC catalog registration."""
- return RESOURCE_OBJECTS
-
-
-def getTemplateRoles() -> List[Dict[str, Any]]:
- """Return template roles for this feature."""
- return TEMPLATE_ROLES
-
-
-def registerFeature(catalogService) -> bool:
- """
- Register this feature's RBAC objects in the catalog.
-
- Args:
- catalogService: The RBAC catalog service instance
-
- Returns:
- True if registration was successful
- """
- try:
- # Register UI objects
- for uiObj in UI_OBJECTS:
- catalogService.registerUiObject(
- featureCode=FEATURE_CODE,
- objectKey=uiObj["objectKey"],
- label=uiObj["label"],
- meta=uiObj.get("meta")
- )
-
- # Register Resource objects
- for resObj in RESOURCE_OBJECTS:
- catalogService.registerResourceObject(
- featureCode=FEATURE_CODE,
- objectKey=resObj["objectKey"],
- label=resObj["label"],
- meta=resObj.get("meta")
- )
-
- # Sync template roles to database
- _syncTemplateRolesToDb()
-
- # Mark existing templates without isSystem field as system templates (migration)
- _migrateExistingTemplates()
-
- logger.info(f"Feature '{FEATURE_CODE}' registered {len(UI_OBJECTS)} UI objects and {len(RESOURCE_OBJECTS)} resource objects")
- return True
-
- except Exception as e:
- logger.error(f"Failed to register feature '{FEATURE_CODE}': {e}")
- return False
-
-
-def _syncTemplateRolesToDb() -> int:
- """
- Sync template roles and their AccessRules to the database.
- Creates global template roles (mandateId=None) if they don't exist.
-
- Returns:
- Number of roles created/updated
- """
- try:
- from modules.interfaces.interfaceDbApp import getRootInterface
- from modules.datamodels.datamodelRbac import Role, AccessRule, AccessRuleContext
-
- rootInterface = getRootInterface()
-
- # Get existing template roles for this feature (Pydantic models)
- existingRoles = rootInterface.getRolesByFeatureCode(FEATURE_CODE)
- # Filter to template roles (mandateId is None)
- templateRoles = [r for r in existingRoles if r.mandateId is None]
- existingRoleLabels = {r.roleLabel: str(r.id) for r in templateRoles}
-
- createdCount = 0
- for roleTemplate in TEMPLATE_ROLES:
- roleLabel = roleTemplate["roleLabel"]
-
- if roleLabel in existingRoleLabels:
- roleId = existingRoleLabels[roleLabel]
- # Ensure AccessRules exist for this role
- _ensureAccessRulesForRole(rootInterface, roleId, roleTemplate.get("accessRules", []))
- else:
- # Create new template role
- newRole = Role(
- roleLabel=roleLabel,
- description=roleTemplate.get("description", {}),
- featureCode=FEATURE_CODE,
- mandateId=None, # Global template
- featureInstanceId=None,
- isSystemRole=False
- )
- createdRole = rootInterface.db.recordCreate(Role, newRole.model_dump())
- roleId = createdRole.get("id")
-
- # Create AccessRules for this role
- _ensureAccessRulesForRole(rootInterface, roleId, roleTemplate.get("accessRules", []))
-
- logger.info(f"Created template role '{roleLabel}' with ID {roleId}")
- createdCount += 1
-
- if createdCount > 0:
- logger.info(f"Feature '{FEATURE_CODE}': Created {createdCount} template roles")
-
- return createdCount
-
- except Exception as e:
- logger.error(f"Error syncing template roles for feature '{FEATURE_CODE}': {e}")
- return 0
-
-
-def _ensureAccessRulesForRole(rootInterface, roleId: str, ruleTemplates: List[Dict[str, Any]]) -> int:
- """
- Ensure AccessRules exist for a role based on templates.
-
- Args:
- rootInterface: Root interface instance
- roleId: Role ID
- ruleTemplates: List of rule templates
-
- Returns:
- Number of rules created
- """
- from modules.datamodels.datamodelRbac import AccessRule, AccessRuleContext
-
- # Get existing rules for this role (Pydantic models)
- existingRules = rootInterface.getAccessRulesByRole(roleId)
-
- # Create a set of existing rule signatures to avoid duplicates
- # IMPORTANT: Use .value for enum comparison, not str() which gives "AccessRuleContext.DATA" in Python 3.11+
- existingSignatures = set()
- for rule in existingRules:
- sig = (rule.context.value if rule.context else None, rule.item)
- existingSignatures.add(sig)
-
- createdCount = 0
- for template in ruleTemplates:
- context = template.get("context", "UI")
- item = template.get("item")
- sig = (context, item)
-
- if sig in existingSignatures:
- continue
-
- # Map context string to enum
- if context == "UI":
- contextEnum = AccessRuleContext.UI
- elif context == "DATA":
- contextEnum = AccessRuleContext.DATA
- elif context == "RESOURCE":
- contextEnum = AccessRuleContext.RESOURCE
- else:
- contextEnum = context
-
- newRule = AccessRule(
- roleId=roleId,
- context=contextEnum,
- item=item,
- view=template.get("view", False),
- read=template.get("read"),
- create=template.get("create"),
- update=template.get("update"),
- delete=template.get("delete"),
- )
- rootInterface.db.recordCreate(AccessRule, newRule.model_dump())
- createdCount += 1
-
- if createdCount > 0:
- logger.debug(f"Created {createdCount} AccessRules for role {roleId}")
-
- return createdCount
-
-
-def _migrateExistingTemplates() -> None:
- """
- Migration: Mark existing templates that have no isSystem/featureInstanceId fields
- as system templates (isSystem=True). This runs idempotently during feature registration.
- """
- try:
- from modules.features.automation.interfaceFeatureAutomation import getInterface
- from modules.security.rootAccess import getRootUser
- from modules.features.automation.datamodelFeatureAutomation import AutomationTemplate
-
- rootUser = getRootUser()
- automationInterface = getInterface(rootUser)
-
- # Get all templates from DB
- allTemplates = automationInterface.db.getRecordset(AutomationTemplate)
-
- migratedCount = 0
- for template in allTemplates:
- templateId = template.get("id")
- isSystem = template.get("isSystem")
- featureInstanceId = template.get("featureInstanceId")
-
- # Templates without isSystem set (old templates) → mark as system
- if isSystem is None and featureInstanceId is None:
- automationInterface.db.recordModify(
- AutomationTemplate,
- templateId,
- {"isSystem": True, "featureInstanceId": None}
- )
- migratedCount += 1
-
- if migratedCount > 0:
- logger.info(f"Migrated {migratedCount} existing templates to isSystem=True")
-
- except Exception as e:
- logger.warning(f"Template migration check failed (non-critical): {e}")
diff --git a/modules/features/automation/routeFeatureAutomation.py b/modules/features/automation/routeFeatureAutomation.py
deleted file mode 100644
index c6343b25..00000000
--- a/modules/features/automation/routeFeatureAutomation.py
+++ /dev/null
@@ -1,1264 +0,0 @@
-# Copyright (c) 2025 Patrick Motsch
-# All rights reserved.
-"""
-Automation routes for the backend API.
-Implements the endpoints for automation definition management.
-"""
-
-from fastapi import APIRouter, HTTPException, Depends, Body, Path, Request, Response, Query
-from typing import List, Dict, Any, Optional
-from fastapi import status
-from fastapi.responses import JSONResponse
-import logging
-import json
-
-# Import interfaces and models
-from modules.features.automation.interfaceFeatureAutomation import getInterface as getAutomationInterface
-from modules.features.automation.mainAutomation import getAutomationServices
-from modules.auth import limiter, getRequestContext, RequestContext
-from modules.features.automation.datamodelFeatureAutomation import AutomationDefinition, AutomationTemplate
-from modules.datamodels.datamodelChat import ChatWorkflow, ChatMessage, ChatLog, UserInputRequest, WorkflowModeEnum
-from modules.datamodels.datamodelPagination import PaginationParams, PaginatedResponse, PaginationMetadata, normalize_pagination_dict
-from modules.shared.attributeUtils import getModelAttributeDefinitions
-from modules.interfaces import interfaceDbChat
-from modules.interfaces.interfaceDbBilling import getInterface as _getBillingInterface
-# Configure logger
-logger = logging.getLogger(__name__)
-
-# Model attributes for AutomationDefinition and ChatWorkflow
-automationAttributes = getModelAttributeDefinitions(AutomationDefinition)
-workflowAttributes = getModelAttributeDefinitions(ChatWorkflow)
-
-# Create router for automation endpoints
-router = APIRouter(
- prefix="/api/automations",
- tags=["Manage Automations"],
- responses={
- 404: {"description": "Not found"},
- 400: {"description": "Bad request"},
- 401: {"description": "Unauthorized"},
- 403: {"description": "Forbidden"},
- 500: {"description": "Internal server error"}
- }
-)
-
-@router.get("", response_model=PaginatedResponse[AutomationDefinition])
-@limiter.limit("30/minute")
-def get_automations(
- request: Request,
- pagination: Optional[str] = Query(None, description="JSON-encoded PaginationParams object"),
- context: RequestContext = Depends(getRequestContext)
-) -> PaginatedResponse[AutomationDefinition]:
- """
- Get automation definitions with optional pagination, sorting, and filtering.
-
- Query Parameters:
- - pagination: JSON-encoded PaginationParams object, or None for no pagination
- """
- try:
- # Parse pagination parameter
- paginationParams = None
- if pagination:
- try:
- paginationDict = json.loads(pagination)
- if paginationDict:
- paginationDict = normalize_pagination_dict(paginationDict)
- paginationParams = PaginationParams(**paginationDict)
- except (json.JSONDecodeError, ValueError) as e:
- raise HTTPException(
- status_code=400,
- detail=f"Invalid pagination parameter: {str(e)}"
- )
-
- # AutomationDefinitions can belong to ANY feature instance within a mandate.
- # The list endpoint must show all definitions for the user's mandate, not filter by a specific featureInstanceId.
- chatInterface = getAutomationInterface(context.user, mandateId=str(context.mandateId) if context.mandateId else None)
- result = chatInterface.getAllAutomationDefinitions(pagination=paginationParams)
-
- # If pagination was requested, result is PaginatedResult
- # If no pagination, result is List[Dict]
- # Note: Using JSONResponse to bypass Pydantic validation which would filter out sysCreatedBy
- # The enriched fields (sysCreatedByUserName, mandateName) are not in the Pydantic model
- from fastapi.responses import JSONResponse
-
- if paginationParams:
- response_data = {
- "items": result.items,
- "pagination": {
- "currentPage": paginationParams.page,
- "pageSize": paginationParams.pageSize,
- "totalItems": result.totalItems,
- "totalPages": result.totalPages,
- "sort": paginationParams.sort,
- "filters": paginationParams.filters
- }
- }
- else:
- response_data = {
- "items": result,
- "pagination": None
- }
-
- return JSONResponse(content=response_data)
- except HTTPException:
- raise
- except Exception as e:
- logger.error(f"Error getting automations: {str(e)}")
- raise HTTPException(
- status_code=500,
- detail=f"Error getting automations: {str(e)}"
- )
-
-@router.get("/filter-values")
-@limiter.limit("60/minute")
-def get_automation_filter_values(
- request: Request,
- column: str = Query(..., description="Column key"),
- pagination: Optional[str] = Query(None, description="JSON-encoded current filters"),
- context: RequestContext = Depends(getRequestContext)
-) -> list:
- """Return distinct filter values for a column in automations."""
- try:
- from modules.routes.routeDataUsers import _handleFilterValuesRequest
- chatInterface = getAutomationInterface(context.user, mandateId=str(context.mandateId) if context.mandateId else None)
- result = chatInterface.getAllAutomationDefinitions(pagination=None)
- items = result if isinstance(result, list) else [r if isinstance(r, dict) else r.model_dump() if hasattr(r, 'model_dump') else r for r in result]
- return _handleFilterValuesRequest(items, column, pagination)
- except Exception as e:
- logger.error(f"Error getting filter values for automations: {str(e)}")
- raise HTTPException(status_code=500, detail=str(e))
-
-
-@router.post("", response_model=AutomationDefinition)
-@limiter.limit("10/minute")
-def create_automation(
- request: Request,
- automation: AutomationDefinition,
- context: RequestContext = Depends(getRequestContext)
-) -> AutomationDefinition:
- """Create a new automation definition"""
- try:
- chatInterface = getAutomationInterface(context.user, mandateId=str(context.mandateId) if context.mandateId else None, featureInstanceId=str(context.featureInstanceId) if context.featureInstanceId else None)
- automationData = automation.model_dump()
- created = chatInterface.createAutomationDefinition(automationData)
- return created
- except HTTPException:
- raise
- except Exception as e:
- logger.error(f"Error creating automation: {str(e)}")
- raise HTTPException(
- status_code=500,
- detail=f"Error creating automation: {str(e)}"
- )
-
-@router.get("/attributes", response_model=Dict[str, Any])
-def get_automation_attributes(
- request: Request
-) -> Dict[str, Any]:
- """Get attribute definitions for AutomationDefinition model"""
- return {"attributes": automationAttributes}
-
-
-@router.get("/actions")
-@limiter.limit("30/minute")
-def get_available_actions(
- request: Request,
- context: RequestContext = Depends(getRequestContext)
-) -> JSONResponse:
- """
- Get available workflow actions for template editor.
- Returns action definitions with parameters and example JSON snippets.
- """
- try:
- from modules.workflows.processing.shared.methodDiscovery import methods, discoverMethods
-
- # Ensure methods are discovered (need a service hub for discovery)
- if not methods:
- services = getAutomationServices(
- context.user,
- mandateId=context.mandateId,
- featureInstanceId=context.featureInstanceId,
- )
- discoverMethods(services)
-
- actionsList = []
- processedMethods = set()
-
- for methodName, methodInfo in methods.items():
- # Skip short name aliases - only process full class names (MethodXxx)
- if not methodName.startswith('Method'):
- continue
-
- shortName = methodName.replace('Method', '').lower()
-
- # Skip if already processed
- if shortName in processedMethods:
- continue
- processedMethods.add(shortName)
-
- methodInstance = methodInfo.get('instance')
- if not methodInstance:
- continue
-
- # Get actions from method instance
- for actionName, actionDef in methodInstance._actions.items():
- # Build action info
- actionInfo = {
- "method": shortName,
- "action": actionName,
- "actionId": actionDef.actionId if hasattr(actionDef, 'actionId') else f"{shortName}.{actionName}",
- "description": actionDef.description if hasattr(actionDef, 'description') else "",
- "category": actionDef.category if hasattr(actionDef, 'category') else "general",
- "parameters": []
- }
-
- # Add parameters from WorkflowActionParameter
- parametersDef = actionDef.parameters if hasattr(actionDef, 'parameters') else {}
- for paramName, paramDef in parametersDef.items():
- paramInfo = {
- "name": paramName,
- "type": paramDef.type if hasattr(paramDef, 'type') else "Any",
- "frontendType": paramDef.frontendType.value if hasattr(paramDef, 'frontendType') and paramDef.frontendType else "text",
- "required": paramDef.required if hasattr(paramDef, 'required') else False,
- "default": paramDef.default if hasattr(paramDef, 'default') else None,
- "description": paramDef.description if hasattr(paramDef, 'description') else "",
- }
- if hasattr(paramDef, 'frontendOptions') and paramDef.frontendOptions:
- paramInfo["frontendOptions"] = paramDef.frontendOptions
- actionInfo["parameters"].append(paramInfo)
-
- # Build example JSON snippet for copy/paste
- exampleParams = {}
- for paramName, paramDef in parametersDef.items():
- if hasattr(paramDef, 'required') and paramDef.required:
- exampleParams[paramName] = f"{{{{KEY:{paramName}}}}}"
- else:
- default = paramDef.default if hasattr(paramDef, 'default') else None
- exampleParams[paramName] = default or f"{{{{KEY:{paramName}}}}}"
-
- actionInfo["exampleJson"] = {
- "execMethod": shortName,
- "execAction": actionName,
- "execParameters": exampleParams,
- "execResultLabel": f"{shortName}_{actionName}_result"
- }
-
- actionsList.append(actionInfo)
-
- return JSONResponse(content={"actions": actionsList})
- except Exception as e:
- logger.error(f"Error getting available actions: {str(e)}")
- raise HTTPException(
- status_code=500,
- detail=f"Error getting available actions: {str(e)}"
- )
-
-
-# -----------------------------------------------------------------------------
-# Workflow routes under /{instanceId}/workflows/ (instance-scoped)
-# -----------------------------------------------------------------------------
-
-def _validateAutomationInstanceAccess(instanceId: str, context: RequestContext) -> Optional[str]:
- """Validate user has access to the automation feature instance. Returns mandateId."""
- from modules.interfaces.interfaceDbApp import getRootInterface
- rootInterface = getRootInterface()
- instance = rootInterface.getFeatureInstance(instanceId)
- if not instance:
- raise HTTPException(status_code=404, detail=f"Feature instance {instanceId} not found")
- featureAccess = rootInterface.getFeatureAccess(str(context.user.id), instanceId)
- if not featureAccess or not featureAccess.enabled:
- raise HTTPException(status_code=403, detail="Access denied to this feature instance")
- return str(instance.mandateId) if instance.mandateId else None
-
-
-def _getAutomationServiceChat(context: RequestContext, featureInstanceId: str = None, mandateId: str = None):
- """Get chat interface with feature instance context for workflows."""
- return interfaceDbChat.getInterface(
- context.user,
- mandateId=mandateId or (str(context.mandateId) if context.mandateId else None),
- featureInstanceId=featureInstanceId
- )
-
-
-@router.get("/{instanceId}/workflows", response_model=PaginatedResponse[ChatWorkflow])
-@limiter.limit("120/minute")
-def get_automation_workflows(
- request: Request,
- instanceId: str = Path(..., description="Feature instance ID"),
- pagination: Optional[str] = Query(None, description="JSON-encoded PaginationParams object"),
- page: int = Query(1, ge=1, description="Page number (legacy)"),
- pageSize: int = Query(20, ge=1, le=100, description="Items per page (legacy)"),
- context: RequestContext = Depends(getRequestContext)
-) -> PaginatedResponse[ChatWorkflow]:
- """Get all workflows for this automation feature instance."""
- try:
- mandateId = _validateAutomationInstanceAccess(instanceId, context)
- chatInterface = _getAutomationServiceChat(context, featureInstanceId=instanceId, mandateId=mandateId)
- paginationParams = None
- if pagination:
- try:
- paginationDict = json.loads(pagination)
- if paginationDict:
- paginationDict = normalize_pagination_dict(paginationDict)
- paginationParams = PaginationParams(**paginationDict)
- except (json.JSONDecodeError, ValueError) as e:
- raise HTTPException(status_code=400, detail=f"Invalid pagination parameter: {str(e)}")
- else:
- paginationParams = PaginationParams(page=page, pageSize=pageSize)
- result = chatInterface.getWorkflows(pagination=paginationParams)
- if paginationParams:
- return PaginatedResponse(
- items=result.items,
- pagination=PaginationMetadata(
- currentPage=paginationParams.page,
- pageSize=paginationParams.pageSize,
- totalItems=result.totalItems,
- totalPages=result.totalPages,
- sort=paginationParams.sort,
- filters=paginationParams.filters
- )
- )
- return PaginatedResponse(items=result, pagination=None)
- except HTTPException:
- raise
- except Exception as e:
- logger.error(f"Error getting automation workflows: {str(e)}", exc_info=True)
- raise HTTPException(status_code=500, detail=f"Error getting workflows: {str(e)}")
-
-
-# Workflow attributes (ChatWorkflow model)
-@router.get("/{instanceId}/workflows/attributes", response_model=Dict[str, Any])
-@limiter.limit("120/minute")
-def get_automation_workflow_attributes(
- request: Request,
- instanceId: str = Path(..., description="Feature instance ID"),
- context: RequestContext = Depends(getRequestContext)
-) -> Dict[str, Any]:
- """Get attribute definitions for ChatWorkflow model."""
- _validateAutomationInstanceAccess(instanceId, context)
- return {"attributes": workflowAttributes}
-
-
-# Actions (must be before /{workflowId} to avoid path conflict)
-@router.get("/{instanceId}/workflows/actions", response_model=Dict[str, Any])
-@limiter.limit("120/minute")
-def get_automation_workflow_actions(
- request: Request,
- instanceId: str = Path(..., description="Feature instance ID"),
- context: RequestContext = Depends(getRequestContext)
-) -> Dict[str, Any]:
- """Get all available workflow actions."""
- try:
- mandateId = _validateAutomationInstanceAccess(instanceId, context)
- services = getAutomationServices(context.user, mandateId=mandateId, featureInstanceId=instanceId)
- from modules.workflows.processing.shared.methodDiscovery import discoverMethods, methods
- discoverMethods(services)
- allActions = []
- for methodName, methodInfo in methods.items():
- if methodName.startswith('Method'):
- continue
- methodInstance = methodInfo['instance']
- for actionName, actionInfo in methodInstance.actions.items():
- allActions.append({
- "module": methodInstance.name,
- "actionId": f"{methodInstance.name}.{actionName}",
- "name": actionName,
- "description": actionInfo.get('description', ''),
- "parameters": actionInfo.get('parameters', {})
- })
- return {"actions": allActions}
- except HTTPException:
- raise
- except Exception as e:
- logger.error(f"Error getting actions: {str(e)}", exc_info=True)
- raise HTTPException(status_code=500, detail=f"Failed to get actions: {str(e)}")
-
-
-@router.get("/{instanceId}/workflows/actions/{method}", response_model=Dict[str, Any])
-@limiter.limit("120/minute")
-def get_automation_method_actions(
- request: Request,
- instanceId: str = Path(..., description="Feature instance ID"),
- method: str = Path(..., description="Method name"),
- context: RequestContext = Depends(getRequestContext)
-) -> Dict[str, Any]:
- """Get actions for a specific method."""
- try:
- _validateAutomationInstanceAccess(instanceId, context)
- services = getAutomationServices(context.user, mandateId=str(context.mandateId) if context.mandateId else None, featureInstanceId=instanceId)
- from modules.workflows.processing.shared.methodDiscovery import discoverMethods, methods
- discoverMethods(services)
- methodInstance = None
- for mn, mi in methods.items():
- if mi['instance'].name == method:
- methodInstance = mi['instance']
- break
- if not methodInstance:
- raise HTTPException(status_code=404, detail=f"Method '{method}' not found")
- actions = [{"actionId": f"{methodInstance.name}.{an}", "name": an, "description": ai.get('description', ''), "parameters": ai.get('parameters', {})}
- for an, ai in methodInstance.actions.items()]
- return {"module": methodInstance.name, "description": methodInstance.description, "actions": actions}
- except HTTPException:
- raise
- except Exception as e:
- logger.error(f"Error getting actions for {method}: {str(e)}", exc_info=True)
- raise HTTPException(status_code=500, detail=f"Failed to get actions: {str(e)}")
-
-
-@router.get("/{instanceId}/workflows/actions/{method}/{action}", response_model=Dict[str, Any])
-@limiter.limit("120/minute")
-def get_automation_action_schema(
- request: Request,
- instanceId: str = Path(..., description="Feature instance ID"),
- method: str = Path(..., description="Method name"),
- action: str = Path(..., description="Action name"),
- context: RequestContext = Depends(getRequestContext)
-) -> Dict[str, Any]:
- """Get action schema for a specific action."""
- try:
- _validateAutomationInstanceAccess(instanceId, context)
- services = getAutomationServices(context.user, mandateId=str(context.mandateId) if context.mandateId else None, featureInstanceId=instanceId)
- from modules.workflows.processing.shared.methodDiscovery import discoverMethods, methods
- discoverMethods(services)
- methodInstance = None
- for mn, mi in methods.items():
- if mi['instance'].name == method:
- methodInstance = mi['instance']
- break
- if not methodInstance:
- raise HTTPException(status_code=404, detail=f"Method '{method}' not found")
- if action not in methodInstance.actions:
- raise HTTPException(status_code=404, detail=f"Action '{action}' not found in method '{method}'")
- ai = methodInstance.actions[action]
- return {"method": methodInstance.name, "action": action, "actionId": f"{methodInstance.name}.{action}",
- "description": ai.get('description', ''), "parameters": ai.get('parameters', {})}
- except HTTPException:
- raise
- except Exception as e:
- logger.error(f"Error getting action schema: {str(e)}", exc_info=True)
- raise HTTPException(status_code=500, detail=f"Failed to get action schema: {str(e)}")
-
-
-@router.get("/{instanceId}/workflows/{workflowId}", response_model=ChatWorkflow)
-@limiter.limit("120/minute")
-def get_automation_workflow(
- request: Request,
- instanceId: str = Path(..., description="Feature instance ID"),
- workflowId: str = Path(..., description="Workflow ID"),
- context: RequestContext = Depends(getRequestContext)
-) -> ChatWorkflow:
- """Get workflow by ID."""
- try:
- _validateAutomationInstanceAccess(instanceId, context)
- chatInterface = _getAutomationServiceChat(context, featureInstanceId=instanceId)
- workflow = chatInterface.getWorkflow(workflowId)
- if not workflow:
- raise HTTPException(status_code=404, detail="Workflow not found")
- return workflow
- except HTTPException:
- raise
- except Exception as e:
- logger.error(f"Error getting workflow: {str(e)}")
- raise HTTPException(status_code=500, detail=f"Failed to get workflow: {str(e)}")
-
-
-@router.put("/{instanceId}/workflows/{workflowId}", response_model=ChatWorkflow)
-@limiter.limit("120/minute")
-def update_automation_workflow(
- request: Request,
- instanceId: str = Path(..., description="Feature instance ID"),
- workflowId: str = Path(..., description="Workflow ID"),
- workflowData: Dict[str, Any] = Body(...),
- context: RequestContext = Depends(getRequestContext)
-) -> ChatWorkflow:
- """Update workflow by ID."""
- try:
- _validateAutomationInstanceAccess(instanceId, context)
- chatInterface = _getAutomationServiceChat(context, featureInstanceId=instanceId)
- workflow = chatInterface.getWorkflow(workflowId)
- if not workflow:
- raise HTTPException(status_code=404, detail="Workflow not found")
- if not chatInterface.checkRbacPermission(ChatWorkflow, "update", workflowId):
- raise HTTPException(status_code=403, detail="You don't have permission to update this workflow")
- updated = chatInterface.updateWorkflow(workflowId, workflowData)
- if not updated:
- raise HTTPException(status_code=500, detail="Failed to update workflow")
- return updated
- except HTTPException:
- raise
- except Exception as e:
- logger.error(f"Error updating workflow: {str(e)}")
- raise HTTPException(status_code=500, detail=f"Failed to update workflow: {str(e)}")
-
-
-@router.delete("/{instanceId}/workflows/{workflowId}")
-@limiter.limit("120/minute")
-def delete_automation_workflow(
- request: Request,
- instanceId: str = Path(..., description="Feature instance ID"),
- workflowId: str = Path(..., description="Workflow ID"),
- context: RequestContext = Depends(getRequestContext)
-) -> Dict[str, Any]:
- """Delete workflow and associated data."""
- try:
- _validateAutomationInstanceAccess(instanceId, context)
- chatInterface = _getAutomationServiceChat(context, featureInstanceId=instanceId)
- workflow = chatInterface.getWorkflow(workflowId)
- if not workflow:
- raise HTTPException(status_code=404, detail=f"Workflow {workflowId} not found")
- if not chatInterface.checkRbacPermission(ChatWorkflow, "delete", workflowId):
- raise HTTPException(status_code=403, detail="You don't have permission to delete this workflow")
- success = chatInterface.deleteWorkflow(workflowId)
- if not success:
- raise HTTPException(status_code=500, detail="Failed to delete workflow")
- return {"id": workflowId, "message": "Workflow and associated data deleted successfully"}
- except HTTPException:
- raise
- except Exception as e:
- logger.error(f"Error deleting workflow: {str(e)}", exc_info=True)
- raise HTTPException(status_code=500, detail=f"Error deleting workflow: {str(e)}")
-
-
-@router.get("/{instanceId}/workflows/{workflowId}/status", response_model=ChatWorkflow)
-@limiter.limit("120/minute")
-def get_automation_workflow_status(
- request: Request,
- instanceId: str = Path(..., description="Feature instance ID"),
- workflowId: str = Path(..., description="Workflow ID"),
- context: RequestContext = Depends(getRequestContext)
-) -> ChatWorkflow:
- """Get workflow status."""
- try:
- _validateAutomationInstanceAccess(instanceId, context)
- chatInterface = _getAutomationServiceChat(context, featureInstanceId=instanceId)
- workflow = chatInterface.getWorkflow(workflowId)
- if not workflow:
- raise HTTPException(status_code=404, detail=f"Workflow {workflowId} not found")
- return workflow
- except HTTPException:
- raise
- except Exception as e:
- logger.error(f"Error getting workflow status: {str(e)}", exc_info=True)
- raise HTTPException(status_code=500, detail=f"Error getting workflow status: {str(e)}")
-
-
-@router.get("/{instanceId}/workflows/{workflowId}/logs", response_model=PaginatedResponse[ChatLog])
-@limiter.limit("120/minute")
-def get_automation_workflow_logs(
- request: Request,
- instanceId: str = Path(..., description="Feature instance ID"),
- workflowId: str = Path(..., description="Workflow ID"),
- logId: Optional[str] = Query(None),
- pagination: Optional[str] = Query(None),
- context: RequestContext = Depends(getRequestContext)
-) -> PaginatedResponse[ChatLog]:
- """Get workflow logs."""
- try:
- _validateAutomationInstanceAccess(instanceId, context)
- chatInterface = _getAutomationServiceChat(context, featureInstanceId=instanceId)
- workflow = chatInterface.getWorkflow(workflowId)
- if not workflow:
- raise HTTPException(status_code=404, detail=f"Workflow {workflowId} not found")
- paginationParams = None
- if pagination:
- try:
- pd = json.loads(pagination)
- if pd:
- pd = normalize_pagination_dict(pd)
- paginationParams = PaginationParams(**pd)
- except (json.JSONDecodeError, ValueError) as e:
- raise HTTPException(status_code=400, detail=f"Invalid pagination: {str(e)}")
- result = chatInterface.getLogs(workflowId, pagination=paginationParams)
- if logId:
- allLogs = result.items if paginationParams else result
- idx = next((i for i, log in enumerate(allLogs) if log.id == logId), -1)
- if idx >= 0:
- return PaginatedResponse(items=allLogs[idx + 1:], pagination=None)
- if paginationParams:
- return PaginatedResponse(items=result.items, pagination=PaginationMetadata(
- currentPage=paginationParams.page, pageSize=paginationParams.pageSize,
- totalItems=result.totalItems, totalPages=result.totalPages,
- sort=paginationParams.sort, filters=paginationParams.filters))
- return PaginatedResponse(items=result, pagination=None)
- except HTTPException:
- raise
- except Exception as e:
- logger.error(f"Error getting workflow logs: {str(e)}", exc_info=True)
- raise HTTPException(status_code=500, detail=f"Error getting workflow logs: {str(e)}")
-
-
-@router.get("/{instanceId}/workflows/{workflowId}/messages", response_model=PaginatedResponse[ChatMessage])
-@limiter.limit("120/minute")
-def get_automation_workflow_messages(
- request: Request,
- instanceId: str = Path(..., description="Feature instance ID"),
- workflowId: str = Path(..., description="Workflow ID"),
- messageId: Optional[str] = Query(None),
- pagination: Optional[str] = Query(None),
- context: RequestContext = Depends(getRequestContext)
-) -> PaginatedResponse[ChatMessage]:
- """Get workflow messages."""
- try:
- _validateAutomationInstanceAccess(instanceId, context)
- chatInterface = _getAutomationServiceChat(context, featureInstanceId=instanceId)
- workflow = chatInterface.getWorkflow(workflowId)
- if not workflow:
- raise HTTPException(status_code=404, detail=f"Workflow {workflowId} not found")
- paginationParams = None
- if pagination:
- try:
- pd = json.loads(pagination)
- if pd:
- pd = normalize_pagination_dict(pd)
- paginationParams = PaginationParams(**pd)
- except (json.JSONDecodeError, ValueError) as e:
- raise HTTPException(status_code=400, detail=f"Invalid pagination: {str(e)}")
- result = chatInterface.getMessages(workflowId, pagination=paginationParams)
- if messageId:
- allMsgs = result.items if paginationParams else result
- idx = next((i for i, m in enumerate(allMsgs) if m.id == messageId), -1)
- if idx >= 0:
- return PaginatedResponse(items=allMsgs[idx + 1:], pagination=None)
- if paginationParams:
- return PaginatedResponse(items=result.items, pagination=PaginationMetadata(
- currentPage=paginationParams.page, pageSize=paginationParams.pageSize,
- totalItems=result.totalItems, totalPages=result.totalPages,
- sort=paginationParams.sort, filters=paginationParams.filters))
- return PaginatedResponse(items=result, pagination=None)
- except HTTPException:
- raise
- except Exception as e:
- logger.error(f"Error getting workflow messages: {str(e)}", exc_info=True)
- raise HTTPException(status_code=500, detail=f"Error getting workflow messages: {str(e)}")
-
-
-@router.delete("/{instanceId}/workflows/{workflowId}/messages/{messageId}")
-@limiter.limit("120/minute")
-def delete_automation_workflow_message(
- request: Request,
- instanceId: str = Path(..., description="Feature instance ID"),
- workflowId: str = Path(..., description="Workflow ID"),
- messageId: str = Path(..., description="Message ID"),
- context: RequestContext = Depends(getRequestContext)
-) -> Dict[str, Any]:
- """Delete message from workflow."""
- try:
- _validateAutomationInstanceAccess(instanceId, context)
- chatInterface = _getAutomationServiceChat(context, featureInstanceId=instanceId)
- workflow = chatInterface.getWorkflow(workflowId)
- if not workflow:
- raise HTTPException(status_code=404, detail=f"Workflow {workflowId} not found")
- success = chatInterface.deleteMessage(workflowId, messageId)
- if not success:
- raise HTTPException(status_code=404, detail=f"Message {messageId} not found")
- return {"workflowId": workflowId, "messageId": messageId, "message": "Message deleted successfully"}
- except HTTPException:
- raise
- except Exception as e:
- logger.error(f"Error deleting message: {str(e)}", exc_info=True)
- raise HTTPException(status_code=500, detail=f"Error deleting message: {str(e)}")
-
-
-@router.delete("/{instanceId}/workflows/{workflowId}/messages/{messageId}/files/{fileId}")
-@limiter.limit("120/minute")
-def delete_automation_file_from_message(
- request: Request,
- instanceId: str = Path(..., description="Feature instance ID"),
- workflowId: str = Path(..., description="Workflow ID"),
- messageId: str = Path(..., description="Message ID"),
- fileId: str = Path(..., description="File ID"),
- context: RequestContext = Depends(getRequestContext)
-) -> Dict[str, Any]:
- """Delete file from message."""
- try:
- _validateAutomationInstanceAccess(instanceId, context)
- chatInterface = _getAutomationServiceChat(context, featureInstanceId=instanceId)
- workflow = chatInterface.getWorkflow(workflowId)
- if not workflow:
- raise HTTPException(status_code=404, detail=f"Workflow {workflowId} not found")
- success = chatInterface.deleteFileFromMessage(workflowId, messageId, fileId)
- if not success:
- raise HTTPException(status_code=404, detail=f"File {fileId} not found")
- return {"workflowId": workflowId, "messageId": messageId, "fileId": fileId, "message": "File reference deleted successfully"}
- except HTTPException:
- raise
- except Exception as e:
- logger.error(f"Error deleting file: {str(e)}", exc_info=True)
- raise HTTPException(status_code=500, detail=f"Error deleting file: {str(e)}")
-
-
-@router.get("/{instanceId}/workflows/{workflowId}/chatData")
-@limiter.limit("120/minute")
-def get_automation_workflow_chat_data(
- request: Request,
- instanceId: str = Path(..., description="Feature instance ID"),
- workflowId: str = Path(..., description="Workflow ID"),
- afterTimestamp: Optional[float] = Query(None),
- context: RequestContext = Depends(getRequestContext)
-) -> Dict[str, Any]:
- """Get unified chat data for workflow."""
- try:
- _validateAutomationInstanceAccess(instanceId, context)
- chatInterface = _getAutomationServiceChat(context, featureInstanceId=instanceId)
- workflow = chatInterface.getWorkflow(workflowId)
- if not workflow:
- raise HTTPException(status_code=404, detail=f"Workflow {workflowId} not found")
- billingInterface = _getBillingInterface(context.user, context.mandateId)
- workflowCost = billingInterface.getWorkflowCost(workflowId)
- return chatInterface.getUnifiedChatData(workflowId, afterTimestamp, workflowCost=workflowCost)
- except HTTPException:
- raise
- except Exception as e:
- logger.error(f"Error getting chat data: {str(e)}", exc_info=True)
- raise HTTPException(status_code=500, detail=f"Error getting chat data: {str(e)}")
-
-
-@router.post("/{instanceId}/workflows/{workflowId}/stop", response_model=ChatWorkflow)
-@limiter.limit("30/minute")
-async def stop_automation_workflow(
- request: Request,
- instanceId: str = Path(..., description="Feature instance ID"),
- workflowId: str = Path(..., description="Workflow ID"),
- context: RequestContext = Depends(getRequestContext)
-) -> ChatWorkflow:
- """Stop a running automation workflow. Uses instance-scoped services."""
- try:
- from modules.workflows.automation import chatStop
- mandateId = _validateAutomationInstanceAccess(instanceId, context)
- services = getAutomationServices(
- context.user,
- mandateId=mandateId,
- featureInstanceId=instanceId,
- )
- services.featureCode = "automation"
- return await chatStop(
- context.user,
- workflowId,
- mandateId=mandateId,
- featureInstanceId=instanceId,
- featureCode="automation",
- services=services,
- )
- except HTTPException:
- raise
- except Exception as e:
- logger.error(f"Error stopping automation workflow: {str(e)}")
- raise HTTPException(status_code=500, detail=str(e))
-
-
-@router.get("/{automationId}", response_model=AutomationDefinition)
-@limiter.limit("30/minute")
-def get_automation(
- request: Request,
- automationId: str = Path(..., description="Automation ID"),
- context: RequestContext = Depends(getRequestContext)
-) -> AutomationDefinition:
- """Get a single automation definition by ID"""
- try:
- chatInterface = getAutomationInterface(context.user, mandateId=str(context.mandateId) if context.mandateId else None)
- automation = chatInterface.getAutomationDefinition(automationId)
- if not automation:
- raise HTTPException(
- status_code=404,
- detail=f"Automation {automationId} not found"
- )
-
- return automation
- except HTTPException:
- raise
- except Exception as e:
- logger.error(f"Error getting automation: {str(e)}")
- raise HTTPException(
- status_code=500,
- detail=f"Error getting automation: {str(e)}"
- )
-
-@router.put("/{automationId}", response_model=AutomationDefinition)
-@limiter.limit("10/minute")
-def update_automation(
- request: Request,
- automationId: str = Path(..., description="Automation ID"),
- automation: AutomationDefinition = Body(...),
- context: RequestContext = Depends(getRequestContext)
-) -> AutomationDefinition:
- """Update an automation definition"""
- try:
- chatInterface = getAutomationInterface(context.user, mandateId=str(context.mandateId) if context.mandateId else None)
- automationData = automation.model_dump()
- automationData.pop("executionLogs", None)
- updated = chatInterface.updateAutomationDefinition(automationId, automationData)
- return updated
- except HTTPException:
- raise
- except PermissionError as e:
- raise HTTPException(
- status_code=403,
- detail=str(e)
- )
- except Exception as e:
- logger.error(f"Error updating automation: {str(e)}")
- raise HTTPException(
- status_code=500,
- detail=f"Error updating automation: {str(e)}"
- )
-
-@router.patch("/{automationId}/status")
-@limiter.limit("30/minute")
-def update_automation_status(
- request: Request,
- automationId: str = Path(..., description="Automation ID"),
- active: bool = Body(..., embed=True),
- context: RequestContext = Depends(getRequestContext)
-) -> AutomationDefinition:
- """Update only the active status of an automation definition"""
- try:
- chatInterface = getAutomationInterface(context.user, mandateId=str(context.mandateId) if context.mandateId else None)
-
- # Get existing automation
- automation = chatInterface.getAutomationDefinition(automationId)
- if not automation:
- raise HTTPException(
- status_code=404,
- detail=f"Automation {automationId} not found"
- )
-
- # Update only the active field
- automationData = automation if isinstance(automation, dict) else automation.model_dump()
- automationData['active'] = active
-
- updated = chatInterface.updateAutomationDefinition(automationId, automationData)
- return updated
- except HTTPException:
- raise
- except PermissionError as e:
- raise HTTPException(
- status_code=403,
- detail=str(e)
- )
- except Exception as e:
- logger.error(f"Error updating automation status: {str(e)}")
- raise HTTPException(
- status_code=500,
- detail=f"Error updating automation status: {str(e)}"
- )
-
-
-@router.delete("/{automationId}")
-@limiter.limit("10/minute")
-def delete_automation(
- request: Request,
- automationId: str = Path(..., description="Automation ID"),
- context: RequestContext = Depends(getRequestContext)
-) -> Response:
- """Delete an automation definition"""
- try:
- chatInterface = getAutomationInterface(context.user, mandateId=str(context.mandateId) if context.mandateId else None)
- success = chatInterface.deleteAutomationDefinition(automationId)
- if success:
- return Response(status_code=204)
- else:
- raise HTTPException(
- status_code=500,
- detail="Failed to delete automation"
- )
- except HTTPException:
- raise
- except PermissionError as e:
- raise HTTPException(
- status_code=403,
- detail=str(e)
- )
- except Exception as e:
- logger.error(f"Error deleting automation: {str(e)}")
- raise HTTPException(
- status_code=500,
- detail=f"Error deleting automation: {str(e)}"
- )
-
-@router.post("/{instanceId}/start", response_model=ChatWorkflow)
-@limiter.limit("120/minute")
-async def start_automation_workflow(
- request: Request,
- instanceId: str = Path(..., description="Feature instance ID"),
- workflowId: Optional[str] = Query(None, description="Optional ID of the workflow to continue"),
- workflowMode: WorkflowModeEnum = Query(..., description="Workflow mode: 'Dynamic' or 'Automation' (mandatory)"),
- userInput: UserInputRequest = Body(...),
- context: RequestContext = Depends(getRequestContext)
-) -> ChatWorkflow:
- """Start a new workflow or continue an existing one."""
- try:
- from modules.workflows.automation import chatStart
- mandateId = _validateAutomationInstanceAccess(instanceId, context)
- services = getAutomationServices(
- context.user,
- mandateId=mandateId,
- featureInstanceId=instanceId,
- )
- services.featureCode = "automation"
- if hasattr(userInput, 'allowedProviders') and userInput.allowedProviders:
- services.allowedProviders = userInput.allowedProviders
- workflow = await chatStart(
- context.user,
- userInput,
- workflowMode,
- workflowId,
- mandateId=mandateId,
- featureInstanceId=instanceId,
- featureCode="automation",
- services=services,
- )
- return workflow
- except HTTPException:
- raise
- except Exception as e:
- logger.error(f"Error in start_automation_workflow: {str(e)}")
- raise HTTPException(status_code=500, detail=str(e))
-
-
-@router.post("/{automationId}/execute", response_model=ChatWorkflow)
-@limiter.limit("5/minute")
-async def execute_automation_route(
- request: Request,
- automationId: str = Path(..., description="Automation ID"),
- context: RequestContext = Depends(getRequestContext)
-) -> ChatWorkflow:
- """Execute an automation immediately (test mode)"""
- try:
- services = getAutomationServices(
- context.user,
- mandateId=context.mandateId,
- featureInstanceId=context.featureInstanceId,
- )
-
- # Load automation with current user's context (user has RBAC permissions via UI)
- automation = services.interfaceDbAutomation.getAutomationDefinition(automationId, includeSystemFields=True)
- if not automation:
- raise ValueError(f"Automation {automationId} not found")
-
- from modules.workflows.automation import executeAutomation, chatStop
- workflow = await executeAutomation(automationId, automation, context.user, services)
- return workflow
- except HTTPException:
- raise
- except ValueError as e:
- raise HTTPException(
- status_code=404,
- detail=str(e)
- )
- except Exception as e:
- logger.error(f"Error executing automation: {str(e)}")
- raise HTTPException(
- status_code=500,
- detail=f"Error executing automation: {str(e)}"
- )
-
-
-# =============================================================================
-# AutomationTemplate Routes (DB-persistiert)
-# =============================================================================
-# Separater Router für /api/automation-templates
-
-templateRouter = APIRouter(
- prefix="/api/automation-templates",
- tags=["Manage Automation Templates"],
- responses={
- 404: {"description": "Not found"},
- 400: {"description": "Bad request"},
- 401: {"description": "Unauthorized"},
- 403: {"description": "Forbidden"},
- 500: {"description": "Internal server error"}
- }
-)
-
-# Model attributes for AutomationTemplate
-templateAttributes = getModelAttributeDefinitions(AutomationTemplate)
-
-
-@templateRouter.get("", response_model=PaginatedResponse[AutomationTemplate])
-@limiter.limit("30/minute")
-def get_db_templates(
- request: Request,
- pagination: Optional[str] = Query(None, description="JSON-encoded PaginationParams object"),
- context: RequestContext = Depends(getRequestContext)
-) -> JSONResponse:
- """
- Get automation templates from database (RBAC-filtered, MY = own templates).
-
- Query Parameters:
- - pagination: JSON-encoded PaginationParams object, or None for no pagination
- """
- try:
- # Parse pagination parameter
- paginationParams = None
- if pagination:
- try:
- paginationDict = json.loads(pagination)
- if paginationDict:
- paginationDict = normalize_pagination_dict(paginationDict)
- paginationParams = PaginationParams(**paginationDict)
- except (json.JSONDecodeError, ValueError) as e:
- raise HTTPException(
- status_code=400,
- detail=f"Invalid pagination parameter: {str(e)}"
- )
-
- chatInterface = getAutomationInterface(
- context.user,
- mandateId=str(context.mandateId) if context.mandateId else None,
- featureInstanceId=str(context.featureInstanceId) if context.featureInstanceId else None
- )
- result = chatInterface.getAllAutomationTemplates(pagination=paginationParams)
-
- if paginationParams:
- response_data = {
- "items": result.items,
- "pagination": {
- "currentPage": paginationParams.page,
- "pageSize": paginationParams.pageSize,
- "totalItems": result.totalItems,
- "totalPages": result.totalPages,
- "sort": paginationParams.sort,
- "filters": paginationParams.filters
- }
- }
- else:
- response_data = {
- "items": result,
- "pagination": None
- }
-
- return JSONResponse(content=response_data)
- except HTTPException:
- raise
- except Exception as e:
- logger.error(f"Error getting templates: {str(e)}")
- raise HTTPException(
- status_code=500,
- detail=f"Error getting templates: {str(e)}"
- )
-
-
-@templateRouter.get("/filter-values")
-@limiter.limit("60/minute")
-def get_template_filter_values(
- request: Request,
- column: str = Query(..., description="Column key"),
- pagination: Optional[str] = Query(None, description="JSON-encoded current filters"),
- context: RequestContext = Depends(getRequestContext)
-) -> list:
- """Return distinct filter values for a column in automation templates."""
- try:
- from modules.routes.routeDataUsers import _handleFilterValuesRequest
- chatInterface = getAutomationInterface(
- context.user,
- mandateId=str(context.mandateId) if context.mandateId else None,
- featureInstanceId=str(context.featureInstanceId) if context.featureInstanceId else None
- )
- result = chatInterface.getAllAutomationTemplates(pagination=None)
- items = [r if isinstance(r, dict) else r.model_dump() if hasattr(r, 'model_dump') else r for r in result]
- return _handleFilterValuesRequest(items, column, pagination)
- except Exception as e:
- logger.error(f"Error getting filter values for automation templates: {str(e)}")
- raise HTTPException(status_code=500, detail=str(e))
-
-
-@templateRouter.get("/attributes", response_model=Dict[str, Any])
-def get_template_attributes(
- request: Request
-) -> Dict[str, Any]:
- """Get attribute definitions for AutomationTemplate model"""
- return {"attributes": templateAttributes}
-
-
-@templateRouter.get("/{templateId}")
-@limiter.limit("30/minute")
-def get_db_template(
- request: Request,
- templateId: str = Path(..., description="Template ID"),
- context: RequestContext = Depends(getRequestContext)
-) -> JSONResponse:
- """Get a single automation template by ID"""
- try:
- chatInterface = getAutomationInterface(
- context.user,
- mandateId=str(context.mandateId) if context.mandateId else None,
- featureInstanceId=str(context.featureInstanceId) if context.featureInstanceId else None
- )
- template = chatInterface.getAutomationTemplate(templateId)
- if not template:
- raise HTTPException(
- status_code=404,
- detail=f"Template {templateId} not found"
- )
-
- return JSONResponse(content=template)
- except HTTPException:
- raise
- except Exception as e:
- logger.error(f"Error getting template: {str(e)}")
- raise HTTPException(
- status_code=500,
- detail=f"Error getting template: {str(e)}"
- )
-
-
-@templateRouter.post("")
-@limiter.limit("10/minute")
-def create_db_template(
- request: Request,
- templateData: Dict[str, Any] = Body(...),
- context: RequestContext = Depends(getRequestContext)
-) -> JSONResponse:
- """Create a new automation template"""
- try:
- chatInterface = getAutomationInterface(
- context.user,
- mandateId=str(context.mandateId) if context.mandateId else None,
- featureInstanceId=str(context.featureInstanceId) if context.featureInstanceId else None
- )
- created = chatInterface.createAutomationTemplate(templateData, isSysAdmin=context.hasSysAdminRole)
- return JSONResponse(content=created)
- except HTTPException:
- raise
- except PermissionError as e:
- raise HTTPException(
- status_code=403,
- detail=str(e)
- )
- except Exception as e:
- logger.error(f"Error creating template: {str(e)}")
- raise HTTPException(
- status_code=500,
- detail=f"Error creating template: {str(e)}"
- )
-
-
-@templateRouter.put("/{templateId}")
-@limiter.limit("10/minute")
-def update_db_template(
- request: Request,
- templateId: str = Path(..., description="Template ID"),
- templateData: Dict[str, Any] = Body(...),
- context: RequestContext = Depends(getRequestContext)
-) -> JSONResponse:
- """Update an automation template"""
- try:
- chatInterface = getAutomationInterface(
- context.user,
- mandateId=str(context.mandateId) if context.mandateId else None,
- featureInstanceId=str(context.featureInstanceId) if context.featureInstanceId else None
- )
- updated = chatInterface.updateAutomationTemplate(templateId, templateData, isSysAdmin=context.hasSysAdminRole)
- return JSONResponse(content=updated)
- except HTTPException:
- raise
- except PermissionError as e:
- raise HTTPException(
- status_code=403,
- detail=str(e)
- )
- except Exception as e:
- logger.error(f"Error updating template: {str(e)}")
- raise HTTPException(
- status_code=500,
- detail=f"Error updating template: {str(e)}"
- )
-
-
-@templateRouter.delete("/{templateId}")
-@limiter.limit("10/minute")
-def delete_db_template(
- request: Request,
- templateId: str = Path(..., description="Template ID"),
- context: RequestContext = Depends(getRequestContext)
-) -> Response:
- """Delete an automation template"""
- try:
- chatInterface = getAutomationInterface(
- context.user,
- mandateId=str(context.mandateId) if context.mandateId else None,
- featureInstanceId=str(context.featureInstanceId) if context.featureInstanceId else None
- )
- success = chatInterface.deleteAutomationTemplate(templateId, isSysAdmin=context.hasSysAdminRole)
- if success:
- return Response(status_code=204)
- else:
- raise HTTPException(
- status_code=404,
- detail="Template not found or no permission"
- )
- except HTTPException:
- raise
- except PermissionError as e:
- raise HTTPException(
- status_code=403,
- detail=str(e)
- )
- except Exception as e:
- logger.error(f"Error deleting template: {str(e)}")
- raise HTTPException(
- status_code=500,
- detail=f"Error deleting template: {str(e)}"
- )
-
-
-@templateRouter.post("/{templateId}/duplicate")
-@limiter.limit("10/minute")
-def duplicate_db_template(
- request: Request,
- templateId: str = Path(..., description="Template ID to duplicate"),
- context: RequestContext = Depends(getRequestContext)
-) -> JSONResponse:
- """Duplicate a template into the current feature instance (system or instance template)."""
- try:
- chatInterface = getAutomationInterface(
- context.user,
- mandateId=str(context.mandateId) if context.mandateId else None,
- featureInstanceId=str(context.featureInstanceId) if context.featureInstanceId else None
- )
- duplicated = chatInterface.duplicateAutomationTemplate(templateId)
- return JSONResponse(content=duplicated)
- except HTTPException:
- raise
- except PermissionError as e:
- raise HTTPException(
- status_code=403,
- detail=str(e)
- )
- except Exception as e:
- logger.error(f"Error duplicating template: {str(e)}")
- raise HTTPException(
- status_code=500,
- detail=f"Error duplicating template: {str(e)}"
- )
-
-
-@router.post("/{automationId}/duplicate")
-@limiter.limit("10/minute")
-def duplicate_automation(
- request: Request,
- automationId: str = Path(..., description="Automation definition ID to duplicate"),
- context: RequestContext = Depends(getRequestContext)
-) -> JSONResponse:
- """Duplicate an automation definition within the same feature instance."""
- try:
- chatInterface = getAutomationInterface(
- context.user,
- mandateId=str(context.mandateId) if context.mandateId else None,
- featureInstanceId=str(context.featureInstanceId) if context.featureInstanceId else None
- )
- duplicated = chatInterface.duplicateAutomationDefinition(automationId)
- return JSONResponse(content=duplicated)
- except HTTPException:
- raise
- except PermissionError as e:
- raise HTTPException(
- status_code=403,
- detail=str(e)
- )
- except Exception as e:
- logger.error(f"Error duplicating automation: {str(e)}")
- raise HTTPException(
- status_code=500,
- detail=f"Error duplicating automation: {str(e)}"
- )
diff --git a/modules/features/automation/subAutomationTemplates.py b/modules/features/automation/subAutomationTemplates.py
deleted file mode 100644
index 0795f757..00000000
--- a/modules/features/automation/subAutomationTemplates.py
+++ /dev/null
@@ -1,433 +0,0 @@
-# Copyright (c) 2025 Patrick Motsch
-# All rights reserved.
-"""
-Automation templates for workflow definitions.
-
-Contains predefined workflow templates that can be used to create automation definitions.
-"""
-
-from typing import Dict, Any, List
-
-# Automation templates structure
-AUTOMATION_TEMPLATES: Dict[str, Any] = {
- "sets": [
- {
- "template": {
- "overview": "SharePoint Themen Zusammenfassung",
- "tasks": [
- {
- "id": "Task01",
- "title": "SharePoint Themen Zusammenfassung",
- "description": "Erstellt eine Zusammenfassung aller SharePoint Sites und deren Inhalte",
- "objective": "Erstelle eine Zusammenfassung aller SharePoint Themen (Sites) und deren Inhalte als Word-Dokument",
- "actionList": [
- {
- "execMethod": "sharepoint",
- "execAction": "findDocumentPath",
- "execParameters": {
- "connectionReference": "{{KEY:connectionName}}",
- "searchQuery": "*",
- "maxResults": 100
- },
- "execResultLabel": "sharepoint_sites_found"
- },
- {
- "execMethod": "sharepoint",
- "execAction": "listDocuments",
- "execParameters": {
- "connectionReference": "{{KEY:connectionName}}",
- "pathQuery": "{{KEY:sharepointBasePath}}",
- "includeSubfolders": True
- },
- "execResultLabel": "sharepoint_structure"
- },
- {
- "execMethod": "ai",
- "execAction": "process",
- "execParameters": {
- "aiPrompt": "{{KEY:summaryPrompt}}",
- "documentList": ["sharepoint_sites_found", "sharepoint_structure"],
- "resultType": "docx"
- },
- "execResultLabel": "sharepoint_summary"
- },
- {
- "execMethod": "sharepoint",
- "execAction": "uploadDocument",
- "execParameters": {
- "connectionReference": "{{KEY:connectionName}}",
- "documentList": ["sharepoint_summary"],
- "pathQuery": "{{KEY:sharepointFolderNameDestination}}"
- },
- "execResultLabel": "sharepoint_upload_result"
- }
- ]
- }
- ]
- },
- "parameters": {
- "connectionName": "connection:msft:p.motsch@valueon.ch",
- "sharepointBasePath": "/sites/company-share",
- "sharepointFolderNameDestination": "/sites/company-share/Freigegebene Dokumente/15. Persoenliche Ordner/Patrick Motsch/output",
- "summaryPrompt": "Erstelle eine umfassende Zusammenfassung aller SharePoint Sites und deren Inhalte. Strukturiere das Dokument nach Sites und fasse für jede Site die wichtigsten Themen, Ordnerstrukturen und Dokumente zusammen. Erstelle ein professionelles Word-Dokument mit Überschriften, Abschnitten und einer klaren Gliederung. Berücksichtige alle gefundenen Sites, deren Ordnerstrukturen und dokumentiere die wichtigsten Inhalte pro Site."
- }
- },
- {
- "template": {
- "overview": "Immobilienrecherche Zürich",
- "tasks": [
- {
- "id": "Task02",
- "title": "Immobilienrecherche Zürich",
- "description": "Webrecherche nach Immobilien im Kanton Zürich und Speicherung in Excel",
- "objective": "Immobilienrecherche im Kanton Zürich zum Verkauf (5-20 Mio. CHF) und speichere Ergebnisse in Excel-Liste auf SharePoint",
- "actionList": [
- {
- "execMethod": "ai",
- "execAction": "webResearch",
- "execParameters": {
- "prompt": "{{KEY:immobilienResearchPrompt}}",
- "urlList": ["{{KEY:immobilienResearchUrl}}"]
- },
- "execResultLabel": "immobilien_research_results"
- },
- {
- "execMethod": "ai",
- "execAction": "process",
- "execParameters": {
- "aiPrompt": "{{KEY:excelFormatPrompt}}",
- "documentList": ["immobilien_research_results"],
- "resultType": "xlsx"
- },
- "execResultLabel": "immobilien_excel_list"
- },
- {
- "execMethod": "sharepoint",
- "execAction": "uploadDocument",
- "execParameters": {
- "connectionReference": "{{KEY:connectionName}}",
- "documentList": ["immobilien_excel_list"],
- "pathQuery": "{{KEY:sharepointFolderNameDestination}}"
- },
- "execResultLabel": "immobilien_upload_result"
- }
- ]
- }
- ]
- },
- "parameters": {
- "connectionName": "connection:msft:p.motsch@valueon.ch",
- "sharepointFolderNameDestination": "/sites/company-share/Freigegebene Dokumente/15. Persoenliche Ordner/Patrick Motsch/output",
- "immobilienResearchUrl": ["https://www.homegate.ch", "https://www.immoscout24.ch", "https://www.immowelt.ch"],
- "immobilienResearchPrompt": "Suche nach Immobilien zum Verkauf im Kanton Zürich, Schweiz, im Preisbereich von 5-20 Millionen CHF. Sammle Informationen zu: Ort, Preis, Beschreibung, URL zu Bildern, Verkäufer/Kontaktinformationen.",
- "excelFormatPrompt": "Erstelle eine Excel-Datei mit den recherchierten Immobilien. Jede Immobilie soll eine Zeile sein mit den folgenden Spalten: Ort, Preis (in CHF), Beschreibung, URL zu Bild, Verkäufer. Verwende die Daten aus der Webrecherche."
- }
- },
- {
- "template": {
- "overview": "Spesenbelege Zusammenfassung",
- "tasks": [
- {
- "id": "Task03",
- "title": "Spesenbelege CSV Zusammenfassung",
- "description": "Liest PDF-Spesenbelege aus SharePoint-Ordner und erstellt CSV-Zusammenfassung",
- "objective": "Extrahiere alle PDF-Spesenbelege aus einem SharePoint-Ordner und erstelle eine CSV-Datei mit allen Spesendaten im selben Ordner",
- "actionList": [
- {
- "execMethod": "sharepoint",
- "execAction": "findDocumentPath",
- "execParameters": {
- "connectionReference": "{{KEY:connectionName}}",
- "searchQuery": "{{KEY:sharepointFolderNameSource}}:files:.pdf",
- "maxResults": 100
- },
- "execResultLabel": "sharepoint_pdf_files"
- },
- {
- "execMethod": "sharepoint",
- "execAction": "readDocuments",
- "execParameters": {
- "connectionReference": "{{KEY:connectionName}}",
- "pathObject": "sharepoint_pdf_files"
- },
- "execResultLabel": "spesenbelege_documents"
- },
- {
- "execMethod": "ai",
- "execAction": "process",
- "execParameters": {
- "aiPrompt": "{{KEY:expenseExtractionPrompt}}",
- "documentList": ["spesenbelege_documents"],
- "resultType": "csv"
- },
- "execResultLabel": "spesenbelege_csv"
- },
- {
- "execMethod": "sharepoint",
- "execAction": "uploadDocument",
- "execParameters": {
- "connectionReference": "{{KEY:connectionName}}",
- "documentList": ["spesenbelege_csv"],
- "pathQuery": "{{KEY:sharepointFolderNameDestination}}"
- },
- "execResultLabel": "spesenbelege_upload_result"
- }
- ]
- }
- ]
- },
- "parameters": {
- "connectionName": "connection:msft:p.motsch@valueon.ch",
- "sharepointFolderNameSource": "/sites/company-share/Freigegebene Dokumente/15. Persoenliche Ordner/Patrick Motsch/expenses",
- "sharepointFolderNameDestination": "/sites/company-share/Freigegebene Dokumente/15. Persoenliche Ordner/Patrick Motsch/output",
- "expenseExtractionPrompt": "Verarbeite alle bereitgestellten Dokumente, aber extrahiere nur Daten aus PDF-Spesenbelegen (ignoriere andere Dateitypen). Für jeden gefundenen PDF-Spesenbeleg extrahiere als separaten Datensatz: Datum, Betrag, MWST %, Währung, Kategorie, Beschreibung, Rechnungsnummer, Händler/Verkäufer, Steuerbetrag. Erstelle eine CSV-Datei mit einer Zeile pro Spesenbeleg. Verwende die folgenden Spaltenüberschriften: Datum, Betrag, Währung, Kategorie, Beschreibung, Rechnungsnummer, Händler, Steuerbetrag. Stelle sicher, dass alle Beträge numerisch sind und Datumswerte im Format YYYY-MM-DD vorliegen. Wenn ein Dokument kein Spesenbeleg ist, ignoriere es."
- }
- },
- {
- "template": {
- "overview": "Preprocessing Server Data Update",
- "tasks": [
- {
- "id": "Task04",
- "title": "Trigger Preprocessing Server",
- "description": "Triggers the preprocessing server at customer tenant to update database with configuration",
- "objective": "Call preprocessing server endpoint to update database with provided configuration JSON",
- "actionList": [
- {
- "execMethod": "context",
- "execAction": "triggerPreprocessingServer",
- "execParameters": {
- "endpoint": "{{KEY:endpoint}}",
- "configJson": "{{KEY:configJson}}",
- "authSecretConfigKey": "{{KEY:authSecretConfigKey}}"
- },
- "execResultLabel": "preprocessing_server_result"
- }
- ]
- }
- ]
- },
- "parameters": {
- "endpoint": "https://poweron-althaus-preprocess-prod-e3fegaatc7faency.switzerlandnorth-01.azurewebsites.net/api/v1/dataprocessor/update-db-with-config",
- "authSecretConfigKey": "PREPROCESS_ALTHAUS_CHAT_SECRET",
- "configJson": "{\"tables\":[{\"name\":\"Artikel\",\"powerbi_table_name\":\"Artikel\",\"steps\":[{\"keep\":{\"columns\":[\"I_ID\",\"Artikelbeschrieb\",\"Artikelbezeichnung\",\"Artikelgruppe\",\"Artikelkategorie\",\"Artikelkürzel\",\"Artikelnummer\",\"Einheit\",\"Gesperrt\",\"Keywords\",\"Lieferant\",\"Warengruppe\"]}},{\"fillna\":{\"column\":\"Lieferant\",\"value\":\"Unbekannt\"}}]},{\"name\":\"Einkaufspreis\",\"powerbi_table_name\":\"Einkaufspreis\",\"steps\":[{\"to_numeric\":{\"column\":\"EP_CHF\",\"errors\":\"coerce\"}},{\"dropna\":{\"subset\":[\"EP_CHF\"]}}]}]}"
- }
- },
- {
- "template": {
- "overview": "JIRA to SharePoint Ticket Synchronization",
- "tasks": [
- {
- "id": "Task01",
- "title": "Sync JIRA Tickets to SharePoint",
- "description": "Export JIRA tickets, merge with SharePoint file, upload back, and import changes to JIRA",
- "objective": "Synchronize JIRA tickets with SharePoint file (bidirectional sync)",
- "actionList": [
- {
- "execMethod": "sharepoint",
- "execAction": "findSiteByUrl",
- "execParameters": {
- "connectionReference": "{{KEY:sharepointConnection}}",
- "hostname": "{{KEY:sharepointHostname}}",
- "sitePath": "{{KEY:sharepointSitePath}}"
- },
- "execResultLabel": "sharepoint_site"
- },
- {
- "execMethod": "jira",
- "execAction": "connectJira",
- "execParameters": {
- "apiUsername": "{{KEY:jiraUsername}}",
- "apiTokenConfigKey": "{{KEY:jiraTokenConfigKey}}",
- "apiUrl": "{{KEY:jiraUrl}}",
- "projectCode": "{{KEY:jiraProjectCode}}",
- "issueType": "{{KEY:jiraIssueType}}",
- "taskSyncDefinition": "{{KEY:taskSyncDefinition}}"
- },
- "execResultLabel": "jira_connection"
- },
- {
- "execMethod": "jira",
- "execAction": "exportTicketsAsJson",
- "execParameters": {
- "connectionId": "jira_connection",
- "taskSyncDefinition": "{{KEY:taskSyncDefinition}}"
- },
- "execResultLabel": "jira_exported_tickets"
- },
- {
- "execMethod": "sharepoint",
- "execAction": "downloadFileByPath",
- "execParameters": {
- "connectionReference": "{{KEY:sharepointConnection}}",
- "siteId": "sharepoint_site",
- "filePath": "{{KEY:sharepointMainFolder}}/{{KEY:syncFileName}}"
- },
- "execResultLabel": "existing_file_content"
- },
- {
- "execMethod": "jira",
- "execAction": "parseExcelContent",
- "execParameters": {
- "excelContent": "existing_file_content",
- "skipRows": 3,
- "hasCustomHeaders": True
- },
- "execResultLabel": "existing_parsed_data"
- },
- {
- "execMethod": "jira",
- "execAction": "mergeTicketData",
- "execParameters": {
- "jiraData": "jira_exported_tickets",
- "existingData": "existing_parsed_data",
- "taskSyncDefinition": "{{KEY:taskSyncDefinition}}",
- "idField": "ID"
- },
- "execResultLabel": "merged_ticket_data"
- },
- {
- "execMethod": "sharepoint",
- "execAction": "copyFile",
- "execParameters": {
- "connectionReference": "{{KEY:sharepointConnection}}",
- "siteId": "sharepoint_site",
- "sourceFolder": "{{KEY:sharepointMainFolder}}",
- "sourceFile": "{{KEY:syncFileName}}",
- "destFolder": "{{KEY:sharepointBackupFolder}}",
- "destFile": "backup_{{TIMESTAMP}}_{{KEY:syncFileName}}"
- },
- "execResultLabel": "file_backup"
- },
- {
- "execMethod": "jira",
- "execAction": "createExcelContent",
- "execParameters": {
- "data": "merged_ticket_data",
- "headers": "existing_parsed_data",
- "taskSyncDefinition": "{{KEY:taskSyncDefinition}}"
- },
- "execResultLabel": "new_file_content"
- },
- {
- "execMethod": "sharepoint",
- "execAction": "uploadFile",
- "execParameters": {
- "connectionReference": "{{KEY:sharepointConnection}}",
- "siteId": "sharepoint_site",
- "folderPath": "{{KEY:sharepointMainFolder}}",
- "fileName": "{{KEY:syncFileName}}",
- "content": "new_file_content"
- },
- "execResultLabel": "uploaded_file"
- },
- {
- "execMethod": "sharepoint",
- "execAction": "downloadFileByPath",
- "execParameters": {
- "connectionReference": "{{KEY:sharepointConnection}}",
- "siteId": "sharepoint_site",
- "filePath": "{{KEY:sharepointMainFolder}}/{{KEY:syncFileName}}"
- },
- "execResultLabel": "uploaded_file_content"
- },
- {
- "execMethod": "jira",
- "execAction": "parseExcelContent",
- "execParameters": {
- "excelContent": "uploaded_file_content",
- "skipRows": 3,
- "hasCustomHeaders": True
- },
- "execResultLabel": "import_data"
- },
- {
- "execMethod": "jira",
- "execAction": "importTicketsFromJson",
- "execParameters": {
- "connectionId": "jira_connection",
- "ticketData": "import_data",
- "taskSyncDefinition": "{{KEY:taskSyncDefinition}}"
- },
- "execResultLabel": "import_result"
- }
- ]
- }
- ]
- },
- "parameters": {
- "sharepointConnection": "connection:msft:patrick.motsch@delta.ch",
- "sharepointHostname": "deltasecurityag.sharepoint.com",
- "sharepointSitePath": "SteeringBPM",
- "sharepointMainFolder": "/General/50 Docs hosted by SELISE",
- "sharepointBackupFolder": "/General/50 Docs hosted by SELISE/SyncHistory",
- "syncFileName": "DELTAgroup x SELISE Ticket Exchange List.xlsx",
- "jiraUsername": "p.motsch@valueon.ch",
- "jiraTokenConfigKey": "Feature_SyncDelta_JIRA_DELTA_TOKEN_SECRET",
- "jiraUrl": "https://deltasecurity.atlassian.net",
- "jiraProjectCode": "DCS",
- "jiraIssueType": "Task",
- "taskSyncDefinition": "{\"ID\":[\"get\",[\"key\"]],\"Module Category\":[\"get\",[\"fields\",\"customfield_10058\",\"value\"]],\"Summary\":[\"get\",[\"fields\",\"summary\"]],\"Description\":[\"get\",[\"fields\",\"description\"]],\"References\":[\"get\",[\"fields\",\"customfield_10066\"]],\"Priority\":[\"get\",[\"fields\",\"priority\",\"name\"]],\"Issue Status\":[\"get\",[\"fields\",\"status\",\"name\"]],\"Assignee\":[\"get\",[\"fields\",\"assignee\",\"displayName\"]],\"Issue Created\":[\"get\",[\"fields\",\"created\"]],\"Due Date\":[\"get\",[\"fields\",\"duedate\"]],\"DELTA Comments\":[\"get\",[\"fields\",\"customfield_10167\"]],\"SELISE Ticket References\":[\"put\",[\"fields\",\"customfield_10067\"]],\"SELISE Status Values\":[\"put\",[\"fields\",\"customfield_10065\"]],\"SELISE Comments\":[\"put\",[\"fields\",\"customfield_10168\"]]}"
- }
- },
- {
- "template": {
- "overview": "Expenses PDF to Trustee Position",
- "tasks": [
- {
- "id": "Task01",
- "title": "Run trustee pipeline on SharePoint files",
- "description": "Extract expenses from SharePoint PDFs, create positions + documents, sync to accounting",
- "objective": "End-to-end: SharePoint folder → AI extraction → Trustee DB → Accounting sync",
- "actionList": [
- {
- "execMethod": "trustee",
- "execAction": "extractFromFiles",
- "execParameters": {
- "connectionReference": "{{KEY:connectionName}}",
- "sharepointFolder": "{{KEY:sharepointFolder}}",
- "featureInstanceId": "{{KEY:featureInstanceId}}"
- },
- "execResultLabel": "extract_result"
- },
- {
- "execMethod": "trustee",
- "execAction": "processDocuments",
- "execParameters": {
- "documentList": "docList:{{PREV_MESSAGE_ID}}:extract_result",
- "featureInstanceId": "{{KEY:featureInstanceId}}"
- },
- "execResultLabel": "process_result"
- },
- {
- "execMethod": "trustee",
- "execAction": "syncToAccounting",
- "execParameters": {
- "documentList": "docList:{{PREV_MESSAGE_ID}}:process_result",
- "featureInstanceId": "{{KEY:featureInstanceId}}"
- },
- "execResultLabel": "sync_result"
- }
- ]
- }
- ]
- },
- "parameters": {
- "connectionName": "",
- "sharepointFolder": "",
- "featureInstanceId": ""
- }
- }
- ]
-}
-
-
-def getAutomationTemplates() -> Dict[str, Any]:
- """
- Get automation templates.
-
- Returns:
- Dict containing the automation templates structure with 'sets' key.
- """
- return AUTOMATION_TEMPLATES
-
diff --git a/modules/features/automation/subAutomationUtils.py b/modules/features/automation/subAutomationUtils.py
deleted file mode 100644
index 97d28719..00000000
--- a/modules/features/automation/subAutomationUtils.py
+++ /dev/null
@@ -1,118 +0,0 @@
-# Copyright (c) 2025 Patrick Motsch
-# All rights reserved.
-"""
-Utility functions for automation feature.
-
-Moved from interfaces/interfaceDbChat.py.
-"""
-
-import json
-from typing import Dict, Any
-from datetime import datetime, UTC
-
-
-def parseScheduleToCron(schedule: str) -> Dict[str, Any]:
- """Parse schedule string to cron kwargs for APScheduler"""
- parts = schedule.split()
- if len(parts) != 5:
- raise ValueError(f"Invalid schedule format: {schedule}")
-
- return {
- "minute": parts[0],
- "hour": parts[1],
- "day": parts[2],
- "month": parts[3],
- "day_of_week": parts[4]
- }
-
-
-def planToPrompt(plan: Dict) -> str:
- """Convert plan structure to prompt string for workflow execution"""
- return plan.get("userMessage", plan.get("overview", "Execute automation workflow"))
-
-
-def replacePlaceholders(template: str, placeholders: Dict[str, str]) -> str:
- """Replace placeholders in template with actual values. Placeholder format: {{KEY:PLACEHOLDER_NAME}} or {{TIMESTAMP}}"""
- result = template
-
- # Replace TIMESTAMP placeholder first (calculated placeholder, not from parameters)
- timestampPattern = "{{TIMESTAMP}}"
- if timestampPattern in result:
- timestamp = datetime.now(UTC).strftime("%Y%m%d_%H%M%S")
- result = result.replace(timestampPattern, timestamp)
-
- for placeholderName, value in placeholders.items():
- pattern = f"{{{{KEY:{placeholderName}}}}}"
-
- # Check if placeholder is in an array context like ["{{KEY:...}}"]
- # If value is a JSON array/dict, we should replace the entire ["{{KEY:...}}"] with the array
- arrayPattern = f'["{pattern}"]'
- if arrayPattern in result:
- # Check if value is a JSON array/dict
- isArrayValue = False
- arrayValue = None
-
- if isinstance(value, (list, dict)):
- isArrayValue = True
- arrayValue = json.dumps(value)
- elif isinstance(value, str):
- try:
- parsed = json.loads(value)
- if isinstance(parsed, (list, dict)):
- isArrayValue = True
- arrayValue = value # Already valid JSON string
- except (json.JSONDecodeError, ValueError):
- pass
-
- if isArrayValue:
- # Replace ["{{KEY:...}}"] with the array value
- result = result.replace(arrayPattern, arrayValue)
- continue # Skip the regular replacement below
-
- # Regular replacement - check if in quoted context
- patternStart = result.find(pattern)
- isQuoted = False
- if patternStart > 0:
- charBefore = result[patternStart - 1] if patternStart > 0 else None
- patternEnd = patternStart + len(pattern)
- charAfter = result[patternEnd] if patternEnd < len(result) else None
- if charBefore == '"' and charAfter == '"':
- isQuoted = True
-
- # Handle different value types
- if isinstance(value, (list, dict)):
- # Python list/dict - convert to JSON
- replacement = json.dumps(value)
- elif isinstance(value, str):
- # String value - check if it's a JSON string representing list/dict
- try:
- parsed = json.loads(value)
- if isinstance(parsed, (list, dict)):
- # It's a JSON string of a list/dict
- if isQuoted:
- # In quoted context, escape the JSON string
- escaped = json.dumps(value)
- replacement = escaped[1:-1] # Remove outer quotes
- else:
- # In unquoted context, use JSON directly
- replacement = value
- else:
- # It's a JSON string of a primitive
- if isQuoted:
- escaped = json.dumps(value)
- replacement = escaped[1:-1]
- else:
- replacement = value
- except (json.JSONDecodeError, ValueError):
- # Not valid JSON - treat as plain string
- if isQuoted:
- escaped = json.dumps(value)
- replacement = escaped[1:-1]
- else:
- replacement = value
- else:
- # Numbers, booleans, None - convert to string
- replacement = str(value)
- result = result.replace(pattern, replacement)
- return result
-
diff --git a/modules/features/automation2/__init__.py b/modules/features/automation2/__init__.py
deleted file mode 100644
index c86d7e61..00000000
--- a/modules/features/automation2/__init__.py
+++ /dev/null
@@ -1,2 +0,0 @@
-# Copyright (c) 2025 Patrick Motsch
-# Automation2 feature - n8n-style flow automation (backup/parallel to legacy automation)
diff --git a/modules/features/automation2/datamodelFeatureAutomation2.py b/modules/features/automation2/datamodelFeatureAutomation2.py
deleted file mode 100644
index 97b33754..00000000
--- a/modules/features/automation2/datamodelFeatureAutomation2.py
+++ /dev/null
@@ -1,166 +0,0 @@
-# Copyright (c) 2025 Patrick Motsch
-# All rights reserved.
-"""Automation2 models: Automation2Workflow, Automation2WorkflowRun, Automation2HumanTask."""
-
-from typing import Dict, Any, List, Optional
-from pydantic import BaseModel, Field
-from modules.datamodels.datamodelBase import PowerOnModel
-from modules.shared.attributeUtils import registerModelLabels
-import uuid
-
-
-class Automation2Workflow(BaseModel):
- id: str = Field(
- default_factory=lambda: str(uuid.uuid4()),
- description="Primary key",
- json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False},
- )
- mandateId: str = Field(
- description="Mandate ID",
- json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False},
- )
- featureInstanceId: str = Field(
- description="Feature instance ID",
- json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False},
- )
- label: str = Field(
- description="User-friendly workflow name",
- json_schema_extra={"frontend_type": "text", "frontend_required": True},
- )
- graph: Dict[str, Any] = Field(
- default_factory=dict,
- description="Graph with nodes and connections (incl. node parameters)",
- json_schema_extra={"frontend_type": "textarea", "frontend_required": True},
- )
- active: bool = Field(
- default=True,
- description="Whether workflow is active",
- json_schema_extra={"frontend_type": "checkbox", "frontend_required": False},
- )
- invocations: List[Dict[str, Any]] = Field(
- default_factory=list,
- description="Entry points / starts (manual, form, schedule, webhook, …) configured outside the canvas",
- json_schema_extra={"frontend_type": "textarea", "frontend_required": False},
- )
-
-
-registerModelLabels(
- "Automation2Workflow",
- {"en": "Automation2 Workflow", "de": "Automation2 Workflow", "fr": "Workflow Automation2"},
- {
- "id": {"en": "ID", "de": "ID", "fr": "ID"},
- "mandateId": {"en": "Mandate ID", "de": "Mandanten-ID", "fr": "ID du mandat"},
- "featureInstanceId": {"en": "Feature Instance ID", "de": "Feature-Instanz-ID", "fr": "ID instance"},
- "label": {"en": "Label", "de": "Bezeichnung", "fr": "Libellé"},
- "graph": {"en": "Graph", "de": "Graph", "fr": "Graphe"},
- "active": {"en": "Active", "de": "Aktiv", "fr": "Actif"},
- "invocations": {"en": "Starts / Entry points", "de": "Starts / Einstiegspunkte", "fr": "Points d'entrée"},
- },
-)
-
-
-class Automation2WorkflowRun(PowerOnModel):
- id: str = Field(
- default_factory=lambda: str(uuid.uuid4()),
- description="Primary key",
- json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False},
- )
- workflowId: str = Field(
- description="Workflow ID",
- json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": True},
- )
- status: str = Field(
- default="running",
- description="Status: running|paused|completed|failed",
- json_schema_extra={"frontend_type": "text", "frontend_required": False},
- )
- nodeOutputs: Dict[str, Any] = Field(
- default_factory=dict,
- description="Outputs from executed nodes",
- json_schema_extra={"frontend_type": "textarea", "frontend_required": False},
- )
- currentNodeId: Optional[str] = Field(
- default=None,
- description="Node ID when paused (human task)",
- json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False},
- )
- context: Dict[str, Any] = Field(
- default_factory=dict,
- description="Context for resume (connectionMap, inputSources, etc.)",
- json_schema_extra={"frontend_type": "textarea", "frontend_required": False},
- )
-
-
-registerModelLabels(
- "Automation2WorkflowRun",
- {"en": "Automation2 Workflow Run", "de": "Automation2 Workflow-Ausführung", "fr": "Exécution workflow"},
- {
- "id": {"en": "ID", "de": "ID", "fr": "ID"},
- "workflowId": {"en": "Workflow ID", "de": "Workflow-ID", "fr": "ID workflow"},
- "status": {"en": "Status", "de": "Status", "fr": "Statut"},
- "nodeOutputs": {"en": "Node Outputs", "de": "Node-Ausgaben", "fr": "Sorties nœuds"},
- "currentNodeId": {"en": "Current Node", "de": "Aktueller Knoten", "fr": "Nœud actuel"},
- "context": {"en": "Context", "de": "Kontext", "fr": "Contexte"},
- },
-)
-
-
-class Automation2HumanTask(PowerOnModel):
- id: str = Field(
- default_factory=lambda: str(uuid.uuid4()),
- description="Primary key",
- json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False},
- )
- runId: str = Field(
- description="Workflow run ID",
- json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": True},
- )
- workflowId: str = Field(
- description="Workflow ID",
- json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": True},
- )
- nodeId: str = Field(
- description="Node ID in the graph",
- json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": True},
- )
- nodeType: str = Field(
- description="Node type: form|approval|upload|comment|review|selection|confirmation",
- json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": True},
- )
- config: Dict[str, Any] = Field(
- default_factory=dict,
- description="Node config (form schema, approval text, etc.)",
- json_schema_extra={"frontend_type": "textarea", "frontend_required": False},
- )
- assigneeId: Optional[str] = Field(
- default=None,
- description="User ID assigned to complete the task",
- json_schema_extra={"frontend_type": "text", "frontend_readonly": False, "frontend_required": False},
- )
- status: str = Field(
- default="pending",
- description="Status: pending|completed|rejected",
- json_schema_extra={"frontend_type": "text", "frontend_required": False},
- )
- result: Optional[Dict[str, Any]] = Field(
- default=None,
- description="Task result (form data, approval decision, etc.)",
- json_schema_extra={"frontend_type": "textarea", "frontend_required": False},
- )
-
-
-registerModelLabels(
- "Automation2HumanTask",
- {"en": "Automation2 Human Task", "de": "Automation2 Benutzer-Aufgabe", "fr": "Tâche utilisateur"},
- {
- "id": {"en": "ID", "de": "ID", "fr": "ID"},
- "runId": {"en": "Run ID", "de": "Lauf-ID", "fr": "ID exécution"},
- "workflowId": {"en": "Workflow ID", "de": "Workflow-ID", "fr": "ID workflow"},
- "nodeId": {"en": "Node ID", "de": "Knoten-ID", "fr": "ID nœud"},
- "nodeType": {"en": "Node Type", "de": "Knotentyp", "fr": "Type nœud"},
- "config": {"en": "Config", "de": "Konfiguration", "fr": "Configuration"},
- "assigneeId": {"en": "Assignee", "de": "Zugewiesen an", "fr": "Assigné à"},
- "status": {"en": "Status", "de": "Status", "fr": "Statut"},
- "result": {"en": "Result", "de": "Ergebnis", "fr": "Résultat"},
- },
-)
diff --git a/modules/features/automation2/nodeDefinitions/ai.py b/modules/features/automation2/nodeDefinitions/ai.py
deleted file mode 100644
index bb85e809..00000000
--- a/modules/features/automation2/nodeDefinitions/ai.py
+++ /dev/null
@@ -1,111 +0,0 @@
-# Copyright (c) 2025 Patrick Motsch
-# AI node definitions - map to methodAi actions.
-
-AI_NODES = [
- {
- "id": "ai.prompt",
- "category": "ai",
- "label": {"en": "Prompt", "de": "Prompt", "fr": "Invite"},
- "description": {"en": "Enter a prompt and AI does something", "de": "Prompt eingeben und KI führt aus", "fr": "Entrer une invite et l'IA exécute"},
- "parameters": [
- {"name": "prompt", "type": "string", "required": True, "description": {"en": "AI prompt", "de": "KI-Prompt", "fr": "Invite IA"}},
- ],
- "inputs": 1,
- "outputs": 1,
- "meta": {"icon": "mdi-robot", "color": "#9C27B0"},
- "_method": "ai",
- "_action": "process",
- "_paramMap": {"prompt": "aiPrompt"},
- },
- {
- "id": "ai.webResearch",
- "category": "ai",
- "label": {"en": "Web Research", "de": "Web-Recherche", "fr": "Recherche web"},
- "description": {"en": "Research on the web", "de": "Recherche im Web", "fr": "Recherche sur le web"},
- "parameters": [
- {"name": "query", "type": "string", "required": True, "description": {"en": "Research query", "de": "Recherche-Anfrage", "fr": "Requête de recherche"}},
- ],
- "inputs": 1,
- "outputs": 1,
- "meta": {"icon": "mdi-magnify", "color": "#9C27B0"},
- "_method": "ai",
- "_action": "webResearch",
- "_paramMap": {"query": "prompt"},
- },
- {
- "id": "ai.summarizeDocument",
- "category": "ai",
- "label": {"en": "Summarize Document", "de": "Dokument zusammenfassen", "fr": "Résumer document"},
- "description": {"en": "Summarize document content", "de": "Dokumentinhalt zusammenfassen", "fr": "Résumer le contenu du document"},
- "parameters": [
- {"name": "summaryLength", "type": "string", "required": False, "description": {"en": "Short, medium, or long", "de": "Kurz, mittel oder lang", "fr": "Court, moyen ou long"}, "default": "medium"},
- ],
- "inputs": 1,
- "outputs": 1,
- "meta": {"icon": "mdi-file-document-outline", "color": "#9C27B0"},
- "_method": "ai",
- "_action": "summarizeDocument",
- "_paramMap": {},
- },
- {
- "id": "ai.translateDocument",
- "category": "ai",
- "label": {"en": "Translate Document", "de": "Dokument übersetzen", "fr": "Traduire document"},
- "description": {"en": "Translate document to target language", "de": "Dokument in Zielsprache übersetzen", "fr": "Traduire le document"},
- "parameters": [
- {"name": "targetLanguage", "type": "string", "required": True, "description": {"en": "Target language (e.g. en, de, fr)", "de": "Zielsprache", "fr": "Langue cible"}},
- ],
- "inputs": 1,
- "outputs": 1,
- "meta": {"icon": "mdi-translate", "color": "#9C27B0"},
- "_method": "ai",
- "_action": "translateDocument",
- "_paramMap": {"targetLanguage": "targetLanguage"},
- },
- {
- "id": "ai.convertDocument",
- "category": "ai",
- "label": {"en": "Convert Document", "de": "Dokument konvertieren", "fr": "Convertir document"},
- "description": {"en": "Convert document to another format", "de": "Dokument in anderes Format konvertieren", "fr": "Convertir le document"},
- "parameters": [
- {"name": "targetFormat", "type": "string", "required": True, "description": {"en": "Target format (pdf, docx, txt, etc.)", "de": "Zielformat", "fr": "Format cible"}},
- ],
- "inputs": 1,
- "outputs": 1,
- "meta": {"icon": "mdi-file-convert", "color": "#9C27B0"},
- "_method": "ai",
- "_action": "convertDocument",
- "_paramMap": {"targetFormat": "targetFormat"},
- },
- {
- "id": "ai.generateDocument",
- "category": "ai",
- "label": {"en": "Generate Document", "de": "Dokument generieren", "fr": "Générer document"},
- "description": {"en": "Generate document from prompt", "de": "Dokument aus Prompt generieren", "fr": "Générer un document"},
- "parameters": [
- {"name": "prompt", "type": "string", "required": True, "description": {"en": "Generation prompt", "de": "Generierungs-Prompt", "fr": "Invite de génération"}},
- ],
- "inputs": 1,
- "outputs": 1,
- "meta": {"icon": "mdi-file-plus", "color": "#9C27B0"},
- "_method": "ai",
- "_action": "generateDocument",
- "_paramMap": {"prompt": "prompt", "format": "format"},
- },
- {
- "id": "ai.generateCode",
- "category": "ai",
- "label": {"en": "Generate Code", "de": "Code generieren", "fr": "Générer code"},
- "description": {"en": "Generate code from description", "de": "Code aus Beschreibung generieren", "fr": "Générer du code"},
- "parameters": [
- {"name": "prompt", "type": "string", "required": True, "description": {"en": "Code generation prompt", "de": "Code-Generierungs-Prompt", "fr": "Invite de génération de code"}},
- {"name": "language", "type": "string", "required": False, "description": {"en": "Programming language", "de": "Programmiersprache", "fr": "Langage de programmation"}, "default": "python"},
- ],
- "inputs": 1,
- "outputs": 1,
- "meta": {"icon": "mdi-code-tags", "color": "#9C27B0"},
- "_method": "ai",
- "_action": "generateCode",
- "_paramMap": {"prompt": "prompt", "language": "language"},
- },
-]
diff --git a/modules/features/automation2/nodeDefinitions/clickup.py b/modules/features/automation2/nodeDefinitions/clickup.py
deleted file mode 100644
index 4acb0db9..00000000
--- a/modules/features/automation2/nodeDefinitions/clickup.py
+++ /dev/null
@@ -1,227 +0,0 @@
-# Copyright (c) 2025 Patrick Motsch
-# All rights reserved.
-"""ClickUp nodes — map to MethodClickup actions."""
-
-CLICKUP_NODES = [
- {
- "id": "clickup.searchTasks",
- "category": "clickup",
- "label": {"en": "Search tasks", "de": "Aufgaben suchen", "fr": "Rechercher tâches"},
- "description": {
- "en": "Search tasks in a workspace (team)",
- "de": "Aufgaben in einem Workspace suchen",
- "fr": "Rechercher des tâches dans un espace",
- },
- "parameters": [
- {"name": "connectionId", "type": "string", "required": True, "description": {"en": "ClickUp connection", "de": "ClickUp-Verbindung", "fr": "Connexion ClickUp"}},
- {"name": "teamId", "type": "string", "required": True, "description": {"en": "Workspace (team) ID", "de": "Team-/Workspace-ID", "fr": "ID équipe"}},
- {"name": "query", "type": "string", "required": True, "description": {"en": "Search query", "de": "Suchbegriff", "fr": "Requête"}},
- {"name": "page", "type": "number", "required": False, "description": {"en": "Page", "de": "Seite", "fr": "Page"}, "default": 0},
- {
- "name": "listId",
- "type": "string",
- "required": False,
- "description": {
- "en": "If set, search this list via list API (not team search).",
- "de": "Wenn gesetzt: Suche in dieser Liste (Listen-API, nicht Team-Suche).",
- "fr": "Si défini : recherche dans cette liste (API liste).",
- },
- },
- {
- "name": "includeClosed",
- "type": "boolean",
- "required": False,
- "default": False,
- "description": {
- "en": "With listId: include closed tasks.",
- "de": "Mit Liste: erledigte Aufgaben einbeziehen.",
- "fr": "Avec liste : inclure les tâches terminées.",
- },
- },
- {
- "name": "fullTaskData",
- "type": "boolean",
- "required": False,
- "default": False,
- "description": {
- "en": "Return full ClickUp API JSON per task (very large). Default: slim fields only.",
- "de": "Vollständige ClickUp-Rohdaten pro Task (sehr groß). Standard: nur schlanke Felder.",
- "fr": "Réponse brute complète (très volumineuse). Par défaut : champs réduits.",
- },
- },
- {
- "name": "matchNameOnly",
- "type": "boolean",
- "required": False,
- "default": True,
- "description": {
- "en": "Keep only tasks whose title contains the search query (default: on).",
- "de": "Nur Aufgaben, deren Titel den Suchbegriff enthält (Standard: an).",
- "fr": "Ne garder que les tâches dont le titre contient la requête (défaut : oui).",
- },
- },
- ],
- "inputs": 1,
- "outputs": 1,
- "meta": {"icon": "mdi-magnify", "color": "#7B68EE"},
- "_method": "clickup",
- "_action": "searchTasks",
- "_paramMap": {
- "connectionId": "connectionReference",
- "teamId": "teamId",
- "query": "query",
- "page": "page",
- "listId": "listId",
- "fullTaskData": "fullTaskData",
- "matchNameOnly": "matchNameOnly",
- "includeClosed": "includeClosed",
- },
- },
- {
- "id": "clickup.listTasks",
- "category": "clickup",
- "label": {"en": "List tasks", "de": "Aufgaben auflisten", "fr": "Lister les tâches"},
- "description": {
- "en": "List tasks in a list (pick list path from browse)",
- "de": "Aufgaben einer Liste auflisten (Pfad aus Browse)",
- "fr": "Lister les tâches d'une liste",
- },
- "parameters": [
- {"name": "connectionId", "type": "string", "required": True, "description": {"en": "ClickUp connection", "de": "ClickUp-Verbindung", "fr": "Connexion ClickUp"}},
- {"name": "path", "type": "string", "required": True, "description": {"en": "Virtual path to list /team/.../list/...", "de": "Pfad zur Liste", "fr": "Chemin vers la liste"}},
- {"name": "page", "type": "number", "required": False, "description": {"en": "Page", "de": "Seite", "fr": "Page"}, "default": 0},
- {"name": "includeClosed", "type": "boolean", "required": False, "description": {"en": "Include closed", "de": "Erledigte einbeziehen", "fr": "Inclure terminées"}, "default": False},
- ],
- "inputs": 1,
- "outputs": 1,
- "meta": {"icon": "mdi-format-list-bulleted", "color": "#7B68EE"},
- "_method": "clickup",
- "_action": "listTasks",
- "_paramMap": {
- "connectionId": "connectionReference",
- "path": "pathQuery",
- "page": "page",
- "includeClosed": "includeClosed",
- },
- },
- {
- "id": "clickup.getTask",
- "category": "clickup",
- "label": {"en": "Get task", "de": "Aufgabe abrufen", "fr": "Obtenir la tâche"},
- "description": {"en": "Get one task by ID or path", "de": "Eine Aufgabe abrufen", "fr": "Obtenir une tâche"},
- "parameters": [
- {"name": "connectionId", "type": "string", "required": True, "description": {"en": "ClickUp connection", "de": "ClickUp-Verbindung", "fr": "Connexion ClickUp"}},
- {"name": "taskId", "type": "string", "required": False, "description": {"en": "Task ID", "de": "Task-ID", "fr": "ID tâche"}},
- {"name": "path", "type": "string", "required": False, "description": {"en": "Or path .../task/{id}", "de": "Oder Pfad .../task/{id}", "fr": "Ou chemin .../task/{id}"}},
- ],
- "inputs": 1,
- "outputs": 1,
- "meta": {"icon": "mdi-file-document-outline", "color": "#7B68EE"},
- "_method": "clickup",
- "_action": "getTask",
- "_paramMap": {"connectionId": "connectionReference", "taskId": "taskId", "path": "pathQuery"},
- },
- {
- "id": "clickup.createTask",
- "category": "clickup",
- "label": {"en": "Create task", "de": "Aufgabe erstellen", "fr": "Créer une tâche"},
- "description": {"en": "Create a task in a list", "de": "Aufgabe in einer Liste erstellen", "fr": "Créer une tâche dans une liste"},
- "parameters": [
- {"name": "connectionId", "type": "string", "required": True, "description": {"en": "ClickUp connection", "de": "ClickUp-Verbindung", "fr": "Connexion ClickUp"}},
- {"name": "teamId", "type": "string", "required": False, "description": {"en": "Workspace (team) for list picker", "de": "Workspace für Listen-Auswahl", "fr": "Équipe"}},
- {"name": "path", "type": "string", "required": False, "description": {"en": "Optional path /team/.../list/...", "de": "Optional: Pfad zur Liste", "fr": "Chemin optionnel"}},
- {"name": "listId", "type": "string", "required": False, "description": {"en": "List ID", "de": "Listen-ID", "fr": "ID liste"}},
- {"name": "name", "type": "string", "required": True, "description": {"en": "Task name", "de": "Name", "fr": "Nom"}},
- {"name": "description", "type": "string", "required": False, "description": {"en": "Description", "de": "Beschreibung", "fr": "Description"}},
- {"name": "taskStatus", "type": "string", "required": False, "description": {"en": "Status (list status name)", "de": "Status (wie in der Liste)", "fr": "Statut"}},
- {"name": "taskPriority", "type": "string", "required": False, "description": {"en": "1–4 or empty", "de": "1–4 oder leer", "fr": "1–4"}},
- {"name": "taskDueDateMs", "type": "string", "required": False, "description": {"en": "Due date (Unix ms)", "de": "Fälligkeit (ms)", "fr": "Échéance (ms)"}},
- {"name": "taskAssigneeIds", "type": "object", "required": False, "description": {"en": "Assignee user ids", "de": "Zugewiesene (User-IDs)", "fr": "Assignés"}},
- {"name": "taskTimeEstimateMs", "type": "string", "required": False, "description": {"en": "Time estimate (ms)", "de": "Zeitschätzung (ms)", "fr": "Estimation (ms)"}},
- {"name": "taskTimeEstimateHours", "type": "string", "required": False, "description": {"en": "Time estimate (hours)", "de": "Zeitschätzung (Stunden)", "fr": "Heures"}},
- {"name": "customFieldValues", "type": "object", "required": False, "description": {"en": "Custom field id → value", "de": "Benutzerdefinierte Felder", "fr": "Champs personnalisés"}},
- {"name": "taskFields", "type": "string", "required": False, "description": {"en": "Extra JSON (advanced)", "de": "Zusätzliches JSON (fortgeschritten)", "fr": "JSON avancé"}},
- ],
- "inputs": 1,
- "outputs": 1,
- "meta": {"icon": "mdi-plus-circle-outline", "color": "#7B68EE"},
- "_method": "clickup",
- "_action": "createTask",
- "_paramMap": {
- "connectionId": "connectionReference",
- "teamId": "teamId",
- "path": "pathQuery",
- "listId": "listId",
- "name": "name",
- "description": "description",
- "taskStatus": "taskStatus",
- "taskPriority": "taskPriority",
- "taskDueDateMs": "taskDueDateMs",
- "taskAssigneeIds": "taskAssigneeIds",
- "taskTimeEstimateMs": "taskTimeEstimateMs",
- "taskTimeEstimateHours": "taskTimeEstimateHours",
- "customFieldValues": "customFieldValues",
- "taskFields": "taskFields",
- },
- },
- {
- "id": "clickup.updateTask",
- "category": "clickup",
- "label": {"en": "Update task", "de": "Aufgabe aktualisieren", "fr": "Mettre à jour la tâche"},
- "description": {
- "en": "Update task fields (rows or JSON)",
- "de": "Felder der Aufgabe ändern (Zeilen oder JSON)",
- "fr": "Mettre à jour les champs (lignes ou JSON)",
- },
- "parameters": [
- {"name": "connectionId", "type": "string", "required": True, "description": {"en": "ClickUp connection", "de": "ClickUp-Verbindung", "fr": "Connexion ClickUp"}},
- {"name": "taskId", "type": "string", "required": False, "description": {"en": "Task ID", "de": "Task-ID", "fr": "ID tâche"}},
- {"name": "path", "type": "string", "required": False, "description": {"en": "Or path to task", "de": "Oder Pfad", "fr": "Ou chemin"}},
- {
- "name": "taskUpdateEntries",
- "type": "object",
- "required": False,
- "description": {
- "en": "List of {fieldKey, value, customFieldId?}",
- "de": "Liste der zu ändernden Felder (fieldKey, value, optional customFieldId)",
- "fr": "Liste de champs à mettre à jour",
- },
- },
- {"name": "taskUpdate", "type": "string", "required": False, "description": {"en": "JSON body for API (optional if rows set)", "de": "JSON für API (optional wenn Zeilen gesetzt)", "fr": "Corps JSON"}},
- ],
- "inputs": 1,
- "outputs": 1,
- "meta": {"icon": "mdi-pencil-outline", "color": "#7B68EE"},
- "_method": "clickup",
- "_action": "updateTask",
- "_paramMap": {
- "connectionId": "connectionReference",
- "taskId": "taskId",
- "path": "path",
- "taskUpdate": "taskUpdate",
- },
- },
- {
- "id": "clickup.uploadAttachment",
- "category": "clickup",
- "label": {"en": "Upload attachment", "de": "Anhang hochladen", "fr": "Téléverser pièce jointe"},
- "description": {"en": "Upload file to a task (upstream file)", "de": "Datei an Task anhängen", "fr": "Joindre un fichier à la tâche"},
- "parameters": [
- {"name": "connectionId", "type": "string", "required": True, "description": {"en": "ClickUp connection", "de": "ClickUp-Verbindung", "fr": "Connexion ClickUp"}},
- {"name": "taskId", "type": "string", "required": False, "description": {"en": "Task ID", "de": "Task-ID", "fr": "ID tâche"}},
- {"name": "path", "type": "string", "required": False, "description": {"en": "Or path to task", "de": "Oder Pfad", "fr": "Ou chemin"}},
- {"name": "fileName", "type": "string", "required": False, "description": {"en": "File name", "de": "Dateiname", "fr": "Nom du fichier"}},
- ],
- "inputs": 1,
- "outputs": 1,
- "meta": {"icon": "mdi-attachment", "color": "#7B68EE"},
- "_method": "clickup",
- "_action": "uploadAttachment",
- "_paramMap": {
- "connectionId": "connectionReference",
- "taskId": "taskId",
- "path": "path",
- "fileName": "fileName",
- },
- },
-]
diff --git a/modules/features/automation2/nodeDefinitions/email.py b/modules/features/automation2/nodeDefinitions/email.py
deleted file mode 100644
index b96a5389..00000000
--- a/modules/features/automation2/nodeDefinitions/email.py
+++ /dev/null
@@ -1,70 +0,0 @@
-# Copyright (c) 2025 Patrick Motsch
-# Email node definitions - map to methodOutlook actions.
-# Use connectionId from user connections (like AI workspace sources).
-
-EMAIL_NODES = [
- {
- "id": "email.checkEmail",
- "category": "email",
- "label": {"en": "Check Email", "de": "E-Mail prüfen", "fr": "Vérifier email"},
- "description": {"en": "Check for new emails (general or from specific account)", "de": "Neue E-Mails prüfen", "fr": "Vérifier les nouveaux emails"},
- "parameters": [
- {"name": "connectionId", "type": "string", "required": True, "description": {"en": "Email account connection", "de": "E-Mail-Konto Verbindung", "fr": "Connexion compte email"}},
- {"name": "folder", "type": "string", "required": False, "description": {"en": "Folder (e.g. Inbox)", "de": "Ordner (z.B. Posteingang)", "fr": "Dossier (ex. Boîte de réception)"}, "default": "Inbox"},
- {"name": "limit", "type": "number", "required": False, "description": {"en": "Max emails to fetch", "de": "Max E-Mails", "fr": "Max emails"}, "default": 100},
- {"name": "fromAddress", "type": "string", "required": False, "description": {"en": "Only emails from this address", "de": "Nur E-Mails von dieser Adresse", "fr": "Seulement les e-mails de cette adresse"}, "default": ""},
- {"name": "subjectContains", "type": "string", "required": False, "description": {"en": "Subject must contain this text", "de": "Betreff muss diesen Text enthalten", "fr": "Le sujet doit contenir ce texte"}, "default": ""},
- {"name": "hasAttachment", "type": "boolean", "required": False, "description": {"en": "Only emails with attachments", "de": "Nur E-Mails mit Anhängen", "fr": "Seulement les e-mails avec pièces jointes"}, "default": False},
- {"name": "filter", "type": "string", "required": False, "description": {"en": "Advanced: raw filter (overrides above if set)", "de": "Erweitert: Filter-Text (überschreibt obige)", "fr": "Avancé: filtre brut"}, "default": ""},
- ],
- "inputs": 1,
- "outputs": 1,
- "meta": {"icon": "mdi-email-check", "color": "#1976D2"},
- "_method": "outlook",
- "_action": "readEmails",
- "_paramMap": {"connectionId": "connectionReference", "folder": "folder", "limit": "limit", "filter": "filter"},
- },
- {
- "id": "email.searchEmail",
- "category": "email",
- "label": {"en": "Search Email", "de": "E-Mail suchen", "fr": "Rechercher email"},
- "description": {"en": "Search or find emails", "de": "E-Mails suchen oder finden", "fr": "Rechercher des emails"},
- "parameters": [
- {"name": "connectionId", "type": "string", "required": True, "description": {"en": "Email account connection", "de": "E-Mail-Konto Verbindung", "fr": "Connexion compte email"}},
- {"name": "query", "type": "string", "required": False, "description": {"en": "General search term (searches subject, body, from)", "de": "Suchbegriff (durchsucht Betreff, Inhalt, Absender)", "fr": "Terme de recherche (sujet, corps, expéditeur)"}, "default": ""},
- {"name": "folder", "type": "string", "required": False, "description": {"en": "Folder to search", "de": "Ordner zum Suchen", "fr": "Dossier à rechercher"}, "default": "Inbox"},
- {"name": "limit", "type": "number", "required": False, "description": {"en": "Max emails to return", "de": "Max E-Mails", "fr": "Max emails"}, "default": 100},
- {"name": "fromAddress", "type": "string", "required": False, "description": {"en": "Only emails from this address", "de": "Nur E-Mails von dieser Adresse", "fr": "Seulement les e-mails de cette adresse"}, "default": ""},
- {"name": "toAddress", "type": "string", "required": False, "description": {"en": "Only emails to this recipient", "de": "Nur E-Mails an diesen Empfänger", "fr": "Seulement les e-mails à ce destinataire"}, "default": ""},
- {"name": "subjectContains", "type": "string", "required": False, "description": {"en": "Subject must contain this text", "de": "Betreff muss diesen Text enthalten", "fr": "Le sujet doit contenir ce texte"}, "default": ""},
- {"name": "bodyContains", "type": "string", "required": False, "description": {"en": "Body/content must contain this text", "de": "Inhalt muss diesen Text enthalten", "fr": "Le corps doit contenir ce texte"}, "default": ""},
- {"name": "hasAttachment", "type": "boolean", "required": False, "description": {"en": "Only emails with attachments", "de": "Nur E-Mails mit Anhängen", "fr": "Seulement les e-mails avec pièces jointes"}, "default": False},
- {"name": "filter", "type": "string", "required": False, "description": {"en": "Advanced: raw KQL (overrides above if set)", "de": "Erweitert: KQL-Filter (überschreibt obige)", "fr": "Avancé: filtre KQL brut"}, "default": ""},
- ],
- "inputs": 1,
- "outputs": 1,
- "meta": {"icon": "mdi-email-search", "color": "#1976D2"},
- "_method": "outlook",
- "_action": "searchEmails",
- "_paramMap": {"connectionId": "connectionReference", "query": "query", "folder": "folder", "limit": "limit", "filter": "filter"},
- },
- {
- "id": "email.draftEmail",
- "category": "email",
- "label": {"en": "Draft Email", "de": "E-Mail entwerfen", "fr": "Brouillon email"},
- "description": {"en": "Create a draft email", "de": "E-Mail-Entwurf erstellen", "fr": "Créer un brouillon d'email"},
- "parameters": [
- {"name": "connectionId", "type": "string", "required": True, "description": {"en": "Email account connection", "de": "E-Mail-Konto Verbindung", "fr": "Connexion compte email"}},
- {"name": "subject", "type": "string", "required": True, "description": {"en": "Email subject", "de": "E-Mail-Betreff", "fr": "Sujet"}},
- {"name": "body", "type": "string", "required": True, "description": {"en": "Email body", "de": "E-Mail-Text", "fr": "Corps de l'email"}},
- {"name": "to", "type": "string", "required": False, "description": {"en": "Recipient(s)", "de": "Empfänger", "fr": "Destinataire(s)"}, "default": ""},
- ],
- "inputs": 1,
- "outputs": 1,
- "meta": {"icon": "mdi-email-edit", "color": "#1976D2"},
- "_method": "outlook",
- "_action": "composeAndDraftEmailWithContext",
- "_paramMap": {"connectionId": "connectionReference", "to": "to"},
- "_contextFrom": ["subject", "body"],
- },
-]
diff --git a/modules/features/automation2/nodeDefinitions/file.py b/modules/features/automation2/nodeDefinitions/file.py
deleted file mode 100644
index bb168218..00000000
--- a/modules/features/automation2/nodeDefinitions/file.py
+++ /dev/null
@@ -1,60 +0,0 @@
-# Copyright (c) 2025 Patrick Motsch
-# File node definitions - create files from context (e.g. from AI nodes).
-
-FILE_NODES = [
- {
- "id": "file.create",
- "category": "file",
- "label": {"en": "Create File", "de": "Datei erstellen", "fr": "Créer fichier"},
- "description": {
- "en": "Create a file from context (text/markdown from AI). Configurable format and style.",
- "de": "Erstellt eine Datei aus Kontext (Text/Markdown von KI). Format und Stil konfigurierbar.",
- "fr": "Crée un fichier à partir du contexte. Format et style configurables.",
- },
- "parameters": [
- {
- "name": "contentSources",
- "type": "json",
- "required": False,
- "description": {
- "en": "Array of context refs (e.g. AI, form). Concatenated in order. Empty = from connected node.",
- "de": "Liste von Kontext-Quellen (z.B. KI, Formular). Werden nacheinander zusammengefügt. Leer = vom verbundenen Node.",
- "fr": "Liste de sources de contexte. Concaténées dans l'ordre. Vide = du noeud connecté.",
- },
- "default": [],
- },
- {
- "name": "outputFormat",
- "type": "string",
- "required": True,
- "description": {"en": "Output format", "de": "Ausgabeformat", "fr": "Format de sortie"},
- "default": "docx",
- },
- {
- "name": "title",
- "type": "string",
- "required": False,
- "description": {"en": "Document title", "de": "Dokumenttitel", "fr": "Titre du document"},
- },
- {
- "name": "templateName",
- "type": "string",
- "required": False,
- "description": {"en": "Style preset: default, corporate, minimal", "de": "Stil-Vorlage", "fr": "Prését style"},
- },
- {
- "name": "language",
- "type": "string",
- "required": False,
- "description": {"en": "Language code (de, en, fr)", "de": "Sprachcode", "fr": "Code langue"},
- "default": "de",
- },
- ],
- "inputs": 1,
- "outputs": 1,
- "meta": {"icon": "mdi-file-plus-outline", "color": "#2196F3"},
- "_method": "file",
- "_action": "create",
- "_paramMap": {},
- },
-]
diff --git a/modules/features/automation2/nodeDefinitions/flow.py b/modules/features/automation2/nodeDefinitions/flow.py
deleted file mode 100644
index 02e25764..00000000
--- a/modules/features/automation2/nodeDefinitions/flow.py
+++ /dev/null
@@ -1,46 +0,0 @@
-# Copyright (c) 2025 Patrick Motsch
-# Flow control node definitions.
-
-FLOW_NODES = [
- {
- "id": "flow.ifElse",
- "category": "flow",
- "label": {"en": "If / Else", "de": "Wenn / Sonst", "fr": "Si / Sinon"},
- "description": {"en": "Branch based on condition", "de": "Verzweigung nach Bedingung", "fr": "Branche selon condition"},
- "parameters": [
- {"name": "condition", "type": "string", "required": True, "description": {"en": "Expression to evaluate (e.g. {{value}} > 0)", "de": "Bedingung", "fr": "Condition"}},
- ],
- "inputs": 1,
- "outputs": 2,
- "outputLabels": {"en": ["Yes", "No"], "de": ["Ja", "Nein"], "fr": ["Oui", "Non"]},
- "executor": "flow",
- "meta": {"icon": "mdi-source-branch", "color": "#FF9800"},
- },
- {
- "id": "flow.switch",
- "category": "flow",
- "label": {"en": "Switch", "de": "Switch", "fr": "Switch"},
- "description": {"en": "Multiple branches based on value", "de": "Mehrere Zweige nach Wert", "fr": "Branches multiples selon valeur"},
- "parameters": [
- {"name": "value", "type": "string", "required": True, "description": {"en": "Value to match", "de": "Zu vergleichender Wert", "fr": "Valeur à comparer"}},
- {"name": "cases", "type": "array", "required": False, "description": {"en": "List of cases", "de": "Fälle", "fr": "Cas"}},
- ],
- "inputs": 1,
- "outputs": 1,
- "executor": "flow",
- "meta": {"icon": "mdi-swap-horizontal", "color": "#FF9800"},
- },
- {
- "id": "flow.loop",
- "category": "flow",
- "label": {"en": "Loop / For Each", "de": "Schleife / Für Jedes", "fr": "Boucle / Pour Chaque"},
- "description": {"en": "Iterate over array items", "de": "Über Array-Elemente iterieren", "fr": "Itérer sur les éléments"},
- "parameters": [
- {"name": "items", "type": "string", "required": True, "description": {"en": "Path to array (e.g. {{input.items}})", "de": "Pfad zum Array", "fr": "Chemin vers le tableau"}},
- ],
- "inputs": 1,
- "outputs": 1,
- "executor": "flow",
- "meta": {"icon": "mdi-repeat", "color": "#FF9800"},
- },
-]
diff --git a/modules/features/automation2/nodeDefinitions/input.py b/modules/features/automation2/nodeDefinitions/input.py
deleted file mode 100644
index d9c56c78..00000000
--- a/modules/features/automation2/nodeDefinitions/input.py
+++ /dev/null
@@ -1,122 +0,0 @@
-# Copyright (c) 2025 Patrick Motsch
-# Input/Human node definitions - nodes that require user action.
-
-INPUT_NODES = [
- {
- "id": "input.form",
- "category": "input",
- "label": {"en": "Form", "de": "Formular", "fr": "Formulaire"},
- "description": {"en": "User fills out a form", "de": "Benutzer füllt ein Formular aus", "fr": "L'utilisateur remplit un formulaire"},
- "parameters": [
- {
- "name": "fields",
- "type": "json",
- "required": True,
- "description": {
- "en": "Form fields: [{name, type, label, required, options?}]. type may include clickup_tasks with clickupConnectionId + clickupListId for a ClickUp task dropdown (value {add, rem}).",
- "de": "Formularfelder. type: u. a. clickup_tasks mit clickupConnectionId und clickupListId für ClickUp-Aufgaben-Dropdown (Wert wie Relationship-Feld).",
- "fr": "Champs du formulaire",
- },
- "default": [],
- },
- ],
- "inputs": 1,
- "outputs": 1,
- "executor": "input",
- "meta": {"icon": "mdi-form-textbox", "color": "#9C27B0"},
- },
- {
- "id": "input.approval",
- "category": "input",
- "label": {"en": "Approval", "de": "Genehmigung", "fr": "Approbation"},
- "description": {"en": "User approves or rejects", "de": "Benutzer genehmigt oder lehnt ab", "fr": "L'utilisateur approuve ou rejette"},
- "parameters": [
- {"name": "title", "type": "string", "required": True, "description": {"en": "Approval title", "de": "Genehmigungstitel", "fr": "Titre"}},
- {"name": "description", "type": "string", "required": False, "description": {"en": "What to approve", "de": "Was genehmigt werden soll", "fr": "Ce qu'il faut approuver"}},
- {"name": "approvalType", "type": "string", "required": False, "description": {"en": "Type: document or generic", "de": "Typ: document oder generic", "fr": "Type: document ou generic"}, "default": "generic"},
- ],
- "inputs": 1,
- "outputs": 1,
- "executor": "input",
- "meta": {"icon": "mdi-check-decagram", "color": "#4CAF50"},
- },
- {
- "id": "input.upload",
- "category": "input",
- "label": {"en": "Upload", "de": "Upload", "fr": "Téléversement"},
- "description": {"en": "User uploads file(s)", "de": "Benutzer lädt Datei(en) hoch", "fr": "L'utilisateur téléverse des fichiers"},
- "parameters": [
- {"name": "accept", "type": "string", "required": False, "description": {"en": "Accept string for file input (e.g. .pdf,image/*)", "de": "Accept-String für Dateiauswahl", "fr": "Chaîne accept"}, "default": ""},
- {"name": "allowedTypes", "type": "json", "required": False, "description": {"en": "Selected file types (from UI multi-select)", "de": "Ausgewählte Dateitypen", "fr": "Types sélectionnés"}, "default": []},
- {"name": "maxSize", "type": "number", "required": False, "description": {"en": "Max file size in MB", "de": "Max. Dateigröße in MB", "fr": "Taille max en Mo"}, "default": 10},
- {"name": "multiple", "type": "boolean", "required": False, "description": {"en": "Allow multiple files", "de": "Mehrere Dateien erlauben", "fr": "Autoriser plusieurs fichiers"}, "default": False},
- ],
- "inputs": 1,
- "outputs": 1,
- "executor": "input",
- "meta": {"icon": "mdi-upload", "color": "#2196F3"},
- },
- {
- "id": "input.comment",
- "category": "input",
- "label": {"en": "Comment", "de": "Kommentar", "fr": "Commentaire"},
- "description": {"en": "User adds a comment", "de": "Benutzer fügt einen Kommentar hinzu", "fr": "L'utilisateur ajoute un commentaire"},
- "parameters": [
- {"name": "placeholder", "type": "string", "required": False, "description": {"en": "Placeholder text", "de": "Platzhalter", "fr": "Texte indicatif"}, "default": ""},
- {"name": "required", "type": "boolean", "required": False, "description": {"en": "Comment required", "de": "Kommentar erforderlich", "fr": "Commentaire requis"}, "default": True},
- ],
- "inputs": 1,
- "outputs": 1,
- "executor": "input",
- "meta": {"icon": "mdi-comment-text", "color": "#FF9800"},
- },
- {
- "id": "input.review",
- "category": "input",
- "label": {"en": "Review", "de": "Prüfung", "fr": "Revue"},
- "description": {"en": "User reviews content", "de": "Benutzer prüft Inhalt", "fr": "L'utilisateur révise le contenu"},
- "parameters": [
- {"name": "contentRef", "type": "string", "required": True, "description": {"en": "Reference to content (e.g. {{nodeId.field}})", "de": "Referenz auf Inhalt", "fr": "Référence au contenu"}},
- {"name": "reviewType", "type": "string", "required": False, "description": {"en": "Type of review", "de": "Art der Prüfung", "fr": "Type de revue"}, "default": "generic"},
- ],
- "inputs": 1,
- "outputs": 1,
- "executor": "input",
- "meta": {"icon": "mdi-magnify-scan", "color": "#673AB7"},
- },
- {
- "id": "input.selection",
- "category": "input",
- "label": {"en": "Selection", "de": "Auswahl", "fr": "Sélection"},
- "description": {"en": "User selects from options", "de": "Benutzer wählt aus Optionen", "fr": "L'utilisateur choisit parmi les options"},
- "parameters": [
- {
- "name": "options",
- "type": "json",
- "required": True,
- "description": {"en": "Options: [{value, label}]", "de": "Optionen", "fr": "Options"},
- "default": [],
- },
- {"name": "multiple", "type": "boolean", "required": False, "description": {"en": "Allow multiple selection", "de": "Mehrfachauswahl erlauben", "fr": "Sélection multiple"}, "default": False},
- ],
- "inputs": 1,
- "outputs": 1,
- "executor": "input",
- "meta": {"icon": "mdi-format-list-checks", "color": "#009688"},
- },
- {
- "id": "input.confirmation",
- "category": "input",
- "label": {"en": "Confirmation", "de": "Bestätigung", "fr": "Confirmation"},
- "description": {"en": "User confirms yes/no", "de": "Benutzer bestätigt Ja/Nein", "fr": "L'utilisateur confirme oui/non"},
- "parameters": [
- {"name": "question", "type": "string", "required": True, "description": {"en": "Question to confirm", "de": "Zu bestätigende Frage", "fr": "Question à confirmer"}},
- {"name": "confirmLabel", "type": "string", "required": False, "description": {"en": "Label for confirm button", "de": "Label für Bestätigen-Button", "fr": "Libellé du bouton confirmer"}, "default": "Confirm"},
- {"name": "rejectLabel", "type": "string", "required": False, "description": {"en": "Label for reject button", "de": "Label für Ablehnen-Button", "fr": "Libellé du bouton refuser"}, "default": "Reject"},
- ],
- "inputs": 1,
- "outputs": 1,
- "executor": "input",
- "meta": {"icon": "mdi-checkbox-marked-circle", "color": "#8BC34A"},
- },
-]
diff --git a/modules/features/automation2/nodeDefinitions/sharepoint.py b/modules/features/automation2/nodeDefinitions/sharepoint.py
deleted file mode 100644
index f0dd30cf..00000000
--- a/modules/features/automation2/nodeDefinitions/sharepoint.py
+++ /dev/null
@@ -1,105 +0,0 @@
-# Copyright (c) 2025 Patrick Motsch
-# SharePoint node definitions - map to methodSharepoint actions.
-# Use connectionId and path from connection selector (like workflow folder view).
-
-SHAREPOINT_NODES = [
- {
- "id": "sharepoint.findFile",
- "category": "sharepoint",
- "label": {"en": "Find File", "de": "Datei finden", "fr": "Trouver fichier"},
- "description": {"en": "Find file by path or search", "de": "Datei nach Pfad oder Suche finden", "fr": "Trouver fichier par chemin ou recherche"},
- "parameters": [
- {"name": "connectionId", "type": "string", "required": True, "description": {"en": "SharePoint connection", "de": "SharePoint-Verbindung", "fr": "Connexion SharePoint"}},
- {"name": "searchQuery", "type": "string", "required": True, "description": {"en": "Search query or path", "de": "Suchanfrage oder Pfad", "fr": "Requête ou chemin"}},
- {"name": "site", "type": "string", "required": False, "description": {"en": "Optional site hint", "de": "Optionaler Site-Hinweis", "fr": "Indication de site"}, "default": ""},
- {"name": "maxResults", "type": "number", "required": False, "description": {"en": "Max results", "de": "Max Ergebnisse", "fr": "Max résultats"}, "default": 1000},
- ],
- "inputs": 1,
- "outputs": 1,
- "meta": {"icon": "mdi-file-search", "color": "#0078D4"},
- "_method": "sharepoint",
- "_action": "findDocumentPath",
- "_paramMap": {"connectionId": "connectionReference", "searchQuery": "searchQuery", "site": "site", "maxResults": "maxResults"},
- },
- {
- "id": "sharepoint.readFile",
- "category": "sharepoint",
- "label": {"en": "Read File", "de": "Datei lesen", "fr": "Lire fichier"},
- "description": {"en": "Extract content from file", "de": "Inhalt aus Datei extrahieren", "fr": "Extraire le contenu du fichier"},
- "parameters": [
- {"name": "connectionId", "type": "string", "required": True, "description": {"en": "SharePoint connection", "de": "SharePoint-Verbindung", "fr": "Connexion SharePoint"}},
- {"name": "path", "type": "string", "required": True, "description": {"en": "File path or documentList from find file", "de": "Dateipfad oder documentList von Find", "fr": "Chemin ou documentList"}},
- ],
- "inputs": 1,
- "outputs": 1,
- "meta": {"icon": "mdi-file-document", "color": "#0078D4"},
- "_method": "sharepoint",
- "_action": "readDocuments",
- "_paramMap": {"connectionId": "connectionReference", "path": "pathQuery"},
- },
- {
- "id": "sharepoint.uploadFile",
- "category": "sharepoint",
- "label": {"en": "Upload File", "de": "Datei hochladen", "fr": "Téléverser fichier"},
- "description": {"en": "Upload file to SharePoint", "de": "Datei zu SharePoint hochladen", "fr": "Téléverser fichier vers SharePoint"},
- "parameters": [
- {"name": "connectionId", "type": "string", "required": True, "description": {"en": "SharePoint connection", "de": "SharePoint-Verbindung", "fr": "Connexion SharePoint"}},
- {"name": "path", "type": "string", "required": True, "description": {"en": "Target folder path (e.g. /sites/.../Folder)", "de": "Zielordner-Pfad", "fr": "Chemin du dossier cible"}},
- ],
- "inputs": 1,
- "outputs": 1,
- "meta": {"icon": "mdi-upload", "color": "#0078D4"},
- "_method": "sharepoint",
- "_action": "uploadFile",
- "_paramMap": {"connectionId": "connectionReference", "path": "pathQuery"},
- },
- {
- "id": "sharepoint.listFiles",
- "category": "sharepoint",
- "label": {"en": "List Files", "de": "Dateien auflisten", "fr": "Lister fichiers"},
- "description": {"en": "List files in folder or SharePoint", "de": "Dateien in Ordner oder SharePoint auflisten", "fr": "Lister les fichiers dans un dossier"},
- "parameters": [
- {"name": "connectionId", "type": "string", "required": True, "description": {"en": "SharePoint connection", "de": "SharePoint-Verbindung", "fr": "Connexion SharePoint"}},
- {"name": "path", "type": "string", "required": False, "description": {"en": "Folder path (e.g. /sites/SiteName/Shared Documents)", "de": "Ordnerpfad", "fr": "Chemin du dossier"}, "default": "/"},
- ],
- "inputs": 1,
- "outputs": 1,
- "meta": {"icon": "mdi-folder-open", "color": "#0078D4"},
- "_method": "sharepoint",
- "_action": "listDocuments",
- "_paramMap": {"connectionId": "connectionReference", "path": "pathQuery"},
- },
- {
- "id": "sharepoint.downloadFile",
- "category": "sharepoint",
- "label": {"en": "Download File", "de": "Datei herunterladen", "fr": "Télécharger fichier"},
- "description": {"en": "Download file from path (e.g. /sites/SiteName/Shared Documents/file.pdf)", "de": "Datei vom Pfad herunterladen", "fr": "Télécharger le fichier"},
- "parameters": [
- {"name": "connectionId", "type": "string", "required": True, "description": {"en": "SharePoint connection", "de": "SharePoint-Verbindung", "fr": "Connexion SharePoint"}},
- {"name": "path", "type": "string", "required": True, "description": {"en": "Full file path (e.g. /sites/SiteName/Shared Documents/file.pdf)", "de": "Vollständiger Dateipfad", "fr": "Chemin complet du fichier"}},
- ],
- "inputs": 1,
- "outputs": 1,
- "meta": {"icon": "mdi-download", "color": "#0078D4"},
- "_method": "sharepoint",
- "_action": "downloadFileByPath",
- "_paramMap": {"connectionId": "connectionReference", "path": "pathQuery", "siteId": "siteId", "filePath": "filePath"},
- },
- {
- "id": "sharepoint.copyFile",
- "category": "sharepoint",
- "label": {"en": "Copy File", "de": "Datei kopieren", "fr": "Copier fichier"},
- "description": {"en": "Copy file to destination", "de": "Datei an Ziel kopieren", "fr": "Copier le fichier"},
- "parameters": [
- {"name": "connectionId", "type": "string", "required": True, "description": {"en": "SharePoint connection", "de": "SharePoint-Verbindung", "fr": "Connexion SharePoint"}},
- {"name": "sourcePath", "type": "string", "required": True, "description": {"en": "Source file path (from browse)", "de": "Quelldatei-Pfad", "fr": "Chemin fichier source"}},
- {"name": "destPath", "type": "string", "required": True, "description": {"en": "Destination folder path (from browse)", "de": "Zielordner-Pfad", "fr": "Chemin dossier cible"}},
- ],
- "inputs": 1,
- "outputs": 1,
- "meta": {"icon": "mdi-content-copy", "color": "#0078D4"},
- "_method": "sharepoint",
- "_action": "copyFile",
- "_paramMap": {"connectionId": "connectionReference", "sourcePath": "sourcePath", "destPath": "destPath"},
- },
-]
diff --git a/modules/features/automation2/nodeDefinitions/triggers.py b/modules/features/automation2/nodeDefinitions/triggers.py
deleted file mode 100644
index 5071a762..00000000
--- a/modules/features/automation2/nodeDefinitions/triggers.py
+++ /dev/null
@@ -1,64 +0,0 @@
-# Copyright (c) 2025 Patrick Motsch
-# Canvas start nodes — variant reflects workflow configuration (gear in editor).
-
-TRIGGER_NODES = [
- {
- "id": "trigger.manual",
- "category": "trigger",
- "label": {"en": "Start", "de": "Start", "fr": "Départ"},
- "description": {
- "en": "Manual, API, or background triggers (webhook, email, …).",
- "de": "Manuell, API oder Hintergrund-Starts (Webhook, E-Mail, …).",
- "fr": "Manuel, API ou déclencheurs en arrière-plan.",
- },
- "parameters": [],
- "inputs": 0,
- "outputs": 1,
- "executor": "trigger",
- "meta": {"icon": "mdi-play", "color": "#4CAF50"},
- },
- {
- "id": "trigger.form",
- "category": "trigger",
- "label": {"en": "Start (form)", "de": "Start (Formular)", "fr": "Départ (formulaire)"},
- "description": {
- "en": "Form fields are filled at run time; configure fields on this node.",
- "de": "Felder werden beim Start befüllt; konfigurieren Sie die Felder auf dieser Node.",
- "fr": "Les champs sont remplis au démarrage.",
- },
- "parameters": [
- {
- "name": "formFields",
- "type": "json",
- "required": False,
- "description": {"en": "Field definitions", "de": "Felddefinitionen", "fr": "Définitions"},
- },
- ],
- "inputs": 0,
- "outputs": 1,
- "executor": "trigger",
- "meta": {"icon": "mdi-form-select", "color": "#9C27B0"},
- },
- {
- "id": "trigger.schedule",
- "category": "trigger",
- "label": {"en": "Start (schedule)", "de": "Start (Zeitplan)", "fr": "Départ (planification)"},
- "description": {
- "en": "Cron expression for scheduled runs (configure on this node).",
- "de": "Cron-Ausdruck für geplante Läufe.",
- "fr": "Expression cron pour les exécutions planifiées.",
- },
- "parameters": [
- {
- "name": "cron",
- "type": "string",
- "required": False,
- "description": {"en": "Cron expression", "de": "Cron-Ausdruck", "fr": "Expression cron"},
- },
- ],
- "inputs": 0,
- "outputs": 1,
- "executor": "trigger",
- "meta": {"icon": "mdi-clock", "color": "#2196F3"},
- },
-]
diff --git a/modules/features/automation2/nodeRegistry.py b/modules/features/automation2/nodeRegistry.py
deleted file mode 100644
index 4bcc9ba5..00000000
--- a/modules/features/automation2/nodeRegistry.py
+++ /dev/null
@@ -1,88 +0,0 @@
-# Copyright (c) 2025 Patrick Motsch
-# All rights reserved.
-"""
-Node Type Registry for automation2 - static node definitions (ai, email, sharepoint, trigger, flow, data, input).
-Nodes are defined first; IO/method actions are used at execution time.
-"""
-
-import logging
-from typing import Dict, List, Any
-
-from modules.features.automation2.nodeDefinitions import STATIC_NODE_TYPES
-
-logger = logging.getLogger(__name__)
-
-
-def getNodeTypes(
- services: Any = None,
- language: str = "en",
-) -> List[Dict[str, Any]]:
- """
- Return static node types. No dynamic I/O derivation from methodDiscovery.
- services: Optional (kept for API compatibility, not used).
- """
- return list(STATIC_NODE_TYPES)
-
-
-def _localizeNode(node: Dict[str, Any], language: str) -> Dict[str, Any]:
- """Apply language to label/description/parameters."""
- lang = language if language in ("en", "de", "fr") else "en"
- out = dict(node)
- # Strip internal keys for API response
- for key in list(out.keys()):
- if key.startswith("_"):
- del out[key]
- if isinstance(node.get("label"), dict):
- out["label"] = node["label"].get(lang, node["label"].get("en", str(node["label"])))
- if isinstance(node.get("description"), dict):
- out["description"] = node["description"].get(lang, node["description"].get("en", str(node["description"])))
- ol = node.get("outputLabels")
- if isinstance(ol, dict) and ol:
- first = next(iter(ol.values()), None)
- if isinstance(first, (list, tuple)):
- out["outputLabels"] = ol.get(lang, ol.get("en", list(first)))
- params = []
- for p in node.get("parameters", []):
- pc = dict(p)
- if isinstance(p.get("description"), dict):
- pc["description"] = p["description"].get(lang, p["description"].get("en", str(p.get("description", ""))))
- params.append(pc)
- out["parameters"] = params
- return out
-
-
-def getNodeTypesForApi(
- services: Any,
- language: str = "en",
-) -> Dict[str, Any]:
- """
- API-ready response: nodeTypes with localized strings, plus categories list.
- """
- nodes = getNodeTypes(services, language)
- localized = [_localizeNode(n, language) for n in nodes]
- categories = [
- {"id": "trigger", "label": {"en": "Trigger", "de": "Trigger", "fr": "Déclencheur"}},
- {"id": "input", "label": {"en": "Input/Human", "de": "Eingabe/Mensch", "fr": "Entrée/Humain"}},
- {"id": "flow", "label": {"en": "Flow", "de": "Ablauf", "fr": "Flux"}},
- {"id": "data", "label": {"en": "Data", "de": "Daten", "fr": "Données"}},
- {"id": "ai", "label": {"en": "AI", "de": "KI", "fr": "IA"}},
- {"id": "file", "label": {"en": "File", "de": "Datei", "fr": "Fichier"}},
- {"id": "email", "label": {"en": "Email", "de": "E-Mail", "fr": "Email"}},
- {"id": "sharepoint", "label": {"en": "SharePoint", "de": "SharePoint", "fr": "SharePoint"}},
- {"id": "clickup", "label": {"en": "ClickUp", "de": "ClickUp", "fr": "ClickUp"}},
- ]
- return {"nodeTypes": localized, "categories": categories}
-
-
-def getNodeTypeToMethodAction() -> Dict[str, tuple]:
- """
- Mapping from node type id to (method, action) for execution.
- Used by ActionNodeExecutor.
- """
- mapping = {}
- for node in STATIC_NODE_TYPES:
- method = node.get("_method")
- action = node.get("_action")
- if method and action:
- mapping[node["id"]] = (method, action)
- return mapping
diff --git a/modules/features/automation2/routeFeatureAutomation2.py b/modules/features/automation2/routeFeatureAutomation2.py
deleted file mode 100644
index aa40f8bb..00000000
--- a/modules/features/automation2/routeFeatureAutomation2.py
+++ /dev/null
@@ -1,854 +0,0 @@
-# Copyright (c) 2025 Patrick Motsch
-# All rights reserved.
-"""
-Automation2 routes - node-types, execute, workflows, runs, tasks, connections, browse.
-"""
-
-import logging
-from typing import Any, Dict, Optional
-
-from fastapi import APIRouter, Depends, Path, Query, Body, Request, HTTPException
-from fastapi.responses import JSONResponse
-from modules.auth import limiter, getRequestContext, RequestContext
-
-from modules.features.automation2.mainAutomation2 import getAutomation2Services
-from modules.features.automation2.nodeRegistry import getNodeTypesForApi
-from modules.features.automation2.interfaceFeatureAutomation2 import getAutomation2Interface
-from modules.workflows.automation2.executionEngine import executeGraph
-from modules.workflows.automation2.runEnvelope import (
- default_run_envelope,
- merge_run_envelope,
- normalize_run_envelope,
-)
-from modules.features.automation2.entryPoints import find_invocation
-
-logger = logging.getLogger(__name__)
-
-
-def _build_execute_run_envelope(
- body: Dict[str, Any],
- workflow: Optional[Dict[str, Any]],
- user_id: Optional[str],
-) -> Dict[str, Any]:
- """Build normalized run envelope from POST /execute body."""
- if isinstance(body.get("runEnvelope"), dict):
- env = normalize_run_envelope(body["runEnvelope"], user_id=user_id)
- pl = body.get("payload")
- if isinstance(pl, dict):
- env = merge_run_envelope(env, {"payload": pl})
- return env
-
- entry_point_id = body.get("entryPointId")
- if entry_point_id:
- if not workflow:
- raise HTTPException(
- status_code=400,
- detail="entryPointId requires a saved workflow (workflowId must refer to a stored workflow)",
- )
- inv = find_invocation(workflow, entry_point_id)
- if not inv:
- raise HTTPException(status_code=400, detail="entryPointId not found on workflow")
- if not inv.get("enabled", True):
- raise HTTPException(status_code=400, detail="entry point is disabled")
- kind = inv.get("kind", "manual")
- trig_map = {
- "manual": "manual",
- "form": "form",
- "schedule": "schedule",
- "always_on": "event",
- "email": "email",
- "webhook": "webhook",
- "api": "api",
- "event": "event",
- }
- trig = trig_map.get(kind, "manual")
- title = inv.get("title") or {}
- label = ""
- if isinstance(title, dict):
- label = title.get("en") or title.get("de") or ""
- elif isinstance(title, str):
- label = title
- base = default_run_envelope(
- trig,
- entry_point_id=inv.get("id"),
- entry_point_label=label or None,
- )
- pl = body.get("payload")
- if isinstance(pl, dict):
- base = merge_run_envelope(base, {"payload": pl})
- return normalize_run_envelope(base, user_id=user_id)
-
- env = normalize_run_envelope(None, user_id=user_id)
- pl = body.get("payload")
- if isinstance(pl, dict):
- env = merge_run_envelope(env, {"payload": pl})
- return env
-
-router = APIRouter(
- prefix="/api/automation2",
- tags=["Automation2"],
- responses={404: {"description": "Not found"}, 403: {"description": "Forbidden"}},
-)
-
-
-def _validateInstanceAccess(instanceId: str, context: RequestContext) -> str:
- """Validate user has access to the automation2 feature instance. Returns mandateId."""
- from fastapi import HTTPException
- from modules.interfaces.interfaceDbApp import getRootInterface
-
- rootInterface = getRootInterface()
- instance = rootInterface.getFeatureInstance(instanceId)
- if not instance:
- raise HTTPException(status_code=404, detail=f"Feature instance {instanceId} not found")
- featureAccess = rootInterface.getFeatureAccess(str(context.user.id), instanceId)
- if not featureAccess or not featureAccess.enabled:
- raise HTTPException(status_code=403, detail="Access denied to this feature instance")
- return str(instance.mandateId) if instance.mandateId else ""
-
-
-@router.get("/{instanceId}/info")
-@limiter.limit("60/minute")
-def get_automation2_info(
- request: Request,
- instanceId: str = Path(..., description="Feature instance ID"),
- context: RequestContext = Depends(getRequestContext),
-) -> dict:
- """Minimal info endpoint - proves the feature works."""
- _validateInstanceAccess(instanceId, context)
- return {
- "featureCode": "automation2",
- "instanceId": instanceId,
- "status": "ok",
- "message": "Automation2 feature ready. Build from here.",
- }
-
-
-@router.post("/{instanceId}/schedule-sync")
-@limiter.limit("10/minute")
-def post_schedule_sync(
- request: Request,
- instanceId: str = Path(..., description="Feature instance ID"),
- context: RequestContext = Depends(getRequestContext),
-) -> dict:
- """Manually trigger schedule sync (re-register cron jobs for all schedule workflows)."""
- _validateInstanceAccess(instanceId, context)
- from modules.interfaces.interfaceDbApp import getRootInterface
- from modules.workflows.automation2.subAutomation2Schedule import sync_automation2_schedule_events
-
- root = getRootInterface()
- event_user = root.getUserByUsername("event")
- if not event_user:
- return {"success": False, "error": "Event user not available", "synced": 0}
- result = sync_automation2_schedule_events(event_user)
- return {"success": True, **result}
-
-
-@router.get("/{instanceId}/node-types")
-@limiter.limit("60/minute")
-def get_node_types(
- request: Request,
- instanceId: str = Path(..., description="Feature instance ID"),
- language: str = Query("en", description="Localization (en, de, fr)"),
- context: RequestContext = Depends(getRequestContext),
-) -> dict:
- """Return node types for the flow builder: static + I/O from methodDiscovery."""
- logger.info("automation2 node-types request: instanceId=%s language=%s", instanceId, language)
- mandateId = _validateInstanceAccess(instanceId, context)
- services = getAutomation2Services(
- context.user,
- mandateId=mandateId,
- featureInstanceId=instanceId,
- )
- result = getNodeTypesForApi(services, language=language)
- logger.info(
- "automation2 node-types response: %d nodeTypes %d categories",
- len(result.get("nodeTypes", [])),
- len(result.get("categories", [])),
- )
- return result
-
-
-@router.post("/{instanceId}/execute")
-@limiter.limit("30/minute")
-async def post_execute(
- request: Request,
- instanceId: str = Path(..., description="Feature instance ID"),
- body: dict = Body(..., description="{ workflowId?, graph: { nodes, connections } }"),
- context: RequestContext = Depends(getRequestContext),
-) -> dict:
- """Execute automation2 graph. Body: { workflowId?, graph: { nodes, connections } }."""
- userId = str(context.user.id) if context.user else None
- logger.info(
- "automation2 execute request: instanceId=%s userId=%s body_keys=%s",
- instanceId,
- userId,
- list(body.keys()),
- )
- mandateId = _validateInstanceAccess(instanceId, context)
- services = getAutomation2Services(
- context.user,
- mandateId=mandateId,
- featureInstanceId=instanceId,
- )
- # Ensure workflow methods (outlook, ai, sharepoint, etc.) are discovered for ActionExecutor
- from modules.workflows.processing.shared.methodDiscovery import discoverMethods
- discoverMethods(services)
-
- graph = body.get("graph") or body
- workflowId = body.get("workflowId")
- req_nodes = graph.get("nodes") or []
- workflow_for_envelope: Optional[Dict[str, Any]] = None
- if workflowId and not str(workflowId).startswith("transient-"):
- a2_pre = getAutomation2Interface(context.user, mandateId, instanceId)
- workflow_for_envelope = a2_pre.getWorkflow(workflowId)
- # When workflowId is set: prefer graph from request (current editor state) if it has nodes.
- # Only fall back to stored workflow graph when request graph is empty (e.g. resume from email).
- if workflowId and len(req_nodes) == 0:
- a2 = getAutomation2Interface(context.user, mandateId, instanceId)
- wf = a2.getWorkflow(workflowId)
- if wf and wf.get("graph"):
- graph = wf["graph"]
- logger.info("automation2 execute: loaded graph from workflow %s", workflowId)
- workflow_for_envelope = wf
- # Use transient workflowId when none provided (e.g. execute from editor without save)
- # Required for email.checkEmail pause/resume - run must be created
- if not workflowId:
- import uuid
- workflowId = f"transient-{uuid.uuid4().hex[:12]}"
- logger.info("automation2 execute: using transient workflowId=%s", workflowId)
- nodes_count = len(graph.get("nodes") or [])
- connections_count = len(graph.get("connections") or [])
- logger.info(
- "automation2 execute: graph nodes=%d connections=%d workflowId=%s mandateId=%s",
- nodes_count,
- connections_count,
- workflowId,
- mandateId,
- )
- run_env = _build_execute_run_envelope(body, workflow_for_envelope, userId)
-
- a2_interface = getAutomation2Interface(context.user, mandateId, instanceId)
- result = await executeGraph(
- graph=graph,
- services=services,
- workflowId=workflowId,
- instanceId=instanceId,
- userId=userId,
- mandateId=mandateId,
- automation2_interface=a2_interface,
- run_envelope=run_env,
- )
- logger.info(
- "automation2 execute result: success=%s error=%s nodeOutputs_keys=%s failedNode=%s paused=%s",
- result.get("success"),
- result.get("error"),
- list(result.get("nodeOutputs", {}).keys()) if result.get("nodeOutputs") else [],
- result.get("failedNode"),
- result.get("paused"),
- )
- return result
-
-
-# -------------------------------------------------------------------------
-# Connections and Browse (for Email/SharePoint node config - like workspace)
-# -------------------------------------------------------------------------
-
-
-def _buildResolverDbInterface(chatService):
- """Build a DB adapter that ConnectorResolver can use to load UserConnections."""
- class _ResolverDbAdapter:
- def __init__(self, appInterface):
- self._app = appInterface
-
- def getUserConnection(self, connectionId: str):
- if hasattr(self._app, "getUserConnectionById"):
- return self._app.getUserConnectionById(connectionId)
- return None
-
- appIf = getattr(chatService, "interfaceDbApp", None)
- if appIf:
- return _ResolverDbAdapter(appIf)
- return getattr(chatService, "interfaceDbComponent", None)
-
-
-@router.get("/{instanceId}/connections")
-@limiter.limit("300/minute")
-def list_automation2_connections(
- request: Request,
- instanceId: str = Path(..., description="Feature instance ID"),
- context: RequestContext = Depends(getRequestContext),
-) -> dict:
- """Return the user's active connections (UserConnections) for Email/SharePoint node config."""
- mandateId = _validateInstanceAccess(instanceId, context)
- from modules.serviceCenter import getService
- from modules.serviceCenter.context import ServiceCenterContext
- ctx = ServiceCenterContext(
- user=context.user,
- mandate_id=str(context.mandateId) if context.mandateId else mandateId,
- feature_instance_id=instanceId,
- )
- chatService = getService("chat", ctx)
- connections = chatService.getUserConnections()
- items = []
- for c in connections or []:
- conn = c if isinstance(c, dict) else (c.model_dump() if hasattr(c, "model_dump") else {})
- authority = conn.get("authority")
- if hasattr(authority, "value"):
- authority = authority.value
- status = conn.get("status")
- if hasattr(status, "value"):
- status = status.value
- items.append({
- "id": conn.get("id"),
- "authority": authority,
- "externalUsername": conn.get("externalUsername"),
- "externalEmail": conn.get("externalEmail"),
- "status": status,
- })
- return {"connections": items}
-
-
-@router.get("/{instanceId}/connections/{connectionId}/services")
-@limiter.limit("120/minute")
-async def list_connection_services(
- request: Request,
- instanceId: str = Path(..., description="Feature instance ID"),
- connectionId: str = Path(..., description="Connection ID"),
- context: RequestContext = Depends(getRequestContext),
-) -> dict:
- """Return the available services for a specific UserConnection."""
- mandateId = _validateInstanceAccess(instanceId, context)
- try:
- from modules.connectors.connectorResolver import ConnectorResolver
- from modules.serviceCenter import getService as getSvc
- from modules.serviceCenter.context import ServiceCenterContext
- ctx = ServiceCenterContext(
- user=context.user,
- mandate_id=str(context.mandateId) if context.mandateId else mandateId,
- feature_instance_id=instanceId,
- )
- chatService = getSvc("chat", ctx)
- securityService = getSvc("security", ctx)
- dbInterface = _buildResolverDbInterface(chatService)
- resolver = ConnectorResolver(securityService, dbInterface)
- provider = await resolver.resolve(connectionId)
- services = provider.getAvailableServices()
- _serviceLabels = {
- "sharepoint": "SharePoint",
- "clickup": "ClickUp",
- "outlook": "Outlook",
- "teams": "Teams",
- "onedrive": "OneDrive",
- "drive": "Google Drive",
- "gmail": "Gmail",
- "files": "Files (FTP)",
- }
- _serviceIcons = {
- "sharepoint": "sharepoint",
- "clickup": "folder",
- "outlook": "mail",
- "teams": "chat",
- "onedrive": "cloud",
- "drive": "cloud",
- "gmail": "mail",
- "files": "folder",
- }
- items = [
- {"service": s, "label": _serviceLabels.get(s, s), "icon": _serviceIcons.get(s, "folder")}
- for s in services
- ]
- return {"services": items}
- except Exception as e:
- logger.error(f"Error listing services for connection {connectionId}: {e}")
- return JSONResponse({"services": [], "error": str(e)}, status_code=400)
-
-
-@router.get("/{instanceId}/connections/{connectionId}/browse")
-@limiter.limit("300/minute")
-async def browse_connection_service(
- request: Request,
- instanceId: str = Path(..., description="Feature instance ID"),
- connectionId: str = Path(..., description="Connection ID"),
- service: str = Query(..., description="Service name (e.g. sharepoint, onedrive, outlook)"),
- path: str = Query("/", description="Path within the service to browse"),
- context: RequestContext = Depends(getRequestContext),
-) -> dict:
- """Browse folders/items within a connection's service at a given path."""
- mandateId = _validateInstanceAccess(instanceId, context)
- try:
- from modules.connectors.connectorResolver import ConnectorResolver
- from modules.serviceCenter import getService as getSvc
- from modules.serviceCenter.context import ServiceCenterContext
- ctx = ServiceCenterContext(
- user=context.user,
- mandate_id=str(context.mandateId) if context.mandateId else mandateId,
- feature_instance_id=instanceId,
- )
- chatService = getSvc("chat", ctx)
- securityService = getSvc("security", ctx)
- dbInterface = _buildResolverDbInterface(chatService)
- resolver = ConnectorResolver(securityService, dbInterface)
- adapter = await resolver.resolveService(connectionId, service)
- entries = await adapter.browse(path, filter=None)
- items = []
- for entry in (entries or []):
- items.append({
- "name": entry.name,
- "path": entry.path,
- "isFolder": entry.isFolder,
- "size": entry.size,
- "mimeType": entry.mimeType,
- "metadata": entry.metadata if hasattr(entry, "metadata") else {},
- })
- return {"items": items, "path": path, "service": service}
- except Exception as e:
- logger.error(f"Error browsing {service} for connection {connectionId} at '{path}': {e}")
- return JSONResponse({"items": [], "error": str(e)}, status_code=400)
-
-
-# -------------------------------------------------------------------------
-# Workflow CRUD
-# -------------------------------------------------------------------------
-
-
-def _get_node_label_from_graph(graph: dict, nodeId: str) -> str:
- """Extract human-readable label for a node from graph."""
- if not graph or not nodeId:
- return nodeId or ""
- nodes = graph.get("nodes") or []
- for n in nodes:
- if n.get("id") == nodeId:
- params = n.get("parameters") or {}
- config = params.get("config") or {}
- if isinstance(config, dict):
- label = config.get("title") or config.get("label")
- else:
- label = None
- return (
- n.get("title")
- or label
- or params.get("title")
- or params.get("label")
- or n.get("type", "")
- or nodeId
- )
- return nodeId or ""
-
-
-@router.get("/{instanceId}/workflows")
-@limiter.limit("60/minute")
-def get_workflows(
- request: Request,
- instanceId: str = Path(..., description="Feature instance ID"),
- active: Optional[bool] = Query(None, description="Filter by active: true|false"),
- context: RequestContext = Depends(getRequestContext),
-) -> dict:
- """List all workflows for this feature instance.
- Enriches each workflow with runCount, isRunning, stuckAtNodeId, stuckAtNodeLabel,
- createdAt, lastStartedAt.
- Query param active: filter by active status (true|false).
- """
- mandateId = _validateInstanceAccess(instanceId, context)
- a2 = getAutomation2Interface(context.user, mandateId, instanceId)
- items = a2.getWorkflows(active=active)
- enriched = []
- for wf in items:
- wf_id = wf.get("id")
- runs = a2.getRunsByWorkflow(wf_id) if wf_id else []
- run_count = len(runs)
- active_run = None
- last_started_at = None
- for r in runs:
- ts = r.get("sysCreatedAt")
- if ts and (last_started_at is None or ts > last_started_at):
- last_started_at = ts
- if r.get("status") in ("running", "paused"):
- active_run = r
- stuck_at_node_id = active_run.get("currentNodeId") if active_run else None
- stuck_at_node_label = ""
- if stuck_at_node_id and wf.get("graph"):
- stuck_at_node_label = _get_node_label_from_graph(wf["graph"], stuck_at_node_id)
- enriched.append({
- **wf,
- "runCount": run_count,
- "isRunning": active_run is not None,
- "runStatus": active_run.get("status") if active_run else None,
- "stuckAtNodeId": stuck_at_node_id,
- "stuckAtNodeLabel": stuck_at_node_label or stuck_at_node_id or "",
- "createdAt": wf.get("sysCreatedAt"),
- "lastStartedAt": last_started_at,
- })
- return {"workflows": enriched}
-
-
-@router.get("/{instanceId}/workflows/{workflowId}")
-@limiter.limit("60/minute")
-def get_workflow(
- request: Request,
- instanceId: str = Path(..., description="Feature instance ID"),
- workflowId: str = Path(..., description="Workflow ID"),
- context: RequestContext = Depends(getRequestContext),
-) -> dict:
- """Get a single workflow by ID."""
- mandateId = _validateInstanceAccess(instanceId, context)
- a2 = getAutomation2Interface(context.user, mandateId, instanceId)
- wf = a2.getWorkflow(workflowId)
- if not wf:
- raise HTTPException(status_code=404, detail="Workflow not found")
- return wf
-
-
-@router.post("/{instanceId}/workflows")
-@limiter.limit("30/minute")
-def create_workflow(
- request: Request,
- instanceId: str = Path(..., description="Feature instance ID"),
- body: dict = Body(..., description="{ label, graph }"),
- context: RequestContext = Depends(getRequestContext),
-) -> dict:
- """Create a new workflow."""
- mandateId = _validateInstanceAccess(instanceId, context)
- a2 = getAutomation2Interface(context.user, mandateId, instanceId)
- created = a2.createWorkflow(body)
- return created
-
-
-@router.put("/{instanceId}/workflows/{workflowId}")
-@limiter.limit("30/minute")
-def update_workflow(
- request: Request,
- instanceId: str = Path(..., description="Feature instance ID"),
- workflowId: str = Path(..., description="Workflow ID"),
- body: dict = Body(..., description="{ label?, graph? }"),
- context: RequestContext = Depends(getRequestContext),
-) -> dict:
- """Update a workflow."""
- mandateId = _validateInstanceAccess(instanceId, context)
- a2 = getAutomation2Interface(context.user, mandateId, instanceId)
- updated = a2.updateWorkflow(workflowId, body)
- if not updated:
- raise HTTPException(status_code=404, detail="Workflow not found")
- return updated
-
-
-@router.delete("/{instanceId}/workflows/{workflowId}")
-@limiter.limit("30/minute")
-def delete_workflow(
- request: Request,
- instanceId: str = Path(..., description="Feature instance ID"),
- workflowId: str = Path(..., description="Workflow ID"),
- context: RequestContext = Depends(getRequestContext),
-) -> dict:
- """Delete a workflow."""
- mandateId = _validateInstanceAccess(instanceId, context)
- a2 = getAutomation2Interface(context.user, mandateId, instanceId)
- if not a2.deleteWorkflow(workflowId):
- raise HTTPException(status_code=404, detail="Workflow not found")
- return {"success": True}
-
-
-@router.post("/{instanceId}/workflows/{workflowId}/webhooks/{entryPointId}")
-@limiter.limit("60/minute")
-async def post_workflow_webhook(
- request: Request,
- instanceId: str = Path(..., description="Feature instance ID"),
- workflowId: str = Path(..., description="Workflow ID"),
- entryPointId: str = Path(..., description="Entry point ID (kind must be webhook)"),
- body: dict = Body(default_factory=dict),
- context: RequestContext = Depends(getRequestContext),
-) -> dict:
- """
- Invoke a workflow via a webhook entry point. Optional shared secret in
- X-Automation2-Webhook-Secret or X-Webhook-Secret when config.webhookSecret is set.
- """
- mandateId = _validateInstanceAccess(instanceId, context)
- userId = str(context.user.id) if context.user else None
- a2 = getAutomation2Interface(context.user, mandateId, instanceId)
- wf = a2.getWorkflow(workflowId)
- if not wf or not wf.get("graph"):
- raise HTTPException(status_code=404, detail="Workflow not found")
- inv = find_invocation(wf, entryPointId)
- if not inv:
- raise HTTPException(status_code=404, detail="Entry point not found")
- if inv.get("kind") != "webhook":
- raise HTTPException(status_code=400, detail="Entry point is not a webhook")
- if not inv.get("enabled", True):
- raise HTTPException(status_code=400, detail="Entry point is disabled")
- cfg = inv.get("config") or {}
- secret = cfg.get("webhookSecret")
- if secret:
- hdr = request.headers.get("X-Automation2-Webhook-Secret") or request.headers.get(
- "X-Webhook-Secret"
- )
- if hdr != str(secret):
- raise HTTPException(status_code=403, detail="Invalid webhook secret")
-
- services = getAutomation2Services(
- context.user,
- mandateId=mandateId,
- featureInstanceId=instanceId,
- )
- from modules.workflows.processing.shared.methodDiscovery import discoverMethods
-
- discoverMethods(services)
-
- title = inv.get("title") or {}
- label = ""
- if isinstance(title, dict):
- label = title.get("en") or title.get("de") or ""
- elif isinstance(title, str):
- label = title
- pl = body if isinstance(body, dict) else {}
- base = default_run_envelope(
- "webhook",
- entry_point_id=inv.get("id"),
- entry_point_label=label or None,
- payload=pl,
- raw={"httpBody": body},
- )
- run_env = normalize_run_envelope(base, user_id=userId)
-
- result = await executeGraph(
- graph=wf["graph"],
- services=services,
- workflowId=workflowId,
- instanceId=instanceId,
- userId=userId,
- mandateId=mandateId,
- automation2_interface=a2,
- run_envelope=run_env,
- )
- return result
-
-
-@router.post("/{instanceId}/workflows/{workflowId}/forms/{entryPointId}/submit")
-@limiter.limit("60/minute")
-async def post_workflow_form_submit(
- request: Request,
- instanceId: str = Path(..., description="Feature instance ID"),
- workflowId: str = Path(..., description="Workflow ID"),
- entryPointId: str = Path(..., description="Entry point ID (kind must be form)"),
- body: dict = Body(default_factory=dict),
- context: RequestContext = Depends(getRequestContext),
-) -> dict:
- """Form-style submit: same as execute with trigger.type form and payload from body."""
- mandateId = _validateInstanceAccess(instanceId, context)
- userId = str(context.user.id) if context.user else None
- a2 = getAutomation2Interface(context.user, mandateId, instanceId)
- wf = a2.getWorkflow(workflowId)
- if not wf or not wf.get("graph"):
- raise HTTPException(status_code=404, detail="Workflow not found")
- inv = find_invocation(wf, entryPointId)
- if not inv:
- raise HTTPException(status_code=404, detail="Entry point not found")
- if inv.get("kind") != "form":
- raise HTTPException(status_code=400, detail="Entry point is not a form")
- if not inv.get("enabled", True):
- raise HTTPException(status_code=400, detail="Entry point is disabled")
-
- services = getAutomation2Services(
- context.user,
- mandateId=mandateId,
- featureInstanceId=instanceId,
- )
- from modules.workflows.processing.shared.methodDiscovery import discoverMethods
-
- discoverMethods(services)
-
- title = inv.get("title") or {}
- label = ""
- if isinstance(title, dict):
- label = title.get("en") or title.get("de") or ""
- elif isinstance(title, str):
- label = title
- pl = body if isinstance(body, dict) else {}
- base = default_run_envelope(
- "form",
- entry_point_id=inv.get("id"),
- entry_point_label=label or None,
- payload=pl,
- raw={"formBody": body},
- )
- run_env = normalize_run_envelope(base, user_id=userId)
-
- result = await executeGraph(
- graph=wf["graph"],
- services=services,
- workflowId=workflowId,
- instanceId=instanceId,
- userId=userId,
- mandateId=mandateId,
- automation2_interface=a2,
- run_envelope=run_env,
- )
- return result
-
-
-# -------------------------------------------------------------------------
-# Runs and Resume
-# -------------------------------------------------------------------------
-
-
-@router.get("/{instanceId}/runs/completed")
-@limiter.limit("60/minute")
-def get_completed_runs(
- request: Request,
- instanceId: str = Path(..., description="Feature instance ID"),
- limit: int = Query(20, ge=1, le=50),
- context: RequestContext = Depends(getRequestContext),
-) -> dict:
- """Get recently completed runs with output (for Tasks page output section)."""
- mandateId = _validateInstanceAccess(instanceId, context)
- a2 = getAutomation2Interface(context.user, mandateId, instanceId)
- runs = a2.getRecentCompletedRuns(limit=limit)
- return {"runs": runs}
-
-
-@router.get("/{instanceId}/workflows/{workflowId}/runs")
-@limiter.limit("60/minute")
-def get_workflow_runs(
- request: Request,
- instanceId: str = Path(..., description="Feature instance ID"),
- workflowId: str = Path(..., description="Workflow ID"),
- context: RequestContext = Depends(getRequestContext),
-) -> dict:
- """Get runs for a workflow."""
- mandateId = _validateInstanceAccess(instanceId, context)
- a2 = getAutomation2Interface(context.user, mandateId, instanceId)
- if not a2.getWorkflow(workflowId):
- raise HTTPException(status_code=404, detail="Workflow not found")
- runs = a2.getRunsByWorkflow(workflowId)
- return {"runs": runs}
-
-
-@router.post("/{instanceId}/runs/{runId}/resume")
-@limiter.limit("30/minute")
-async def resume_run(
- request: Request,
- instanceId: str = Path(..., description="Feature instance ID"),
- runId: str = Path(..., description="Run ID"),
- body: dict = Body(..., description="{ taskId, result }"),
- context: RequestContext = Depends(getRequestContext),
-) -> dict:
- """Resume a paused run after task completion."""
- mandateId = _validateInstanceAccess(instanceId, context)
- a2 = getAutomation2Interface(context.user, mandateId, instanceId)
- run = a2.getRun(runId)
- if not run:
- raise HTTPException(status_code=404, detail="Run not found")
- taskId = body.get("taskId")
- result = body.get("result")
- if not taskId or result is None:
- raise HTTPException(status_code=400, detail="taskId and result required")
- task = a2.getTask(taskId)
- if not task or task.get("runId") != runId:
- raise HTTPException(status_code=404, detail="Task not found")
- if task.get("status") != "pending":
- raise HTTPException(status_code=400, detail="Task already completed")
- a2.updateTask(taskId, status="completed", result=result)
- nodeId = task.get("nodeId")
- nodeOutputs = dict(run.get("nodeOutputs") or {})
- nodeOutputs[nodeId] = result
- runContext = run.get("context") or {}
- connectionMap = runContext.get("connectionMap", {})
- inputSources = runContext.get("inputSources", {})
- workflowId = run.get("workflowId")
- wf = a2.getWorkflow(workflowId) if workflowId else None
- if not wf or not wf.get("graph"):
- raise HTTPException(status_code=400, detail="Workflow graph not found")
- graph = wf["graph"]
- services = getAutomation2Services(context.user, mandateId=mandateId, featureInstanceId=instanceId)
- resume_result = await executeGraph(
- graph=graph,
- services=services,
- workflowId=workflowId,
- instanceId=instanceId,
- userId=str(context.user.id) if context.user else None,
- mandateId=mandateId,
- automation2_interface=a2,
- initialNodeOutputs=nodeOutputs,
- startAfterNodeId=nodeId,
- runId=runId,
- )
- return resume_result
-
-
-# -------------------------------------------------------------------------
-# Tasks
-# -------------------------------------------------------------------------
-
-
-@router.get("/{instanceId}/tasks")
-@limiter.limit("60/minute")
-def get_tasks(
- request: Request,
- instanceId: str = Path(..., description="Feature instance ID"),
- workflowId: str = Query(None, description="Filter by workflow ID"),
- status: str = Query(None, description="Filter: pending, completed, rejected"),
- context: RequestContext = Depends(getRequestContext),
-) -> dict:
- """Get tasks - by default those assigned to current user, or all if no assignee filter.
- Enriches each task with workflowLabel and createdAt (from sysCreatedAt).
- """
- mandateId = _validateInstanceAccess(instanceId, context)
- a2 = getAutomation2Interface(context.user, mandateId, instanceId)
- assigneeId = str(context.user.id) if context.user else None
- items = a2.getTasks(workflowId=workflowId, status=status, assigneeId=assigneeId)
- workflows = {w["id"]: w for w in a2.getWorkflows()}
- enriched = []
- for t in items:
- wf = workflows.get(t.get("workflowId") or "")
- enriched.append({
- **t,
- "workflowLabel": wf.get("label", t.get("workflowId", "")) if wf else t.get("workflowId", ""),
- "createdAt": t.get("sysCreatedAt"),
- })
- return {"tasks": enriched}
-
-
-@router.post("/{instanceId}/tasks/{taskId}/complete")
-@limiter.limit("30/minute")
-async def complete_task(
- request: Request,
- instanceId: str = Path(..., description="Feature instance ID"),
- taskId: str = Path(..., description="Task ID"),
- body: dict = Body(..., description="{ result }"),
- context: RequestContext = Depends(getRequestContext),
-) -> dict:
- """Complete a task and resume the run."""
- mandateId = _validateInstanceAccess(instanceId, context)
- a2 = getAutomation2Interface(context.user, mandateId, instanceId)
- task = a2.getTask(taskId)
- if not task:
- raise HTTPException(status_code=404, detail="Task not found")
- runId = task.get("runId")
- result = body.get("result")
- if result is None:
- raise HTTPException(status_code=400, detail="result required")
- run = a2.getRun(runId)
- if not run:
- raise HTTPException(status_code=404, detail="Run not found")
- if task.get("status") != "pending":
- raise HTTPException(status_code=400, detail="Task already completed")
- a2.updateTask(taskId, status="completed", result=result)
- nodeId = task.get("nodeId")
- nodeOutputs = dict(run.get("nodeOutputs") or {})
- nodeOutputs[nodeId] = result
- workflowId = run.get("workflowId")
- wf = a2.getWorkflow(workflowId) if workflowId else None
- if not wf or not wf.get("graph"):
- raise HTTPException(status_code=400, detail="Workflow graph not found")
- graph = wf["graph"]
- services = getAutomation2Services(context.user, mandateId=mandateId, featureInstanceId=instanceId)
- return await executeGraph(
- graph=graph,
- services=services,
- workflowId=workflowId,
- instanceId=instanceId,
- userId=str(context.user.id) if context.user else None,
- mandateId=mandateId,
- automation2_interface=a2,
- initialNodeOutputs=nodeOutputs,
- startAfterNodeId=nodeId,
- runId=runId,
- )
diff --git a/modules/features/chatbot/bridges/ai.py b/modules/features/chatbot/bridges/ai.py
index a06668c8..1962fa8f 100644
--- a/modules/features/chatbot/bridges/ai.py
+++ b/modules/features/chatbot/bridges/ai.py
@@ -462,11 +462,7 @@ class AICenterChatModel(BaseChatModel):
elif isinstance(args_schema, BaseModel):
# It's a Pydantic model instance
if hasattr(args_schema, "model_dump"):
- # Pydantic v2
parameters = args_schema.model_dump()
- elif hasattr(args_schema, "dict"):
- # Pydantic v1
- parameters = args_schema.dict()
elif hasattr(args_schema, "schema"):
# Has schema method (might be a class)
try:
diff --git a/modules/features/chatbot/mainChatbot.py b/modules/features/chatbot/mainChatbot.py
index 33f8ae2f..b50fef79 100644
--- a/modules/features/chatbot/mainChatbot.py
+++ b/modules/features/chatbot/mainChatbot.py
@@ -12,14 +12,14 @@ logger = logging.getLogger(__name__)
# Feature metadata
FEATURE_CODE = "chatbot"
-FEATURE_LABEL = {"en": "Chatbot", "de": "Chatbot", "fr": "Chatbot"}
+FEATURE_LABEL = "Chatbot"
FEATURE_ICON = "mdi-robot"
# UI Objects for RBAC catalog
UI_OBJECTS = [
{
"objectKey": "ui.feature.chatbot.conversations",
- "label": {"en": "Conversations", "de": "Konversationen", "fr": "Conversations"},
+ "label": "Konversationen",
"meta": {"area": "conversations"}
}
]
@@ -28,22 +28,22 @@ UI_OBJECTS = [
RESOURCE_OBJECTS = [
{
"objectKey": "resource.feature.chatbot.startStream",
- "label": {"en": "Start Chat (Stream)", "de": "Chat starten (Stream)", "fr": "Démarrer chat (Stream)"},
+ "label": "Chat starten (Stream)",
"meta": {"endpoint": "/api/chatbot/{instanceId}/start/stream", "method": "POST"}
},
{
"objectKey": "resource.feature.chatbot.stop",
- "label": {"en": "Stop Chat", "de": "Chat stoppen", "fr": "Arrêter chat"},
+ "label": "Chat stoppen",
"meta": {"endpoint": "/api/chatbot/{instanceId}/stop/{workflowId}", "method": "POST"}
},
{
"objectKey": "resource.feature.chatbot.threads",
- "label": {"en": "Get Threads", "de": "Threads abrufen", "fr": "Récupérer threads"},
+ "label": "Threads abrufen",
"meta": {"endpoint": "/api/chatbot/{instanceId}/threads", "method": "GET"}
},
{
"objectKey": "resource.feature.chatbot.delete",
- "label": {"en": "Delete Chat", "de": "Chat löschen", "fr": "Supprimer chat"},
+ "label": "Chat löschen",
"meta": {"endpoint": "/api/chatbot/{instanceId}/{workflowId}", "method": "DELETE"}
},
]
@@ -74,11 +74,7 @@ REQUIRED_SERVICES = [
TEMPLATE_ROLES = [
{
"roleLabel": "chatbot-viewer",
- "description": {
- "en": "Chatbot Viewer - View chat threads (read-only)",
- "de": "Chatbot Betrachter - Chat-Threads ansehen (nur lesen)",
- "fr": "Visualiseur Chatbot - Consulter les threads (lecture seule)"
- },
+ "description": "Chatbot Betrachter - Chat-Threads ansehen (nur lesen)",
"accessRules": [
# UI: only threads view, NO active chat
{"context": "UI", "item": "ui.feature.chatbot.threads", "view": True},
@@ -90,11 +86,7 @@ TEMPLATE_ROLES = [
},
{
"roleLabel": "chatbot-user",
- "description": {
- "en": "Chatbot User - Use the chatbot and manage own threads",
- "de": "Chatbot Benutzer - Chatbot nutzen und eigene Threads verwalten",
- "fr": "Utilisateur Chatbot - Utiliser le chatbot et gérer ses threads"
- },
+ "description": "Chatbot Benutzer - Chatbot nutzen und eigene Threads verwalten",
"accessRules": [
# UI: full access to all views
{"context": "UI", "item": "ui.feature.chatbot.conversations", "view": True},
@@ -110,11 +102,7 @@ TEMPLATE_ROLES = [
},
{
"roleLabel": "chatbot-admin",
- "description": {
- "en": "Chatbot Admin - Full access to all chatbot features",
- "de": "Chatbot Admin - Vollzugriff auf alle Chatbot-Funktionen",
- "fr": "Administrateur Chatbot - Accès complet à toutes les fonctions chatbot"
- },
+ "description": "Chatbot Admin - Vollzugriff auf alle Chatbot-Funktionen",
"accessRules": [
# Full UI access
{"context": "UI", "item": None, "view": True},
@@ -203,8 +191,8 @@ def getChatStreamingHelper():
def __get_placeholder_user():
"""Placeholder user for contexts that only need service resolution (e.g. ChatStreamingHelper)."""
- from modules.datamodels.datamodelUam import User
- return User(id="system", username="system", email=None, fullName="System Placeholder")
+ from modules.interfaces.interfaceDbApp import getRootInterface
+ return getRootInterface().currentUser
def getEventManager(user, mandateId: Optional[str] = None, featureInstanceId: Optional[str] = None):
@@ -391,7 +379,8 @@ def _syncTemplateRolesToDb() -> int:
try:
from modules.interfaces.interfaceDbApp import getRootInterface
from modules.datamodels.datamodelRbac import Role, AccessRule, AccessRuleContext
-
+ from modules.datamodels.datamodelUtils import coerce_text_multilingual
+
rootInterface = getRootInterface()
# Get existing template roles for this feature (Pydantic models)
@@ -412,7 +401,7 @@ def _syncTemplateRolesToDb() -> int:
# Create new template role
newRole = Role(
roleLabel=roleLabel,
- description=roleTemplate.get("description", {}),
+ description=coerce_text_multilingual(roleTemplate.get("description", {})),
featureCode=FEATURE_CODE,
mandateId=None, # Global template
featureInstanceId=None,
diff --git a/modules/features/chatbot/routeFeatureChatbot.py b/modules/features/chatbot/routeFeatureChatbot.py
index 1775a253..fa7ab93c 100644
--- a/modules/features/chatbot/routeFeatureChatbot.py
+++ b/modules/features/chatbot/routeFeatureChatbot.py
@@ -32,6 +32,8 @@ from modules.features.chatbot.interfaceFeatureChatbot import ChatbotConversation
# Import chatbot feature
from modules.features.chatbot import chatProcess
from modules.features.chatbot.mainChatbot import getEventManager
+from modules.shared.i18nRegistry import apiRouteContext
+routeApiMsg = apiRouteContext("routeFeatureChatbot")
# Pre-warm AI connectors when this router loads (before first request).
# Ensures connectors are ready; avoids 4–8 s delay on first chatbot message.
@@ -150,8 +152,6 @@ def get_chatbot_threads(
if hasattr(workflow, 'model_dump'):
workflow_dict = workflow.model_dump()
- elif hasattr(workflow, 'dict'):
- workflow_dict = workflow.dict()
elif isinstance(workflow, dict):
workflow_dict = dict(workflow)
else:
@@ -267,7 +267,7 @@ async def stream_chatbot_start(
if not workflow:
raise HTTPException(
status_code=500,
- detail="Failed to create or load workflow"
+ detail=routeApiMsg("Failed to create or load workflow")
)
# Get event queue for the workflow
@@ -317,11 +317,11 @@ async def stream_chatbot_start(
# Emit filtered items
for item in filtered_items:
- # Convert Pydantic models to dicts for JSON serialization
+ _inner = item.get("item")
serializable_item = {
"type": item.get("type"),
"createdAt": item.get("createdAt"),
- "item": item.get("item").model_dump() if hasattr(item.get("item"), "model_dump") else (item.get("item").dict() if hasattr(item.get("item"), "dict") else item.get("item"))
+ "item": _inner.model_dump() if _inner is not None and hasattr(_inner, "model_dump") else _inner,
}
# Emit item directly in exact chatData format: {type, createdAt, item}
yield f"data: {json.dumps(serializable_item)}\n\n"
@@ -399,9 +399,6 @@ async def stream_chatbot_start(
if hasattr(item_obj, "model_dump"):
chatdata_item = chatdata_item.copy()
chatdata_item["item"] = item_obj.model_dump()
- elif hasattr(item_obj, "dict"):
- chatdata_item = chatdata_item.copy()
- chatdata_item["item"] = item_obj.dict()
yield f"data: {json.dumps(chatdata_item)}\n\n"
# Handle completion/stopped events to close stream
@@ -567,7 +564,7 @@ def delete_chatbot(
if not success:
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
- detail="Failed to delete workflow"
+ detail=routeApiMsg("Failed to delete workflow")
)
return {
diff --git a/modules/features/chatbot/service.py b/modules/features/chatbot/service.py
index a98150b5..5d60cdd7 100644
--- a/modules/features/chatbot/service.py
+++ b/modules/features/chatbot/service.py
@@ -278,7 +278,7 @@ async def _update_conversation_name_async(
# Emit stat event so frontend can refresh thread list/title
workflow = interfaceDbChat.getWorkflow(workflowId)
if workflow:
- wf_dict = workflow.model_dump() if hasattr(workflow, "model_dump") else workflow.dict()
+ wf_dict = workflow.model_dump()
await event_manager.emit_event(
context_id=workflowId,
event_type="chatdata",
@@ -966,7 +966,7 @@ async def _bridge_chatbot_events(
data={
"type": "message",
"createdAt": message_timestamp,
- "item": last_message.dict()
+ "item": last_message.model_dump()
},
event_category="chat"
)
@@ -1005,7 +1005,7 @@ async def _bridge_chatbot_events(
data={
"type": "message",
"createdAt": message_timestamp,
- "item": assistant_msg.dict()
+ "item": assistant_msg.model_dump()
},
event_category="chat"
)
@@ -1089,7 +1089,7 @@ async def _bridge_chatbot_events(
data={
"type": "message",
"createdAt": message_timestamp,
- "item": error_msg.dict()
+ "item": error_msg.model_dump()
},
event_category="chat"
)
@@ -1490,7 +1490,7 @@ async def _processChatbotMessageLangGraph(
data={
"type": "message",
"createdAt": message_timestamp,
- "item": errorMessage.dict()
+ "item": errorMessage.model_dump()
},
event_category="chat"
)
diff --git a/modules/features/commcoach/interfaceFeatureCommcoach.py b/modules/features/commcoach/interfaceFeatureCommcoach.py
index 825fca5d..c9b4564e 100644
--- a/modules/features/commcoach/interfaceFeatureCommcoach.py
+++ b/modules/features/commcoach/interfaceFeatureCommcoach.py
@@ -13,6 +13,7 @@ from modules.datamodels.datamodelUam import User
from modules.connectors.connectorDbPostgre import DatabaseConnector
from modules.shared.timeUtils import getIsoTimestamp
from modules.shared.configuration import APP_CONFIG
+from modules.shared.i18nRegistry import resolveText, t
from .datamodelCommcoach import (
CoachingContext, CoachingContextStatus,
@@ -412,9 +413,21 @@ def _calcGoalProgress(goalsRaw) -> Optional[int]:
return round(done / len(goals) * 100)
+_LEVELS = [
+ (50, 5, "master", "Meister"),
+ (25, 4, "expert", "Experte"),
+ (10, 3, "advanced", "Fortgeschritten"),
+ (3, 2, "engaged", "Engagiert"),
+]
+t("Meister")
+t("Experte")
+t("Fortgeschritten")
+t("Engagiert")
+t("Einsteiger")
+
+
def _calcLevel(totalSessions: int) -> Dict[str, Any]:
- levels = [(50, 5, "Meister"), (25, 4, "Experte"), (10, 3, "Fortgeschritten"), (3, 2, "Engagiert")]
- for threshold, number, label in levels:
+ for threshold, number, code, labelKey in _LEVELS:
if totalSessions >= threshold:
- return {"number": number, "label": label, "totalSessions": totalSessions}
- return {"number": 1, "label": "Einsteiger", "totalSessions": totalSessions}
+ return {"number": number, "code": code, "label": resolveText(labelKey), "totalSessions": totalSessions}
+ return {"number": 1, "code": "beginner", "label": resolveText("Einsteiger"), "totalSessions": totalSessions}
diff --git a/modules/features/commcoach/mainCommcoach.py b/modules/features/commcoach/mainCommcoach.py
index d21da056..acbd62a6 100644
--- a/modules/features/commcoach/mainCommcoach.py
+++ b/modules/features/commcoach/mainCommcoach.py
@@ -11,23 +11,23 @@ from typing import Dict, List, Any
logger = logging.getLogger(__name__)
FEATURE_CODE = "commcoach"
-FEATURE_LABEL = {"en": "Communication Coach", "de": "Kommunikations-Coach", "fr": "Coach Communication"}
+FEATURE_LABEL = "Kommunikations-Coach"
FEATURE_ICON = "mdi-account-voice"
UI_OBJECTS = [
{
"objectKey": "ui.feature.commcoach.dashboard",
- "label": {"en": "Dashboard", "de": "Dashboard", "fr": "Tableau de bord"},
+ "label": "Dashboard",
"meta": {"area": "dashboard"}
},
{
"objectKey": "ui.feature.commcoach.coaching",
- "label": {"en": "Coaching & Dossier", "de": "Coaching & Dossier", "fr": "Coaching & Dossier"},
+ "label": "Arbeitsthemen",
"meta": {"area": "coaching"}
},
{
"objectKey": "ui.feature.commcoach.settings",
- "label": {"en": "Settings", "de": "Einstellungen", "fr": "Parametres"},
+ "label": "Einstellungen",
"meta": {"area": "settings"}
},
]
@@ -35,7 +35,7 @@ UI_OBJECTS = [
DATA_OBJECTS = [
{
"objectKey": "data.feature.commcoach.CoachingContext",
- "label": {"en": "Coaching Context", "de": "Coaching-Kontext", "fr": "Contexte coaching"},
+ "label": "Coaching-Kontext",
"meta": {
"table": "CoachingContext",
"fields": ["id", "title", "category", "status"],
@@ -45,7 +45,7 @@ DATA_OBJECTS = [
},
{
"objectKey": "data.feature.commcoach.CoachingSession",
- "label": {"en": "Coaching Session", "de": "Coaching-Session", "fr": "Session coaching"},
+ "label": "Coaching-Session",
"meta": {
"table": "CoachingSession",
"fields": ["id", "contextId", "status", "summary"],
@@ -55,12 +55,12 @@ DATA_OBJECTS = [
},
{
"objectKey": "data.feature.commcoach.CoachingMessage",
- "label": {"en": "Coaching Message", "de": "Coaching-Nachricht", "fr": "Message coaching"},
+ "label": "Coaching-Nachricht",
"meta": {"table": "CoachingMessage", "fields": ["id", "sessionId", "role", "content"]}
},
{
"objectKey": "data.feature.commcoach.CoachingTask",
- "label": {"en": "Coaching Task", "de": "Coaching-Aufgabe", "fr": "Tache coaching"},
+ "label": "Coaching-Aufgabe",
"meta": {
"table": "CoachingTask",
"fields": ["id", "contextId", "title", "status"],
@@ -70,27 +70,27 @@ DATA_OBJECTS = [
},
{
"objectKey": "data.feature.commcoach.CoachingScore",
- "label": {"en": "Coaching Score", "de": "Coaching-Score", "fr": "Score coaching"},
+ "label": "Coaching-Score",
"meta": {"table": "CoachingScore", "fields": ["id", "dimension", "score", "trend"]}
},
{
"objectKey": "data.feature.commcoach.CoachingUserProfile",
- "label": {"en": "User Profile", "de": "Benutzerprofil", "fr": "Profil utilisateur"},
+ "label": "Benutzerprofil",
"meta": {"table": "CoachingUserProfile", "fields": ["id", "userId", "dailyReminderEnabled"]}
},
{
"objectKey": "data.feature.commcoach.CoachingPersona",
- "label": {"en": "Coaching Persona", "de": "Coaching-Persona", "fr": "Persona coaching"},
+ "label": "Coaching-Persona",
"meta": {"table": "CoachingPersona", "fields": ["id", "key", "label", "gender"]}
},
{
"objectKey": "data.feature.commcoach.CoachingBadge",
- "label": {"en": "Coaching Badge", "de": "Coaching-Auszeichnung", "fr": "Badge coaching"},
+ "label": "Coaching-Auszeichnung",
"meta": {"table": "CoachingBadge", "fields": ["id", "badgeKey", "awardedAt"]}
},
{
"objectKey": "data.feature.commcoach.*",
- "label": {"en": "All CommCoach Data", "de": "Alle CommCoach-Daten", "fr": "Toutes les donnees CommCoach"},
+ "label": "Alle CommCoach-Daten",
"meta": {"wildcard": True}
},
]
@@ -98,27 +98,27 @@ DATA_OBJECTS = [
RESOURCE_OBJECTS = [
{
"objectKey": "resource.feature.commcoach.context.create",
- "label": {"en": "Create Context", "de": "Kontext erstellen", "fr": "Creer contexte"},
+ "label": "Kontext erstellen",
"meta": {"endpoint": "/api/commcoach/{instanceId}/contexts", "method": "POST"}
},
{
"objectKey": "resource.feature.commcoach.context.archive",
- "label": {"en": "Archive Context", "de": "Kontext archivieren", "fr": "Archiver contexte"},
+ "label": "Kontext archivieren",
"meta": {"endpoint": "/api/commcoach/{instanceId}/contexts/{contextId}/archive", "method": "POST"}
},
{
"objectKey": "resource.feature.commcoach.session.start",
- "label": {"en": "Start Session", "de": "Session starten", "fr": "Demarrer session"},
+ "label": "Session starten",
"meta": {"endpoint": "/api/commcoach/{instanceId}/contexts/{contextId}/sessions/start", "method": "POST"}
},
{
"objectKey": "resource.feature.commcoach.session.complete",
- "label": {"en": "Complete Session", "de": "Session abschliessen", "fr": "Terminer session"},
+ "label": "Session abschliessen",
"meta": {"endpoint": "/api/commcoach/{instanceId}/sessions/{sessionId}/complete", "method": "POST"}
},
{
"objectKey": "resource.feature.commcoach.task.manage",
- "label": {"en": "Manage Tasks", "de": "Aufgaben verwalten", "fr": "Gerer taches"},
+ "label": "Aufgaben verwalten",
"meta": {"endpoint": "/api/commcoach/{instanceId}/contexts/{contextId}/tasks", "method": "POST"}
},
]
@@ -126,30 +126,22 @@ RESOURCE_OBJECTS = [
TEMPLATE_ROLES = [
{
"roleLabel": "commcoach-viewer",
- "description": {
- "en": "Communication Coach Viewer - View coaching data (read-only)",
- "de": "Kommunikations-Coach Betrachter - Coaching-Daten ansehen (nur lesen)",
- "fr": "Visualiseur Coach Communication - Consulter les donnees coaching (lecture seule)",
- },
+ "description": "Kommunikations-Coach Betrachter - Coaching-Daten ansehen (nur lesen)",
"accessRules": [
{"context": "UI", "item": "ui.feature.commcoach.dashboard", "view": True},
{"context": "UI", "item": "ui.feature.commcoach.coaching", "view": True},
- {"context": "UI", "item": "ui.feature.commcoach.dossier", "view": True},
{"context": "UI", "item": "ui.feature.commcoach.settings", "view": True},
{"context": "DATA", "item": None, "view": True, "read": "m", "create": "n", "update": "n", "delete": "n"},
+          # Viewer: no RESOURCE endpoints (mutations); rule stated explicitly for a consistent context matrix
+ {"context": "RESOURCE", "item": None, "view": False},
],
},
{
"roleLabel": "commcoach-user",
- "description": {
- "en": "Communication Coach User - Can manage own coaching contexts and sessions",
- "de": "Kommunikations-Coach Benutzer - Kann eigene Coaching-Kontexte und Sessions verwalten",
- "fr": "Utilisateur Coach Communication - Peut gerer ses propres contextes et sessions",
- },
+ "description": "Kommunikations-Coach Benutzer - Kann eigene Coaching-Kontexte und Sessions verwalten",
"accessRules": [
{"context": "UI", "item": "ui.feature.commcoach.dashboard", "view": True},
{"context": "UI", "item": "ui.feature.commcoach.coaching", "view": True},
- {"context": "UI", "item": "ui.feature.commcoach.dossier", "view": True},
{"context": "UI", "item": "ui.feature.commcoach.settings", "view": True},
{"context": "DATA", "item": "data.feature.commcoach.CoachingContext", "view": True, "read": "m", "create": "m", "update": "m", "delete": "m"},
{"context": "DATA", "item": "data.feature.commcoach.CoachingSession", "view": True, "read": "m", "create": "m", "update": "m", "delete": "n"},
@@ -166,11 +158,7 @@ TEMPLATE_ROLES = [
},
{
"roleLabel": "commcoach-admin",
- "description": {
- "en": "Communication Coach Admin - All UI and API actions; data scoped to own records",
- "de": "Kommunikations-Coach Admin - Alle UI- und API-Aktionen; Daten nur eigene Datensaetze",
- "fr": "Administrateur Coach Communication - Toute l'UI et les API; donnees propres",
- },
+ "description": "Kommunikations-Coach Admin - Alle UI- und API-Aktionen; Daten nur eigene Datensaetze",
"accessRules": [
{"context": "UI", "item": None, "view": True},
{"context": "RESOURCE", "item": None, "view": True},
@@ -248,9 +236,9 @@ def _seedBuiltinPersonas():
try:
from .serviceCommcoachPersonas import seedBuiltinPersonas
from .interfaceFeatureCommcoach import getInterface
- from modules.datamodels.datamodelUam import User
+ from modules.interfaces.interfaceDbApp import getRootInterface
- systemUser = User(id="system", username="system", email="system@poweron.swiss")
+ systemUser = getRootInterface().currentUser
interface = getInterface(systemUser)
seedBuiltinPersonas(interface)
except Exception as e:
@@ -271,6 +259,7 @@ def _syncTemplateRolesToDb() -> int:
try:
from modules.interfaces.interfaceDbApp import getRootInterface
from modules.datamodels.datamodelRbac import Role, AccessRule, AccessRuleContext
+ from modules.datamodels.datamodelUtils import coerce_text_multilingual
rootInterface = getRootInterface()
existingRoles = rootInterface.getRolesByFeatureCode(FEATURE_CODE)
@@ -287,7 +276,7 @@ def _syncTemplateRolesToDb() -> int:
else:
newRole = Role(
roleLabel=roleLabel,
- description=roleTemplate.get("description", {}),
+ description=coerce_text_multilingual(roleTemplate.get("description", {})),
featureCode=FEATURE_CODE,
mandateId=None,
featureInstanceId=None,
diff --git a/modules/features/commcoach/routeFeatureCommcoach.py b/modules/features/commcoach/routeFeatureCommcoach.py
index 8ffd3eca..99ae798e 100644
--- a/modules/features/commcoach/routeFeatureCommcoach.py
+++ b/modules/features/commcoach/routeFeatureCommcoach.py
@@ -33,6 +33,8 @@ from .datamodelCommcoach import (
StartSessionRequest, CreatePersonaRequest, UpdatePersonaRequest,
)
from .serviceCommcoach import CommcoachService, emitSessionEvent, getSessionEventQueue, cleanupSessionEvents
+from modules.shared.i18nRegistry import apiRouteContext
+routeApiMsg = apiRouteContext("routeFeatureCommcoach")
logger = logging.getLogger(__name__)
_activeProcessTasks: dict = {}
@@ -78,14 +80,14 @@ def _validateInstanceAccess(instanceId: str, context: RequestContext) -> str:
raise HTTPException(status_code=404, detail=f"Feature instance '{instanceId}' not found")
mandateId = instance.get("mandateId") if isinstance(instance, dict) else getattr(instance, "mandateId", None)
if not mandateId:
- raise HTTPException(status_code=500, detail="Feature instance has no mandateId")
+ raise HTTPException(status_code=500, detail=routeApiMsg("Feature instance has no mandateId"))
return str(mandateId)
def _validateOwnership(record: dict, context: RequestContext, fieldName: str = "userId") -> None:
"""Strict ownership check. SysAdmin does NOT bypass for content access."""
if record.get(fieldName) != str(context.user.id):
- raise HTTPException(status_code=404, detail="Not found")
+ raise HTTPException(status_code=404, detail=routeApiMsg("Not found"))
# =========================================================================
@@ -158,7 +160,7 @@ async def getContext(
ctx = interface.getContext(contextId)
if not ctx:
- raise HTTPException(status_code=404, detail="Context not found")
+ raise HTTPException(status_code=404, detail=routeApiMsg("Context not found"))
_validateOwnership(ctx, context)
tasks = interface.getTasks(contextId, userId)
@@ -187,7 +189,7 @@ async def updateContext(
ctx = interface.getContext(contextId)
if not ctx:
- raise HTTPException(status_code=404, detail="Context not found")
+ raise HTTPException(status_code=404, detail=routeApiMsg("Context not found"))
_validateOwnership(ctx, context)
updates = body.model_dump(exclude_none=True)
@@ -208,7 +210,7 @@ async def deleteContext(
ctx = interface.getContext(contextId)
if not ctx:
- raise HTTPException(status_code=404, detail="Context not found")
+ raise HTTPException(status_code=404, detail=routeApiMsg("Context not found"))
_validateOwnership(ctx, context)
interface.deleteContext(contextId)
@@ -228,7 +230,7 @@ async def archiveContext(
ctx = interface.getContext(contextId)
if not ctx:
- raise HTTPException(status_code=404, detail="Context not found")
+ raise HTTPException(status_code=404, detail=routeApiMsg("Context not found"))
_validateOwnership(ctx, context)
updated = interface.updateContext(contextId, {"status": CoachingContextStatus.ARCHIVED.value})
@@ -249,7 +251,7 @@ async def activateContext(
ctx = interface.getContext(contextId)
if not ctx:
- raise HTTPException(status_code=404, detail="Context not found")
+ raise HTTPException(status_code=404, detail=routeApiMsg("Context not found"))
_validateOwnership(ctx, context)
updated = interface.updateContext(contextId, {"status": CoachingContextStatus.ACTIVE.value})
@@ -274,7 +276,7 @@ async def listSessions(
ctx = interface.getContext(contextId)
if not ctx:
- raise HTTPException(status_code=404, detail="Context not found")
+ raise HTTPException(status_code=404, detail=routeApiMsg("Context not found"))
_validateOwnership(ctx, context)
sessions = interface.getSessions(contextId, userId)
@@ -297,7 +299,7 @@ async def startSession(
ctx = interface.getContext(contextId)
if not ctx:
- raise HTTPException(status_code=404, detail="Context not found")
+ raise HTTPException(status_code=404, detail=routeApiMsg("Context not found"))
_validateOwnership(ctx, context)
activeSession = interface.getActiveSession(contextId, userId)
@@ -420,7 +422,7 @@ async def getSession(
session = interface.getSession(sessionId)
if not session:
- raise HTTPException(status_code=404, detail="Session not found")
+ raise HTTPException(status_code=404, detail=routeApiMsg("Session not found"))
_validateOwnership(session, context)
messages = interface.getMessages(sessionId)
@@ -441,7 +443,7 @@ async def completeSession(
session = interface.getSession(sessionId)
if not session:
- raise HTTPException(status_code=404, detail="Session not found")
+ raise HTTPException(status_code=404, detail=routeApiMsg("Session not found"))
_validateOwnership(session, context)
if session.get("status") != CoachingSessionStatus.ACTIVE.value:
@@ -466,7 +468,7 @@ async def cancelSession(
session = interface.getSession(sessionId)
if not session:
- raise HTTPException(status_code=404, detail="Session not found")
+ raise HTTPException(status_code=404, detail=routeApiMsg("Session not found"))
_validateOwnership(session, context)
from modules.shared.timeUtils import getIsoTimestamp
@@ -496,11 +498,11 @@ async def sendMessageStream(
session = interface.getSession(sessionId)
if not session:
- raise HTTPException(status_code=404, detail="Session not found")
+ raise HTTPException(status_code=404, detail=routeApiMsg("Session not found"))
_validateOwnership(session, context)
if session.get("status") != CoachingSessionStatus.ACTIVE.value:
- raise HTTPException(status_code=400, detail="Session is not active")
+ raise HTTPException(status_code=400, detail=routeApiMsg("Session is not active"))
contextId = session.get("contextId")
service = CommcoachService(context.user, mandateId, instanceId)
@@ -572,15 +574,15 @@ async def sendAudioStream(
session = interface.getSession(sessionId)
if not session:
- raise HTTPException(status_code=404, detail="Session not found")
+ raise HTTPException(status_code=404, detail=routeApiMsg("Session not found"))
_validateOwnership(session, context)
if session.get("status") != CoachingSessionStatus.ACTIVE.value:
- raise HTTPException(status_code=400, detail="Session is not active")
+ raise HTTPException(status_code=400, detail=routeApiMsg("Session is not active"))
audioBody = await request.body()
if not audioBody:
- raise HTTPException(status_code=400, detail="No audio data received")
+ raise HTTPException(status_code=400, detail=routeApiMsg("No audio data received"))
from .serviceCommcoach import _getUserVoicePrefs
language, _ = _getUserVoicePrefs(str(context.user.id), mandateId)
@@ -640,7 +642,7 @@ async def streamSession(
session = interface.getSession(sessionId)
if not session:
- raise HTTPException(status_code=404, detail="Session not found")
+ raise HTTPException(status_code=404, detail=routeApiMsg("Session not found"))
_validateOwnership(session, context)
async def _eventGenerator():
@@ -708,7 +710,7 @@ async def createTask(
ctx = interface.getContext(contextId)
if not ctx:
- raise HTTPException(status_code=404, detail="Context not found")
+ raise HTTPException(status_code=404, detail=routeApiMsg("Context not found"))
_validateOwnership(ctx, context)
taskData = CoachingTask(
@@ -739,7 +741,7 @@ async def updateTask(
task = interface.getTask(taskId)
if not task:
- raise HTTPException(status_code=404, detail="Task not found")
+ raise HTTPException(status_code=404, detail=routeApiMsg("Task not found"))
_validateOwnership(task, context)
updates = body.model_dump(exclude_none=True)
@@ -761,7 +763,7 @@ async def updateTaskStatus(
task = interface.getTask(taskId)
if not task:
- raise HTTPException(status_code=404, detail="Task not found")
+ raise HTTPException(status_code=404, detail=routeApiMsg("Task not found"))
_validateOwnership(task, context)
updates = {"status": body.status.value}
@@ -786,7 +788,7 @@ async def deleteTask(
task = interface.getTask(taskId)
if not task:
- raise HTTPException(status_code=404, detail="Task not found")
+ raise HTTPException(status_code=404, detail=routeApiMsg("Task not found"))
_validateOwnership(task, context)
interface.deleteTask(taskId)
@@ -867,7 +869,7 @@ async def exportDossier(
ctx = interface.getContext(contextId)
if not ctx:
- raise HTTPException(status_code=404, detail="Context not found")
+ raise HTTPException(status_code=404, detail=routeApiMsg("Context not found"))
_validateOwnership(ctx, context)
tasks = interface.getTasks(contextId, userId)
@@ -902,7 +904,7 @@ async def exportSession(
session = interface.getSession(sessionId)
if not session:
- raise HTTPException(status_code=404, detail="Session not found")
+ raise HTTPException(status_code=404, detail=routeApiMsg("Session not found"))
_validateOwnership(session, context)
contextId = session.get("contextId")
@@ -983,9 +985,9 @@ async def updatePersonaRoute(
persona = interface.getPersona(personaId)
if not persona:
- raise HTTPException(status_code=404, detail="Persona not found")
+ raise HTTPException(status_code=404, detail=routeApiMsg("Persona not found"))
if persona.get("category") == "builtin":
- raise HTTPException(status_code=403, detail="Builtin personas cannot be edited")
+ raise HTTPException(status_code=403, detail=routeApiMsg("Builtin personas cannot be edited"))
_validateOwnership(persona, context)
updates = body.model_dump(exclude_none=True)
@@ -1006,9 +1008,9 @@ async def deletePersonaRoute(
persona = interface.getPersona(personaId)
if not persona:
- raise HTTPException(status_code=404, detail="Persona not found")
+ raise HTTPException(status_code=404, detail=routeApiMsg("Persona not found"))
if persona.get("category") == "builtin":
- raise HTTPException(status_code=403, detail="Builtin personas cannot be deleted")
+ raise HTTPException(status_code=403, detail=routeApiMsg("Builtin personas cannot be deleted"))
_validateOwnership(persona, context)
interface.deletePersona(personaId)
diff --git a/modules/features/commcoach/serviceCommcoachGamification.py b/modules/features/commcoach/serviceCommcoachGamification.py
index 5b8d5eb6..180706de 100644
--- a/modules/features/commcoach/serviceCommcoachGamification.py
+++ b/modules/features/commcoach/serviceCommcoachGamification.py
@@ -7,6 +7,7 @@ Checks and awards badges after each session completion.
import logging
from typing import Dict, Any, List, Optional
+from modules.shared.i18nRegistry import resolveText, t
logger = logging.getLogger(__name__)
@@ -78,6 +79,34 @@ BADGE_DEFINITIONS: Dict[str, Dict[str, Any]] = {
},
}
+# Register all badge labels/descriptions at import time for i18n xx base set
+t("Erste Session")
+t("Deine erste Coaching-Session abgeschlossen")
+t("3-Tage-Serie")
+t("3 Tage in Folge eine Session absolviert")
+t("Wochenserie")
+t("7 Tage in Folge eine Session absolviert")
+t("Monatsserie")
+t("30 Tage in Folge eine Session absolviert")
+t("Engagiert")
+t("5 Sessions abgeschlossen")
+t("Fortgeschritten")
+t("10 Sessions abgeschlossen")
+t("Experte")
+t("25 Sessions abgeschlossen")
+t("Meister")
+t("50 Sessions abgeschlossen")
+t("Bestleistung")
+t("Durchschnittsscore über 80 in einer Session")
+t("Vielseitig")
+t("3 verschiedene Coaching-Themen aktiv")
+t("Rollenspieler")
+t("Erste Roleplay-Session mit einer Persona abgeschlossen")
+t("Ganzheitlich")
+t("In allen 5 Kompetenz-Dimensionen bewertet")
+t("Umsetzer")
+t("10 Coaching-Aufgaben erledigt")
+
async def checkAndAwardBadges(interface, userId: str, mandateId: str, instanceId: str,
session: Optional[Dict[str, Any]] = None) -> List[Dict[str, Any]]:
@@ -135,8 +164,8 @@ async def checkAndAwardBadges(interface, userId: str, mandateId: str, instanceId
}
newBadge = interface.awardBadge(badgeData)
definition = BADGE_DEFINITIONS.get(badgeKey, {})
- newBadge["label"] = definition.get("label", badgeKey)
- newBadge["description"] = definition.get("description", "")
+ newBadge["label"] = resolveText(definition.get("label", badgeKey))
+ newBadge["description"] = resolveText(definition.get("description", ""))
newBadge["icon"] = definition.get("icon", "star")
awarded.append(newBadge)
logger.info(f"Badge '{badgeKey}' awarded to user {userId}")
@@ -145,5 +174,12 @@ async def checkAndAwardBadges(interface, userId: str, mandateId: str, instanceId
def getBadgeDefinitions() -> Dict[str, Dict[str, Any]]:
- """Return all badge definitions for the frontend."""
- return BADGE_DEFINITIONS
+ """Return all badge definitions for the frontend (labels resolved via i18n)."""
+ resolved = {}
+ for key, defn in BADGE_DEFINITIONS.items():
+ resolved[key] = {
+ **defn,
+ "label": resolveText(defn["label"]),
+ "description": resolveText(defn["description"]),
+ }
+ return resolved
diff --git a/modules/features/commcoach/tests/test_mainCommcoach.py b/modules/features/commcoach/tests/test_mainCommcoach.py
index 6be563b6..bed151c8 100644
--- a/modules/features/commcoach/tests/test_mainCommcoach.py
+++ b/modules/features/commcoach/tests/test_mainCommcoach.py
@@ -17,9 +17,8 @@ class TestFeatureMetadata:
assert FEATURE_CODE == "commcoach"
def test_featureLabel(self):
- assert "de" in FEATURE_LABEL
- assert "en" in FEATURE_LABEL
- assert "Coach" in FEATURE_LABEL["de"]
+ assert isinstance(FEATURE_LABEL, str)
+ assert "Coach" in FEATURE_LABEL
def test_featureIcon(self):
assert FEATURE_ICON.startswith("mdi-")
@@ -37,17 +36,17 @@ class TestFeatureDefinition:
class TestRbacObjects:
def test_uiObjectsExist(self):
objs = getUiObjects()
- assert len(objs) >= 4
+ assert len(objs) >= 3
keys = [o["objectKey"] for o in objs]
assert "ui.feature.commcoach.dashboard" in keys
assert "ui.feature.commcoach.coaching" in keys
- assert "ui.feature.commcoach.dossier" in keys
assert "ui.feature.commcoach.settings" in keys
def test_uiObjectsHaveLabels(self):
for obj in getUiObjects():
assert "label" in obj
- assert "de" in obj["label"]
+ assert isinstance(obj["label"], str)
+ assert len(obj["label"]) > 0
def test_dataObjectsExist(self):
objs = getDataObjects()
@@ -94,7 +93,7 @@ class TestTemplateRoles:
def test_roleHasDescription(self):
for role in getTemplateRoles():
assert "description" in role
- assert "de" in role["description"]
+ assert isinstance(role["description"], str) and len(role["description"].strip()) > 0
def test_roleHasAccessRules(self):
for role in getTemplateRoles():
diff --git a/modules/features/graphicalEditor/__init__.py b/modules/features/graphicalEditor/__init__.py
new file mode 100644
index 00000000..bb8c0a4b
--- /dev/null
+++ b/modules/features/graphicalEditor/__init__.py
@@ -0,0 +1,2 @@
+# Copyright (c) 2025 Patrick Motsch
+# GraphicalEditor feature - n8n-style flow automation with visual editor
diff --git a/modules/features/graphicalEditor/datamodelFeatureGraphicalEditor.py b/modules/features/graphicalEditor/datamodelFeatureGraphicalEditor.py
new file mode 100644
index 00000000..c4064385
--- /dev/null
+++ b/modules/features/graphicalEditor/datamodelFeatureGraphicalEditor.py
@@ -0,0 +1,403 @@
+# Copyright (c) 2025 Patrick Motsch
+# All rights reserved.
+"""GraphicalEditor models with Auto-prefix: AutoWorkflow, AutoVersion, AutoRun, AutoStepLog, AutoTask."""
+
+from enum import Enum
+from typing import Dict, Any, List, Optional
+from pydantic import BaseModel, Field
+from modules.datamodels.datamodelBase import PowerOnModel
+from modules.shared.i18nRegistry import i18nModel
+import uuid
+
+
+# ---------------------------------------------------------------------------
+# Enums
+# ---------------------------------------------------------------------------
+
+class AutoWorkflowStatus(str, Enum):
+ DRAFT = "draft"
+ PUBLISHED = "published"
+ ARCHIVED = "archived"
+
+
+class AutoRunStatus(str, Enum):
+ RUNNING = "running"
+ PAUSED = "paused"
+ COMPLETED = "completed"
+ FAILED = "failed"
+ CANCELLED = "cancelled"
+
+
+class AutoStepStatus(str, Enum):
+ PENDING = "pending"
+ RUNNING = "running"
+ COMPLETED = "completed"
+ FAILED = "failed"
+ SKIPPED = "skipped"
+
+
+class AutoTaskStatus(str, Enum):
+ PENDING = "pending"
+ COMPLETED = "completed"
+ CANCELLED = "cancelled"
+ EXPIRED = "expired"
+
+
+class AutoTemplateScope(str, Enum):
+ USER = "user"
+ INSTANCE = "instance"
+ MANDATE = "mandate"
+ SYSTEM = "system"
+
+
+# ---------------------------------------------------------------------------
+# AutoWorkflow
+# ---------------------------------------------------------------------------
+
+@i18nModel("Workflow")
+class AutoWorkflow(PowerOnModel):
+ id: str = Field(
+ default_factory=lambda: str(uuid.uuid4()),
+ description="Primary key",
+ json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False, "label": "ID"},
+ )
+ mandateId: str = Field(
+ description="Mandate ID",
+ json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False, "label": "Mandanten-ID"},
+ )
+ featureInstanceId: str = Field(
+ description="Feature instance ID",
+ json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False, "label": "Feature-Instanz-ID"},
+ )
+ label: str = Field(
+ description="User-friendly workflow name",
+ json_schema_extra={"frontend_type": "text", "frontend_required": True, "label": "Bezeichnung"},
+ )
+ description: Optional[str] = Field(
+ default=None,
+ description="Workflow description",
+ json_schema_extra={"frontend_type": "textarea", "frontend_required": False, "label": "Beschreibung"},
+ )
+ tags: List[str] = Field(
+ default_factory=list,
+ description="Tags for categorization",
+ json_schema_extra={"frontend_type": "tags", "frontend_required": False, "label": "Tags"},
+ )
+ isTemplate: bool = Field(
+ default=False,
+ description="Whether this workflow is a template",
+ json_schema_extra={"frontend_type": "checkbox", "frontend_required": False, "label": "Ist Vorlage"},
+ )
+ templateSourceId: Optional[str] = Field(
+ default=None,
+ description="ID of the template this workflow was created from",
+ json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False, "label": "Vorlagen-Quelle"},
+ )
+ templateScope: Optional[str] = Field(
+ default=None,
+ description="Template scope: user, instance, mandate, system (AutoTemplateScope)",
+ json_schema_extra={"frontend_type": "select", "frontend_required": False, "label": "Vorlagen-Bereich"},
+ )
+ sharedReadOnly: bool = Field(
+ default=False,
+ description="If true, shared template is read-only for non-owners",
+ json_schema_extra={"frontend_type": "checkbox", "frontend_required": False, "label": "Freigabe nur-lesen"},
+ )
+ currentVersionId: Optional[str] = Field(
+ default=None,
+ description="ID of the currently published AutoVersion",
+ json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False, "label": "Aktuelle Version"},
+ )
+ active: bool = Field(
+ default=True,
+ description="Whether workflow is active",
+ json_schema_extra={"frontend_type": "checkbox", "frontend_required": False, "label": "Aktiv"},
+ )
+ eventId: Optional[str] = Field(
+ default=None,
+ description="Scheduler event ID for incremental sync",
+ json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False, "label": "Event-ID"},
+ )
+ notifyOnFailure: bool = Field(
+ default=True,
+ description="Send notification (in-app + email) when a run fails",
+ json_schema_extra={"frontend_type": "checkbox", "frontend_required": False, "label": "Bei Fehler benachrichtigen"},
+ )
+ # Legacy fields kept for backward compatibility during transition
+ graph: Dict[str, Any] = Field(
+ default_factory=dict,
+ description="Graph with nodes and connections (legacy; prefer AutoVersion.graph)",
+ json_schema_extra={"frontend_type": "textarea", "frontend_required": False, "label": "Graph"},
+ )
+ invocations: List[Dict[str, Any]] = Field(
+ default_factory=list,
+ description="Entry points / starts (manual, form, schedule, webhook, ...)",
+ json_schema_extra={"frontend_type": "textarea", "frontend_required": False, "label": "Starts / Einstiegspunkte"},
+ )
+
+
+# ---------------------------------------------------------------------------
+# AutoVersion
+# ---------------------------------------------------------------------------
+
+@i18nModel("Workflow-Version")
+class AutoVersion(PowerOnModel):
+ id: str = Field(
+ default_factory=lambda: str(uuid.uuid4()),
+ description="Primary key",
+ json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False, "label": "ID"},
+ )
+ workflowId: str = Field(
+ description="FK -> AutoWorkflow",
+ json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": True, "label": "Workflow-ID"},
+ )
+ versionNumber: int = Field(
+ default=1,
+ description="Incrementing version number",
+ json_schema_extra={"frontend_type": "number", "frontend_readonly": True, "frontend_required": False, "label": "Version"},
+ )
+ status: str = Field(
+ default=AutoWorkflowStatus.DRAFT.value,
+ description="Version status: draft, published, archived",
+ json_schema_extra={"frontend_type": "select", "frontend_required": False, "label": "Status"},
+ )
+ graph: Dict[str, Any] = Field(
+ default_factory=dict,
+ description="Graph with nodes and connections (incl. node parameters)",
+ json_schema_extra={"frontend_type": "textarea", "frontend_required": True, "label": "Graph"},
+ )
+ invocations: List[Dict[str, Any]] = Field(
+ default_factory=list,
+ description="Entry points / starts for this version",
+ json_schema_extra={"frontend_type": "textarea", "frontend_required": False, "label": "Einstiegspunkte"},
+ )
+ publishedAt: Optional[float] = Field(
+ default=None,
+ description="Timestamp when version was published",
+ json_schema_extra={"frontend_type": "datetime", "frontend_readonly": True, "frontend_required": False, "label": "Veröffentlicht am"},
+ )
+ publishedBy: Optional[str] = Field(
+ default=None,
+ description="User ID who published this version",
+ json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False, "label": "Veröffentlicht von"},
+ )
+
+
+# ---------------------------------------------------------------------------
+# AutoRun
+# ---------------------------------------------------------------------------
+
+@i18nModel("Workflow-Ausführung")
+class AutoRun(PowerOnModel):
+ id: str = Field(
+ default_factory=lambda: str(uuid.uuid4()),
+ description="Primary key",
+ json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False, "label": "ID"},
+ )
+ workflowId: str = Field(
+ description="Workflow ID",
+ json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": True, "label": "Workflow-ID"},
+ )
+ label: Optional[str] = Field(
+ default=None,
+ description="Human-readable run label, set at creation from workflow name or caller",
+ json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False, "label": "Bezeichnung"},
+ )
+ mandateId: Optional[str] = Field(
+ default=None,
+ description="Mandate ID for cross-feature querying",
+ json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False, "label": "Mandanten-ID"},
+ )
+ ownerId: Optional[str] = Field(
+ default=None,
+ description="User ID who triggered this run",
+ json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False, "label": "Auslöser"},
+ )
+ versionId: Optional[str] = Field(
+ default=None,
+ description="AutoVersion ID used for this run",
+ json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False, "label": "Versions-ID"},
+ )
+ status: str = Field(
+ default=AutoRunStatus.RUNNING.value,
+ description="Status: running, paused, completed, failed, cancelled",
+ json_schema_extra={"frontend_type": "text", "frontend_required": False, "label": "Status"},
+ )
+ trigger: Dict[str, Any] = Field(
+ default_factory=dict,
+ description="Trigger info (type, entryPointId, payload, etc.)",
+ json_schema_extra={"frontend_type": "textarea", "frontend_required": False, "label": "Auslöser"},
+ )
+ startedAt: Optional[float] = Field(
+ default=None,
+ description="Run start timestamp",
+ json_schema_extra={"frontend_type": "datetime", "frontend_readonly": True, "frontend_required": False, "label": "Gestartet am"},
+ )
+ completedAt: Optional[float] = Field(
+ default=None,
+ description="Run completion timestamp",
+ json_schema_extra={"frontend_type": "datetime", "frontend_readonly": True, "frontend_required": False, "label": "Abgeschlossen am"},
+ )
+ nodeOutputs: Dict[str, Any] = Field(
+ default_factory=dict,
+ description="Outputs from executed nodes",
+ json_schema_extra={"frontend_type": "textarea", "frontend_required": False, "label": "Node-Ausgaben"},
+ )
+ currentNodeId: Optional[str] = Field(
+ default=None,
+ description="Node ID when paused (human task / email wait)",
+ json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False, "label": "Aktueller Knoten"},
+ )
+ resumeContext: Dict[str, Any] = Field(
+ default_factory=dict,
+ description="Context for resume (connectionMap, inputSources, etc.)",
+ json_schema_extra={"frontend_type": "textarea", "frontend_required": False, "label": "Wiederaufnahme-Kontext"},
+ )
+ error: Optional[str] = Field(
+ default=None,
+ description="Error message if failed",
+ json_schema_extra={"frontend_type": "textarea", "frontend_readonly": True, "frontend_required": False, "label": "Fehler"},
+ )
+ costTokens: int = Field(
+ default=0,
+ description="Total tokens consumed by AI nodes",
+ json_schema_extra={"frontend_type": "number", "frontend_readonly": True, "frontend_required": False, "label": "Verbrauchte Tokens"},
+ )
+ costCredits: float = Field(
+ default=0.0,
+ description="Total credits consumed",
+ json_schema_extra={"frontend_type": "number", "frontend_readonly": True, "frontend_required": False, "label": "Verbrauchte Credits"},
+ )
+
+
+# ---------------------------------------------------------------------------
+# AutoStepLog
+# ---------------------------------------------------------------------------
+
+@i18nModel("Schritt-Protokoll")
+class AutoStepLog(PowerOnModel):
+ id: str = Field(
+ default_factory=lambda: str(uuid.uuid4()),
+ description="Primary key",
+ json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False, "label": "ID"},
+ )
+ runId: str = Field(
+ description="FK -> AutoRun",
+ json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": True, "label": "Lauf-ID"},
+ )
+ nodeId: str = Field(
+ description="Node ID in the graph",
+ json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": True, "label": "Knoten-ID"},
+ )
+ nodeType: str = Field(
+ description="Node type (e.g. ai.chat, email.send)",
+ json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": True, "label": "Knotentyp"},
+ )
+ status: str = Field(
+ default=AutoStepStatus.PENDING.value,
+ description="Step status: pending, running, completed, failed, skipped",
+ json_schema_extra={"frontend_type": "text", "frontend_required": False, "label": "Status"},
+ )
+ inputSnapshot: Dict[str, Any] = Field(
+ default_factory=dict,
+ description="Snapshot of inputs at execution time",
+ json_schema_extra={"frontend_type": "textarea", "frontend_required": False, "label": "Eingabe-Snapshot"},
+ )
+ output: Dict[str, Any] = Field(
+ default_factory=dict,
+ description="Node output",
+ json_schema_extra={"frontend_type": "textarea", "frontend_required": False, "label": "Ausgabe"},
+ )
+ error: Optional[str] = Field(
+ default=None,
+ description="Error message if step failed",
+ json_schema_extra={"frontend_type": "textarea", "frontend_readonly": True, "frontend_required": False, "label": "Fehler"},
+ )
+ startedAt: Optional[float] = Field(
+ default=None,
+ description="Step start timestamp",
+ json_schema_extra={"frontend_type": "datetime", "frontend_readonly": True, "frontend_required": False, "label": "Gestartet am"},
+ )
+ completedAt: Optional[float] = Field(
+ default=None,
+ description="Step completion timestamp",
+ json_schema_extra={"frontend_type": "datetime", "frontend_readonly": True, "frontend_required": False, "label": "Abgeschlossen am"},
+ )
+ durationMs: Optional[int] = Field(
+ default=None,
+ description="Execution duration in milliseconds",
+ json_schema_extra={"frontend_type": "number", "frontend_readonly": True, "frontend_required": False, "label": "Dauer (ms)"},
+ )
+ tokensUsed: int = Field(
+ default=0,
+ description="Tokens consumed by this step",
+ json_schema_extra={"frontend_type": "number", "frontend_readonly": True, "frontend_required": False, "label": "Verbrauchte Tokens"},
+ )
+ retryCount: int = Field(
+ default=0,
+ description="Number of retries executed",
+ json_schema_extra={"frontend_type": "number", "frontend_readonly": True, "frontend_required": False, "label": "Wiederholungen"},
+ )
+
+
+# ---------------------------------------------------------------------------
+# AutoTask
+# ---------------------------------------------------------------------------
+
+@i18nModel("Aufgabe")
+class AutoTask(PowerOnModel):
+ id: str = Field(
+ default_factory=lambda: str(uuid.uuid4()),
+ description="Primary key",
+ json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False, "label": "ID"},
+ )
+ runId: str = Field(
+ description="FK -> AutoRun",
+ json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": True, "label": "Lauf-ID"},
+ )
+ workflowId: str = Field(
+ description="Workflow ID",
+ json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": True, "label": "Workflow-ID"},
+ )
+ nodeId: str = Field(
+ description="Node ID in the graph",
+ json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": True, "label": "Knoten-ID"},
+ )
+ nodeType: str = Field(
+ description="Node type: form, approval, upload, comment, review, selection, confirmation",
+ json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": True, "label": "Knotentyp"},
+ )
+ config: Dict[str, Any] = Field(
+ default_factory=dict,
+ description="Node config (form schema, approval text, etc.)",
+ json_schema_extra={"frontend_type": "textarea", "frontend_required": False, "label": "Konfiguration"},
+ )
+ assigneeId: Optional[str] = Field(
+ default=None,
+ description="User ID assigned to complete the task",
+ json_schema_extra={"frontend_type": "text", "frontend_readonly": False, "frontend_required": False, "label": "Zugewiesen an"},
+ )
+ status: str = Field(
+ default=AutoTaskStatus.PENDING.value,
+ description="Status: pending, completed, cancelled, expired",
+ json_schema_extra={"frontend_type": "text", "frontend_required": False, "label": "Status"},
+ )
+ result: Optional[Dict[str, Any]] = Field(
+ default=None,
+ description="Task result (form data, approval decision, etc.)",
+ json_schema_extra={"frontend_type": "textarea", "frontend_required": False, "label": "Ergebnis"},
+ )
+ expiresAt: Optional[float] = Field(
+ default=None,
+ description="Expiration timestamp for the task",
+ json_schema_extra={"frontend_type": "datetime", "frontend_required": False, "label": "Läuft ab am"},
+ )
+
+
+# ---------------------------------------------------------------------------
+# Backward-compatible aliases for transition period
+# ---------------------------------------------------------------------------
+
+Automation2Workflow = AutoWorkflow
+Automation2WorkflowRun = AutoRun
+Automation2HumanTask = AutoTask
diff --git a/modules/features/automation2/emailPoller.py b/modules/features/graphicalEditor/emailPoller.py
similarity index 97%
rename from modules/features/automation2/emailPoller.py
rename to modules/features/graphicalEditor/emailPoller.py
index ca440ca2..7c769463 100644
--- a/modules/features/automation2/emailPoller.py
+++ b/modules/features/graphicalEditor/emailPoller.py
@@ -25,8 +25,8 @@ async def _pollEmailWaits(eventUser) -> None:
Stops the poller when no runs are waiting.
"""
try:
- from modules.features.automation2.interfaceFeatureAutomation2 import getAutomation2Interface
- from modules.features.automation2.mainAutomation2 import getAutomation2Services
+ from modules.features.graphicalEditor.interfaceFeatureGraphicalEditor import getGraphicalEditorInterface as getAutomation2Interface
+ from modules.features.graphicalEditor.mainGraphicalEditor import getGraphicalEditorServices as getAutomation2Services
from modules.workflows.automation2.executionEngine import executeGraph
from modules.workflows.processing.shared.methodDiscovery import discoverMethods
from modules.interfaces.interfaceDbApp import getRootInterface
diff --git a/modules/features/automation2/entryPoints.py b/modules/features/graphicalEditor/entryPoints.py
similarity index 88%
rename from modules/features/automation2/entryPoints.py
rename to modules/features/graphicalEditor/entryPoints.py
index 2bcc74ce..9ade2e96 100644
--- a/modules/features/automation2/entryPoints.py
+++ b/modules/features/graphicalEditor/entryPoints.py
@@ -30,22 +30,20 @@ def default_manual_entry_point() -> Dict[str, Any]:
"kind": "manual",
"category": "on_demand",
"enabled": True,
- "title": {
- "de": "Jetzt ausführen",
- "en": "Run now",
- "fr": "Exécuter",
- },
+ "title": "Jetzt ausführen",
"description": {},
"config": {},
}
-def _normalize_title(title: Any) -> Dict[str, str]:
+def _normalize_title(title: Any) -> str:
+ """Extract a plain string from a title value for storage (not display)."""
if isinstance(title, dict):
- return {k: str(v) for k, v in title.items() if v is not None}
+ picked = title.get("xx") or next((v for v in title.values() if v), None)
+ return str(picked).strip() if picked else "Start"
if isinstance(title, str) and title.strip():
- return {"de": title, "en": title, "fr": title}
- return {"de": "Start", "en": "Start", "fr": "Départ"}
+ return title.strip()
+ return "Start"
def normalize_invocation_entry(raw: Dict[str, Any]) -> Dict[str, Any]:
diff --git a/modules/features/automation2/interfaceFeatureAutomation2.py b/modules/features/graphicalEditor/interfaceFeatureGraphicalEditor.py
similarity index 58%
rename from modules/features/automation2/interfaceFeatureAutomation2.py
rename to modules/features/graphicalEditor/interfaceFeatureGraphicalEditor.py
index cec51181..c98135ad 100644
--- a/modules/features/automation2/interfaceFeatureAutomation2.py
+++ b/modules/features/graphicalEditor/interfaceFeatureGraphicalEditor.py
@@ -1,8 +1,8 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""
-Interface for Automation2 feature - Workflows, Runs, Human Tasks.
-Uses PostgreSQL poweron_automation2 database.
+Interface for GraphicalEditor feature - Workflows, Runs, Human Tasks.
+Uses PostgreSQL poweron_graphicaleditor database (Greenfield).
"""
import base64
@@ -25,38 +25,50 @@ def _make_json_serializable(obj: Any) -> Any:
return obj
from modules.datamodels.datamodelUam import User
-from modules.features.automation2.datamodelFeatureAutomation2 import (
- Automation2Workflow,
- Automation2WorkflowRun,
- Automation2HumanTask,
+from modules.features.graphicalEditor.datamodelFeatureGraphicalEditor import (
+ AutoWorkflow,
+ AutoVersion,
+ AutoRun,
+ AutoStepLog,
+ AutoTask,
+ AutoWorkflow as Automation2Workflow,
+ AutoRun as Automation2WorkflowRun,
+ AutoTask as Automation2HumanTask,
)
-from modules.features.automation2.entryPoints import normalize_invocations_list
+from modules.features.graphicalEditor.entryPoints import normalize_invocations_list
from modules.connectors.connectorDbPostgre import DatabaseConnector
from modules.shared.configuration import APP_CONFIG
logger = logging.getLogger(__name__)
+_GREENFIELD_DB = "poweron_graphicaleditor"
+_CALLBACK_WORKFLOW_CHANGED = "graphicalEditor.workflow.changed"
-def getAutomation2Interface(
+
+def getGraphicalEditorInterface(
currentUser: User,
mandateId: str,
featureInstanceId: str,
-) -> "Automation2Objects":
- """Factory for Automation2 interface with user context."""
- return Automation2Objects(
+) -> "GraphicalEditorObjects":
+ """Factory for GraphicalEditor interface with user context."""
+ return GraphicalEditorObjects(
currentUser=currentUser,
mandateId=mandateId,
featureInstanceId=featureInstanceId,
)
+# Backward-compatible alias used by workflows/automation2/ execution engine
+getAutomation2Interface = getGraphicalEditorInterface
+
+
def getAllWorkflowsForScheduling() -> List[Dict[str, Any]]:
"""
- Get all active Automation2 workflows that have a schedule entry point (primary invocation).
+ Get all active workflows that have a schedule entry point (primary invocation).
Used by the scheduler to register cron jobs. Does not filter by mandate/instance.
"""
dbHost = APP_CONFIG.get("DB_HOST", "localhost")
- dbDatabase = "poweron_automation2"
+ dbDatabase = _GREENFIELD_DB
dbUser = APP_CONFIG.get("DB_USER")
dbPassword = APP_CONFIG.get("DB_PASSWORD_SECRET") or APP_CONFIG.get("DB_PASSWORD")
dbPort = int(APP_CONFIG.get("DB_PORT", 5432))
@@ -69,10 +81,8 @@ def getAllWorkflowsForScheduling() -> List[Dict[str, Any]]:
userId=None,
)
if not connector._ensureTableExists(Automation2Workflow):
- logger.warning("Automation2 schedule: table Automation2Workflow does not exist")
+ logger.warning("GraphicalEditor schedule: table Automation2Workflow does not exist yet")
return []
- # Don't filter by active in SQL: existing workflows may have active=NULL.
- # Treat NULL as active; skip only when active is explicitly False.
records = connector.getRecordset(
Automation2Workflow,
recordFilter=None,
@@ -89,7 +99,6 @@ def getAllWorkflowsForScheduling() -> List[Dict[str, Any]]:
if not isinstance(primary, dict):
primary = {}
- # Cron comes from graph start node params (trigger.schedule)
graph = wf.get("graph") or {}
nodes = graph.get("nodes") or []
cron = None
@@ -103,7 +112,6 @@ def getAllWorkflowsForScheduling() -> List[Dict[str, Any]]:
if not cron or not isinstance(cron, str) or not cron.strip():
continue
- # Prefer invocations; if graph has trigger.schedule but invocations say manual, still schedule
if primary.get("kind") == "schedule" and primary.get("enabled", True):
entry_point_id = primary.get("id")
elif invocations and isinstance(invocations[0], dict) and invocations[0].get("id"):
@@ -120,15 +128,15 @@ def getAllWorkflowsForScheduling() -> List[Dict[str, Any]]:
"workflow": wf,
})
logger.info(
- "Automation2 schedule: DB has %d workflow(s), %d active with trigger.schedule+cron",
+ "GraphicalEditor schedule: DB has %d workflow(s), %d active with trigger.schedule+cron",
raw_count,
len(result),
)
return result
-class Automation2Objects:
- """Interface for Automation2 database operations."""
+class GraphicalEditorObjects:
+ """Interface for GraphicalEditor database operations (Greenfield DB)."""
def __init__(
self,
@@ -145,9 +153,9 @@ class Automation2Objects:
self.db.updateContext(self.userId)
def _init_db(self):
- """Initialize database connection to poweron_automation2."""
+ """Initialize database connection to poweron_graphicaleditor (Greenfield)."""
dbHost = APP_CONFIG.get("DB_HOST", "localhost")
- dbDatabase = "poweron_automation2"
+ dbDatabase = _GREENFIELD_DB
dbUser = APP_CONFIG.get("DB_USER")
dbPassword = APP_CONFIG.get("DB_PASSWORD_SECRET") or APP_CONFIG.get("DB_PASSWORD")
dbPort = int(APP_CONFIG.get("DB_PORT", 5432))
@@ -159,16 +167,14 @@ class Automation2Objects:
dbPort=dbPort,
userId=self.userId,
)
- logger.debug("Automation2 database initialized for user %s", self.userId)
+ logger.debug("GraphicalEditor database initialized for user %s", self.userId)
# -------------------------------------------------------------------------
# Workflow CRUD
# -------------------------------------------------------------------------
def getWorkflows(self, active: Optional[bool] = None) -> List[Dict[str, Any]]:
- """Get all workflows for this mandate and feature instance.
- Optional active filter: True=only active, False=only inactive, None=all.
- """
+ """Get all workflows for this mandate and feature instance."""
if not self.db._ensureTableExists(Automation2Workflow):
return []
rf: Dict[str, Any] = {
@@ -218,7 +224,7 @@ class Automation2Objects:
out["invocations"] = normalize_invocations_list(out.get("invocations"))
try:
from modules.shared.callbackRegistry import callbackRegistry
- callbackRegistry.trigger("automation2.workflow.changed")
+ callbackRegistry.trigger(_CALLBACK_WORKFLOW_CHANGED)
except Exception:
pass
return out
@@ -228,7 +234,6 @@ class Automation2Objects:
existing = self.getWorkflow(workflowId)
if not existing:
return None
- # Don't overwrite mandateId/featureInstanceId
data.pop("mandateId", None)
data.pop("featureInstanceId", None)
if "invocations" in data:
@@ -238,7 +243,7 @@ class Automation2Objects:
out["invocations"] = normalize_invocations_list(out.get("invocations"))
try:
from modules.shared.callbackRegistry import callbackRegistry
- callbackRegistry.trigger("automation2.workflow.changed")
+ callbackRegistry.trigger(_CALLBACK_WORKFLOW_CHANGED)
except Exception:
pass
return out
@@ -251,7 +256,7 @@ class Automation2Objects:
self.db.recordDelete(Automation2Workflow, workflowId)
try:
from modules.shared.callbackRegistry import callbackRegistry
- callbackRegistry.trigger("automation2.workflow.changed")
+ callbackRegistry.trigger(_CALLBACK_WORKFLOW_CHANGED)
except Exception:
pass
return True
@@ -260,15 +265,24 @@ class Automation2Objects:
# Workflow Runs
# -------------------------------------------------------------------------
- def createRun(self, workflowId: str, nodeOutputs: Dict = None, context: Dict = None) -> Dict[str, Any]:
- """Create a new workflow run."""
+ def createRun(self, workflowId: str, nodeOutputs: Dict = None, context: Dict = None, label: str = None) -> Dict[str, Any]:
+ """Create a new workflow run.
+
+ *label* – human-readable name persisted on the run. Callers should
+ pass the workflow label or a descriptive name; ``executeGraph`` fills
+ in a fallback when nothing is provided.
+ """
+ ctx = context or {}
data = {
"id": str(uuid.uuid4()),
"workflowId": workflowId,
+ "label": label,
"status": "running",
"nodeOutputs": _make_json_serializable(nodeOutputs or {}),
"currentNodeId": None,
- "context": context or {},
+ "context": ctx,
+ "mandateId": ctx.get("mandateId") or self.mandateId,
+ "ownerId": ctx.get("userId") or (self.currentUser.id if self.currentUser else None),
}
created = self.db.recordCreate(Automation2WorkflowRun, data)
return dict(created)
@@ -322,7 +336,7 @@ class Automation2Objects:
return [dict(r) for r in records] if records else []
def getRecentCompletedRuns(self, limit: int = 20) -> List[Dict[str, Any]]:
- """Get recently completed runs for workflows in this instance (for output display)."""
+ """Get recent runs (all statuses) for workflows in this instance."""
if not self.db._ensureTableExists(Automation2WorkflowRun):
return []
workflows = self.getWorkflows()
@@ -331,7 +345,7 @@ class Automation2Objects:
return []
records = self.db.getRecordset(
Automation2WorkflowRun,
- recordFilter={"status": "completed"},
+ recordFilter={},
)
if not records:
return []
@@ -426,10 +440,7 @@ class Automation2Objects:
status: str = None,
assigneeId: str = None,
) -> List[Dict[str, Any]]:
- """Get tasks with optional filters.
- When assigneeId is set: returns tasks assigned to that user OR unassigned (so schedule tasks show up).
- When assigneeId is None: returns all tasks.
- """
+ """Get tasks with optional filters."""
if not self.db._ensureTableExists(Automation2HumanTask):
return []
base_rf: Dict[str, Any] = {}
@@ -461,3 +472,187 @@ class Automation2Objects:
workflows = {w["id"]: w for w in self.getWorkflows()}
filtered = [t for t in items if t.get("workflowId") in workflows]
return filtered
+
+
+ # -------------------------------------------------------------------------
+ # Versions (AutoVersion Lifecycle)
+ # -------------------------------------------------------------------------
+
+ def getVersions(self, workflowId: str) -> List[Dict[str, Any]]:
+ """Get all versions for a workflow, ordered by versionNumber desc."""
+ if not self.db._ensureTableExists(AutoVersion):
+ return []
+ records = self.db.getRecordset(AutoVersion, recordFilter={"workflowId": workflowId})
+ versions = [dict(r) for r in records] if records else []
+ versions.sort(key=lambda v: v.get("versionNumber", 0), reverse=True)
+ return versions
+
+ def getVersion(self, versionId: str) -> Optional[Dict[str, Any]]:
+ """Get a single version by ID."""
+ if not self.db._ensureTableExists(AutoVersion):
+ return None
+ record = self.db.getRecord(AutoVersion, versionId)
+ return dict(record) if record else None
+
+ def createDraftVersion(self, workflowId: str) -> Optional[Dict[str, Any]]:
+ """Create a new draft version from the workflow's current graph."""
+ wf = self.getWorkflow(workflowId)
+ if not wf:
+ return None
+ existing = self.getVersions(workflowId)
+ nextNumber = max((v.get("versionNumber", 0) for v in existing), default=0) + 1
+ import time
+ data = {
+ "id": str(uuid.uuid4()),
+ "workflowId": workflowId,
+ "versionNumber": nextNumber,
+ "status": "draft",
+ "graph": wf.get("graph", {}),
+ "invocations": wf.get("invocations", []),
+ }
+ created = self.db.recordCreate(AutoVersion, data)
+ return dict(created)
+
+ def publishVersion(self, versionId: str, userId: str = None) -> Optional[Dict[str, Any]]:
+ """Publish a draft version. Archives the previously published version."""
+ version = self.getVersion(versionId)
+ if not version or version.get("status") != "draft":
+ return None
+ workflowId = version.get("workflowId")
+ existing = self.getVersions(workflowId)
+ for v in existing:
+ if v.get("status") == "published" and v.get("id") != versionId:
+ self.db.recordModify(AutoVersion, v["id"], {"status": "archived"})
+ import time
+ updated = self.db.recordModify(AutoVersion, versionId, {
+ "status": "published",
+ "publishedAt": time.time(),
+ "publishedBy": userId,
+ })
+ if workflowId:
+ self.db.recordModify(AutoWorkflow, workflowId, {
+ "currentVersionId": versionId,
+ "graph": version.get("graph", {}),
+ "invocations": version.get("invocations", []),
+ })
+ return dict(updated)
+
+ def unpublishVersion(self, versionId: str) -> Optional[Dict[str, Any]]:
+ """Revert a published version back to draft status."""
+ version = self.getVersion(versionId)
+ if not version or version.get("status") != "published":
+ return None
+ workflowId = version.get("workflowId")
+ updated = self.db.recordModify(AutoVersion, versionId, {
+ "status": "draft",
+ "publishedAt": None,
+ "publishedBy": None,
+ })
+ if workflowId:
+ self.db.recordModify(AutoWorkflow, workflowId, {"currentVersionId": None})
+ return dict(updated)
+
+ def archiveVersion(self, versionId: str) -> Optional[Dict[str, Any]]:
+ """Archive a version."""
+ version = self.getVersion(versionId)
+ if not version:
+ return None
+ updated = self.db.recordModify(AutoVersion, versionId, {"status": "archived"})
+ return dict(updated)
+
+ # -------------------------------------------------------------------------
+ # Templates
+ # -------------------------------------------------------------------------
+
+ def getTemplates(self, scope: str = None) -> List[Dict[str, Any]]:
+ """Get workflow templates, optionally filtered by scope.
+ Always includes system-scope templates (mandateId=None) alongside mandate-owned ones.
+ """
+ if not self.db._ensureTableExists(AutoWorkflow):
+ return []
+ rf: Dict[str, Any] = {
+ "mandateId": self.mandateId,
+ "featureInstanceId": self.featureInstanceId,
+ "isTemplate": True,
+ }
+ if scope:
+ rf["templateScope"] = scope
+ records = self.db.getRecordset(AutoWorkflow, recordFilter=rf) or []
+
+ if scope is None or scope == "system":
+ systemFilter: Dict[str, Any] = {
+ "isTemplate": True,
+ "templateScope": "system",
+ "mandateId": None,
+ }
+ systemRecords = self.db.getRecordset(AutoWorkflow, recordFilter=systemFilter) or []
+ seenIds = {(r.get("id") if isinstance(r, dict) else getattr(r, "id", None)) for r in records}
+ for sr in systemRecords:
+ srId = sr.get("id") if isinstance(sr, dict) else getattr(sr, "id", None)
+ if srId not in seenIds:
+ records.append(sr)
+
+ return [dict(r) for r in records] if records else []
+
+ def createTemplateFromWorkflow(self, workflowId: str, scope: str = "user") -> Optional[Dict[str, Any]]:
+ """Create a template by copying the published AutoVersion's graph (or workflow graph as fallback)."""
+ wf = self.getWorkflow(workflowId)
+ if not wf:
+ return None
+ graph = wf.get("graph", {})
+ invocations = wf.get("invocations", [])
+ currentVersionId = wf.get("currentVersionId")
+ if currentVersionId:
+ version = self.getVersion(currentVersionId)
+ if version:
+ graph = version.get("graph", graph)
+ invocations = version.get("invocations", invocations)
+ data = {
+ "id": str(uuid.uuid4()),
+ "mandateId": self.mandateId,
+ "featureInstanceId": self.featureInstanceId,
+ "label": f"{wf.get('label', 'Workflow')} (Template)",
+ "graph": graph,
+ "invocations": invocations,
+ "isTemplate": True,
+ "templateScope": scope,
+ "templateSourceId": workflowId,
+ "active": False,
+ }
+ created = self.db.recordCreate(AutoWorkflow, data)
+ return dict(created)
+
+ def copyTemplateToUser(self, templateId: str) -> Optional[Dict[str, Any]]:
+ """Copy a template to a new user-owned workflow with templateScope='user'."""
+ template = self.getWorkflow(templateId)
+ if not template or not template.get("isTemplate"):
+ return None
+ data = {
+ "id": str(uuid.uuid4()),
+ "mandateId": self.mandateId,
+ "featureInstanceId": self.featureInstanceId,
+ "label": template.get("label", "Workflow").replace(" (Template)", ""),
+ "graph": template.get("graph", {}),
+ "invocations": template.get("invocations", []),
+ "isTemplate": False,
+ "templateSourceId": templateId,
+ "templateScope": "user",
+ "active": True,
+ }
+ created = self.db.recordCreate(AutoWorkflow, data)
+ return dict(created)
+
+ def shareTemplate(self, templateId: str, scope: str) -> Optional[Dict[str, Any]]:
+ """Change a template's scope. Sets sharedReadOnly=True for shared scopes, False for user scope."""
+ template = self.getWorkflow(templateId)
+ if not template or not template.get("isTemplate"):
+ return None
+ updated = self.db.recordModify(AutoWorkflow, templateId, {
+ "templateScope": scope,
+ "sharedReadOnly": scope != "user",
+ })
+ return dict(updated)
+
+
+# Backward-compatible alias
+Automation2Objects = GraphicalEditorObjects
diff --git a/modules/features/automation2/mainAutomation2.py b/modules/features/graphicalEditor/mainGraphicalEditor.py
similarity index 68%
rename from modules/features/automation2/mainAutomation2.py
rename to modules/features/graphicalEditor/mainGraphicalEditor.py
index 80c8f854..a2bff9bc 100644
--- a/modules/features/automation2/mainAutomation2.py
+++ b/modules/features/graphicalEditor/mainGraphicalEditor.py
@@ -1,7 +1,7 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
"""
-Automation2 Feature - n8n-style flow automation.
+GraphicalEditor Feature - n8n-style flow automation.
Minimal bootstrap for feature instance creation. Build from here.
"""
@@ -10,9 +10,8 @@ from typing import Dict, List, Any, Optional
logger = logging.getLogger(__name__)
-FEATURE_CODE = "automation2"
+FEATURE_CODE = "graphicalEditor"
-# Services required for automation2 (methodDiscovery, ActionExecutor, etc.)
REQUIRED_SERVICES = [
{"serviceKey": "chat", "meta": {"usage": "Interfaces, RBAC"}},
{"serviceKey": "utils", "meta": {"usage": "Timestamps, utilities"}},
@@ -22,83 +21,78 @@ REQUIRED_SERVICES = [
{"serviceKey": "clickup", "meta": {"usage": "ClickUp actions"}},
{"serviceKey": "generation", "meta": {"usage": "file.create document rendering"}},
]
-FEATURE_LABEL = {"en": "Automation 2", "de": "Automatisierung 2", "fr": "Automatisation 2"}
+FEATURE_LABEL = "Grafischer Editor"
FEATURE_ICON = "mdi-sitemap"
UI_OBJECTS = [
{
- "objectKey": "ui.feature.automation2.editor",
- "label": {"en": "Editor", "de": "Editor", "fr": "Éditeur"},
+ "objectKey": "ui.feature.graphicalEditor.editor",
+ "label": "Editor",
"meta": {"area": "editor"}
},
{
- "objectKey": "ui.feature.automation2.workflows",
- "label": {"en": "Workflows", "de": "Workflows", "fr": "Workflows"},
+ "objectKey": "ui.feature.graphicalEditor.workflows",
+ "label": "Workflows",
"meta": {"area": "workflows"}
},
{
- "objectKey": "ui.feature.automation2.workflows-tasks",
- "label": {"en": "Tasks", "de": "Tasks", "fr": "Tâches"},
+ "objectKey": "ui.feature.graphicalEditor.templates",
+ "label": "Vorlagen",
+ "meta": {"area": "templates"}
+ },
+ {
+ "objectKey": "ui.feature.graphicalEditor.workflows-tasks",
+ "label": "Tasks",
"meta": {"area": "tasks"}
},
]
RESOURCE_OBJECTS = [
{
- "objectKey": "resource.feature.automation2.dashboard",
- "label": {"en": "Access Dashboard", "de": "Dashboard aufrufen", "fr": "Acceder au tableau de bord"},
- "meta": {"endpoint": "/api/automation2/{instanceId}/info", "method": "GET"}
+ "objectKey": "resource.feature.graphicalEditor.dashboard",
+ "label": "Dashboard aufrufen",
+ "meta": {"endpoint": "/api/workflows/{instanceId}/info", "method": "GET"}
},
{
- "objectKey": "resource.feature.automation2.node-types",
- "label": {"en": "Get Node Types", "de": "Node-Typen abrufen", "fr": "Obtenir types de nœuds"},
- "meta": {"endpoint": "/api/automation2/{instanceId}/node-types", "method": "GET"}
+ "objectKey": "resource.feature.graphicalEditor.node-types",
+ "label": "Node-Typen abrufen",
+ "meta": {"endpoint": "/api/workflows/{instanceId}/node-types", "method": "GET"}
},
{
- "objectKey": "resource.feature.automation2.execute",
- "label": {"en": "Execute Workflow", "de": "Workflow ausführen", "fr": "Exécuter le workflow"},
- "meta": {"endpoint": "/api/automation2/{instanceId}/execute", "method": "POST"}
+ "objectKey": "resource.feature.graphicalEditor.execute",
+ "label": "Workflow ausführen",
+ "meta": {"endpoint": "/api/workflows/{instanceId}/execute", "method": "POST"}
},
]
TEMPLATE_ROLES = [
{
- "roleLabel": "automation2-viewer",
- "description": {
- "en": "Automation2 Viewer - View workflows (read-only)",
- "de": "Automation2 Betrachter - Workflows ansehen (nur lesen)",
- "fr": "Visualiseur Automation2 - Consulter les workflows (lecture seule)",
- },
+ "roleLabel": "graphicalEditor-viewer",
+ "description": "Grafischer Editor Betrachter - Workflows ansehen (nur lesen)",
"accessRules": [
- {"context": "UI", "item": "ui.feature.automation2.workflows", "view": True},
- {"context": "UI", "item": "ui.feature.automation2.workflows-tasks", "view": True},
+ {"context": "UI", "item": "ui.feature.graphicalEditor.workflows", "view": True},
+ {"context": "UI", "item": "ui.feature.graphicalEditor.workflows-tasks", "view": True},
+ {"context": "UI", "item": "ui.feature.graphicalEditor.templates", "view": True},
{"context": "DATA", "item": None, "view": True, "read": "m", "create": "n", "update": "n", "delete": "n"},
],
},
{
- "roleLabel": "automation2-user",
- "description": {
- "en": "Automation2 User - Use automation2 flow builder",
- "de": "Automation2 Benutzer - Flow-Builder nutzen",
- "fr": "Utilisateur Automation2 - Utiliser le flow builder",
- },
+ "roleLabel": "graphicalEditor-user",
+ "description": "Grafischer Editor Benutzer - Flow-Builder nutzen",
"accessRules": [
- {"context": "UI", "item": "ui.feature.automation2.editor", "view": True},
- {"context": "UI", "item": "ui.feature.automation2.workflows", "view": True},
- {"context": "UI", "item": "ui.feature.automation2.workflows-tasks", "view": True},
- {"context": "RESOURCE", "item": "resource.feature.automation2.dashboard", "view": True},
- {"context": "RESOURCE", "item": "resource.feature.automation2.node-types", "view": True},
- {"context": "RESOURCE", "item": "resource.feature.automation2.execute", "view": True},
+ {"context": "UI", "item": "ui.feature.graphicalEditor.editor", "view": True},
+ {"context": "UI", "item": "ui.feature.graphicalEditor.workflows", "view": True},
+ {"context": "UI", "item": "ui.feature.graphicalEditor.workflows-tasks", "view": True},
+ {"context": "UI", "item": "ui.feature.graphicalEditor.templates", "view": True},
+ {"context": "RESOURCE", "item": "resource.feature.graphicalEditor.dashboard", "view": True},
+ {"context": "RESOURCE", "item": "resource.feature.graphicalEditor.node-types", "view": True},
+ {"context": "RESOURCE", "item": "resource.feature.graphicalEditor.execute", "view": True},
{"context": "DATA", "item": None, "view": True, "read": "m", "create": "m", "update": "m", "delete": "m"},
],
},
{
- "roleLabel": "automation2-admin",
- "description": {
- "en": "Automation2 Admin - Full UI and API for the instance; data remains user-scoped (MY)",
- "de": "Automation2 Admin - Volle UI und API für die Instanz; Daten weiterhin benutzerspezifisch (MY)",
- "fr": "Administrateur Automation2 - UI et API complets pour l'instance; donnees limitees a l'utilisateur (MY)",
- },
+ "roleLabel": "graphicalEditor-admin",
+ "description": "Grafischer Editor Admin - Volle UI und API für die Instanz; Daten weiterhin benutzerspezifisch (MY)",
"accessRules": [
{"context": "UI", "item": None, "view": True},
{"context": "RESOURCE", "item": None, "view": True},
@@ -113,14 +107,14 @@ def getRequiredServiceKeys() -> List[str]:
return [s["serviceKey"] for s in REQUIRED_SERVICES]
-def getAutomation2Services(
+def getGraphicalEditorServices(
user,
mandateId: Optional[str] = None,
featureInstanceId: Optional[str] = None,
workflow=None,
-) -> "_Automation2ServiceHub":
+) -> "_GraphicalEditorServiceHub":
"""
- Get a service hub for automation2 using the service center.
+ Get a service hub for graphicalEditor using the service center.
Used for methodDiscovery (I/O nodes) and execution (ActionExecutor).
"""
from modules.serviceCenter import getService
@@ -128,10 +122,11 @@ def getAutomation2Services(
_workflow = workflow
if _workflow is None:
+ import uuid as _uuid
_workflow = type(
"_Placeholder",
(),
- {"featureCode": FEATURE_CODE, "id": None, "workflowMode": None, "messages": []},
+ {"featureCode": FEATURE_CODE, "id": f"transient-{_uuid.uuid4().hex[:12]}", "workflowMode": None, "messages": []},
)()
ctx = ServiceCenterContext(
@@ -141,12 +136,12 @@ def getAutomation2Services(
workflow=_workflow,
)
- hub = _Automation2ServiceHub()
+ hub = _GraphicalEditorServiceHub()
hub.user = user
hub.mandateId = mandateId
hub.featureInstanceId = featureInstanceId
hub._service_context = ctx
- hub.workflow = workflow
+ hub.workflow = _workflow
hub.featureCode = FEATURE_CODE
for spec in REQUIRED_SERVICES:
@@ -155,7 +150,7 @@ def getAutomation2Services(
svc = getService(key, ctx)
setattr(hub, key, svc)
except Exception as e:
- logger.warning(f"Could not resolve service '{key}' for automation2: {e}")
+ logger.warning(f"Could not resolve service '{key}' for graphicalEditor: {e}")
setattr(hub, key, None)
if hub.chat:
@@ -167,8 +162,12 @@ def getAutomation2Services(
return hub
-class _Automation2ServiceHub:
- """Lightweight hub for automation2 (methodDiscovery, execution)."""
+# Backward-compatible alias used by the legacy workflows/automation2 execution engine
+getAutomation2Services = getGraphicalEditorServices
+
+
+class _GraphicalEditorServiceHub:
+ """Lightweight hub for graphicalEditor (methodDiscovery, execution)."""
user = None
mandateId = None
@@ -190,12 +189,16 @@ class _Automation2ServiceHub:
async def onStart(eventUser) -> None:
- """Feature startup. Email poller is started on-demand when a run pauses for email.checkEmail."""
+ """Feature startup: start consolidated scheduler."""
+ from modules.workflows.scheduler.mainScheduler import start as startScheduler
+ startScheduler(eventUser)
async def onStop(eventUser) -> None:
- """Feature shutdown - remove email poller if running."""
- from modules.features.automation2.emailPoller import stop as stopEmailPoller
+ """Feature shutdown - stop scheduler and email poller."""
+ from modules.workflows.scheduler.mainScheduler import stop as stopScheduler
+ stopScheduler()
+ from modules.features.graphicalEditor.emailPoller import stop as stopEmailPoller
stopEmailPoller(eventUser)
@@ -257,6 +260,7 @@ def _syncTemplateRolesToDb() -> int:
try:
from modules.interfaces.interfaceDbApp import getRootInterface
from modules.datamodels.datamodelRbac import Role
+ from modules.datamodels.datamodelUtils import coerce_text_multilingual
rootInterface = getRootInterface()
existingRoles = rootInterface.getRolesByFeatureCode(FEATURE_CODE)
@@ -270,7 +274,7 @@ def _syncTemplateRolesToDb() -> int:
else:
newRole = Role(
roleLabel=roleLabel,
- description=template.get("description", {}),
+ description=coerce_text_multilingual(template.get("description", {})),
featureCode=FEATURE_CODE,
mandateId=None,
featureInstanceId=None,
@@ -283,7 +287,6 @@ def _syncTemplateRolesToDb() -> int:
_ensureAccessRulesForRole(rootInterface, roleId, template.get("accessRules", []))
- # Sync same rules to mandate-specific roles (so Workflows & Tasks etc. appear in sidebar)
for r in existingRoles:
if r.mandateId and r.roleLabel == roleLabel:
added = _ensureAccessRulesForRole(
diff --git a/modules/features/automation2/nodeDefinitions/__init__.py b/modules/features/graphicalEditor/nodeDefinitions/__init__.py
similarity index 83%
rename from modules/features/automation2/nodeDefinitions/__init__.py
rename to modules/features/graphicalEditor/nodeDefinitions/__init__.py
index 2f4920c8..ab41094b 100644
--- a/modules/features/automation2/nodeDefinitions/__init__.py
+++ b/modules/features/graphicalEditor/nodeDefinitions/__init__.py
@@ -9,6 +9,8 @@ from .email import EMAIL_NODES
from .sharepoint import SHAREPOINT_NODES
from .clickup import CLICKUP_NODES
from .file import FILE_NODES
+from .trustee import TRUSTEE_NODES
+from .data import DATA_NODES
STATIC_NODE_TYPES = (
TRIGGER_NODES
@@ -19,4 +21,6 @@ STATIC_NODE_TYPES = (
+ SHAREPOINT_NODES
+ CLICKUP_NODES
+ FILE_NODES
+ + TRUSTEE_NODES
+ + DATA_NODES
)
diff --git a/modules/features/graphicalEditor/nodeDefinitions/ai.py b/modules/features/graphicalEditor/nodeDefinitions/ai.py
new file mode 100644
index 00000000..08e82340
--- /dev/null
+++ b/modules/features/graphicalEditor/nodeDefinitions/ai.py
@@ -0,0 +1,135 @@
+# Copyright (c) 2025 Patrick Motsch
+# AI node definitions - map to methodAi actions.
+
+from modules.shared.i18nRegistry import t
+
+AI_NODES = [
+ {
+ "id": "ai.prompt",
+ "category": "ai",
+ "label": t("Prompt"),
+ "description": t("Prompt eingeben und KI führt aus"),
+ "parameters": [
+ {"name": "aiPrompt", "type": "string", "required": True, "frontendType": "textarea",
+ "description": t("KI-Prompt")},
+ {"name": "outputFormat", "type": "string", "required": False, "frontendType": "select",
+ "frontendOptions": {"options": ["text", "json", "emailDraft"]},
+ "description": t("Ausgabeformat"), "default": "text"},
+ ],
+ "inputs": 1,
+ "outputs": 1,
+ "inputPorts": {0: {"accepts": ["Transit"]}},
+ "outputPorts": {0: {"schema": "AiResult"}},
+ "meta": {"icon": "mdi-robot", "color": "#9C27B0"},
+ "_method": "ai",
+ "_action": "process",
+ },
+ {
+ "id": "ai.webResearch",
+ "category": "ai",
+ "label": t("Web-Recherche"),
+ "description": t("Recherche im Web"),
+ "parameters": [
+ {"name": "prompt", "type": "string", "required": True, "frontendType": "textarea",
+ "description": t("Recherche-Anfrage")},
+ ],
+ "inputs": 1,
+ "outputs": 1,
+ "inputPorts": {0: {"accepts": ["Transit"]}},
+ "outputPorts": {0: {"schema": "AiResult"}},
+ "meta": {"icon": "mdi-magnify", "color": "#9C27B0"},
+ "_method": "ai",
+ "_action": "webResearch",
+ },
+ {
+ "id": "ai.summarizeDocument",
+ "category": "ai",
+ "label": t("Dokument zusammenfassen"),
+ "description": t("Dokumentinhalt zusammenfassen"),
+ "parameters": [
+ {"name": "summaryLength", "type": "string", "required": False, "frontendType": "select",
+ "frontendOptions": {"options": ["short", "medium", "long"]},
+ "description": t("Kurz, mittel oder lang"), "default": "medium"},
+ ],
+ "inputs": 1,
+ "outputs": 1,
+ "inputPorts": {0: {"accepts": ["DocumentList", "Transit"]}},
+ "outputPorts": {0: {"schema": "AiResult"}},
+ "meta": {"icon": "mdi-file-document-outline", "color": "#9C27B0"},
+ "_method": "ai",
+ "_action": "summarizeDocument",
+ },
+ {
+ "id": "ai.translateDocument",
+ "category": "ai",
+ "label": t("Dokument übersetzen"),
+ "description": t("Dokument in Zielsprache übersetzen"),
+ "parameters": [
+ {"name": "targetLanguage", "type": "string", "required": True, "frontendType": "select",
+ "frontendOptions": {"options": ["en", "de", "fr", "it", "es", "pt", "nl"]},
+ "description": t("Zielsprache")},
+ ],
+ "inputs": 1,
+ "outputs": 1,
+ "inputPorts": {0: {"accepts": ["DocumentList", "Transit"]}},
+ "outputPorts": {0: {"schema": "AiResult"}},
+ "meta": {"icon": "mdi-translate", "color": "#9C27B0"},
+ "_method": "ai",
+ "_action": "translateDocument",
+ },
+ {
+ "id": "ai.convertDocument",
+ "category": "ai",
+ "label": t("Dokument konvertieren"),
+ "description": t("Dokument in anderes Format konvertieren"),
+ "parameters": [
+ {"name": "targetFormat", "type": "string", "required": True, "frontendType": "select",
+ "frontendOptions": {"options": ["pdf", "docx", "txt", "html", "md"]},
+ "description": t("Zielformat")},
+ ],
+ "inputs": 1,
+ "outputs": 1,
+ "inputPorts": {0: {"accepts": ["DocumentList", "Transit"]}},
+ "outputPorts": {0: {"schema": "DocumentList"}},
+ "meta": {"icon": "mdi-file-convert", "color": "#9C27B0"},
+ "_method": "ai",
+ "_action": "convertDocument",
+ },
+ {
+ "id": "ai.generateDocument",
+ "category": "ai",
+ "label": t("Dokument generieren"),
+ "description": t("Dokument aus Prompt generieren"),
+ "parameters": [
+ {"name": "prompt", "type": "string", "required": True, "frontendType": "textarea",
+ "description": t("Generierungs-Prompt")},
+ ],
+ "inputs": 1,
+ "outputs": 1,
+ "inputPorts": {0: {"accepts": ["Transit"]}},
+ "outputPorts": {0: {"schema": "DocumentList"}},
+ "meta": {"icon": "mdi-file-plus", "color": "#9C27B0"},
+ "_method": "ai",
+ "_action": "generateDocument",
+ },
+ {
+ "id": "ai.generateCode",
+ "category": "ai",
+ "label": t("Code generieren"),
+ "description": t("Code aus Beschreibung generieren"),
+ "parameters": [
+ {"name": "prompt", "type": "string", "required": True, "frontendType": "textarea",
+ "description": t("Code-Generierungs-Prompt")},
+ {"name": "language", "type": "string", "required": False, "frontendType": "select",
+ "frontendOptions": {"options": ["python", "javascript", "typescript", "java", "csharp", "go"]},
+ "description": t("Programmiersprache"), "default": "python"},
+ ],
+ "inputs": 1,
+ "outputs": 1,
+ "inputPorts": {0: {"accepts": ["Transit"]}},
+ "outputPorts": {0: {"schema": "AiResult"}},
+ "meta": {"icon": "mdi-code-tags", "color": "#9C27B0"},
+ "_method": "ai",
+ "_action": "generateCode",
+ },
+]
diff --git a/modules/features/graphicalEditor/nodeDefinitions/clickup.py b/modules/features/graphicalEditor/nodeDefinitions/clickup.py
new file mode 100644
index 00000000..3f194e16
--- /dev/null
+++ b/modules/features/graphicalEditor/nodeDefinitions/clickup.py
@@ -0,0 +1,178 @@
+# Copyright (c) 2025 Patrick Motsch
+# All rights reserved.
+"""ClickUp nodes — map to MethodClickup actions."""
+
+from modules.shared.i18nRegistry import t
+
+CLICKUP_NODES = [
+ {
+ "id": "clickup.searchTasks",
+ "category": "clickup",
+ "label": t("Aufgaben suchen"),
+ "description": t("Aufgaben in einem Workspace suchen"),
+ "parameters": [
+ {"name": "connectionReference", "type": "string", "required": True, "frontendType": "userConnection",
+ "description": t("ClickUp-Verbindung")},
+ {"name": "teamId", "type": "string", "required": True, "frontendType": "text",
+ "description": t("Team-/Workspace-ID")},
+ {"name": "query", "type": "string", "required": True, "frontendType": "text",
+ "description": t("Suchbegriff")},
+ {"name": "page", "type": "number", "required": False, "frontendType": "number",
+ "description": t("Seite"), "default": 0},
+ {"name": "listId", "type": "string", "required": False, "frontendType": "clickupList",
+ "frontendOptions": {"dependsOn": "connectionReference"},
+ "description": t("In dieser Liste suchen")},
+ {"name": "includeClosed", "type": "boolean", "required": False, "frontendType": "checkbox",
+ "description": t("Erledigte einbeziehen"), "default": False},
+ {"name": "fullTaskData", "type": "boolean", "required": False, "frontendType": "checkbox",
+ "description": t("Vollständige Daten"), "default": False},
+ {"name": "matchNameOnly", "type": "boolean", "required": False, "frontendType": "checkbox",
+ "description": t("Nur Titel"), "default": True},
+ ],
+ "inputs": 1,
+ "outputs": 1,
+ "inputPorts": {0: {"accepts": ["Transit"]}},
+ "outputPorts": {0: {"schema": "TaskList"}},
+ "meta": {"icon": "mdi-magnify", "color": "#7B68EE"},
+ "_method": "clickup",
+ "_action": "searchTasks",
+ },
+ {
+ "id": "clickup.listTasks",
+ "category": "clickup",
+ "label": t("Aufgaben auflisten"),
+ "description": t("Aufgaben einer Liste auflisten"),
+ "parameters": [
+ {"name": "connectionReference", "type": "string", "required": True, "frontendType": "userConnection",
+ "description": t("ClickUp-Verbindung")},
+ {"name": "pathQuery", "type": "string", "required": True, "frontendType": "clickupList",
+ "frontendOptions": {"dependsOn": "connectionReference"},
+ "description": t("Pfad zur Liste")},
+ {"name": "page", "type": "number", "required": False, "frontendType": "number",
+ "description": t("Seite"), "default": 0},
+ {"name": "includeClosed", "type": "boolean", "required": False, "frontendType": "checkbox",
+ "description": t("Erledigte einbeziehen"), "default": False},
+ ],
+ "inputs": 1,
+ "outputs": 1,
+ "inputPorts": {0: {"accepts": ["Transit"]}},
+ "outputPorts": {0: {"schema": "TaskList"}},
+ "meta": {"icon": "mdi-format-list-bulleted", "color": "#7B68EE"},
+ "_method": "clickup",
+ "_action": "listTasks",
+ },
+ {
+ "id": "clickup.getTask",
+ "category": "clickup",
+ "label": t("Aufgabe abrufen"),
+ "description": t("Eine Aufgabe abrufen"),
+ "parameters": [
+ {"name": "connectionReference", "type": "string", "required": True, "frontendType": "userConnection",
+ "description": t("ClickUp-Verbindung")},
+ {"name": "taskId", "type": "string", "required": False, "frontendType": "text",
+ "description": t("Task-ID")},
+ {"name": "pathQuery", "type": "string", "required": False, "frontendType": "text",
+ "description": t("Oder Pfad")},
+ ],
+ "inputs": 1,
+ "outputs": 1,
+ "inputPorts": {0: {"accepts": ["Transit"]}},
+ "outputPorts": {0: {"schema": "TaskResult"}},
+ "meta": {"icon": "mdi-file-document-outline", "color": "#7B68EE"},
+ "_method": "clickup",
+ "_action": "getTask",
+ },
+ {
+ "id": "clickup.createTask",
+ "category": "clickup",
+ "label": t("Aufgabe erstellen"),
+ "description": t("Aufgabe erstellen"),
+ "parameters": [
+ {"name": "connectionReference", "type": "string", "required": True, "frontendType": "userConnection",
+ "description": t("ClickUp-Verbindung")},
+ {"name": "teamId", "type": "string", "required": False, "frontendType": "text",
+ "description": t("Workspace")},
+ {"name": "pathQuery", "type": "string", "required": False, "frontendType": "clickupList",
+ "frontendOptions": {"dependsOn": "connectionReference"},
+ "description": t("Pfad zur Liste")},
+ {"name": "listId", "type": "string", "required": False, "frontendType": "text",
+ "description": t("Listen-ID")},
+ {"name": "name", "type": "string", "required": True, "frontendType": "text",
+ "description": t("Name")},
+ {"name": "description", "type": "string", "required": False, "frontendType": "textarea",
+ "description": t("Beschreibung")},
+ {"name": "taskStatus", "type": "string", "required": False, "frontendType": "text",
+ "description": t("Status")},
+ {"name": "taskPriority", "type": "string", "required": False, "frontendType": "select",
+ "frontendOptions": {"options": ["1", "2", "3", "4"]},
+ "description": t("Priorität 1-4")},
+ {"name": "taskDueDateMs", "type": "string", "required": False, "frontendType": "text",
+ "description": t("Fälligkeit (ms)")},
+ {"name": "taskAssigneeIds", "type": "object", "required": False, "frontendType": "json",
+ "description": t("Zugewiesene")},
+ {"name": "taskTimeEstimateMs", "type": "string", "required": False, "frontendType": "text",
+ "description": t("Zeitschätzung (ms)")},
+ {"name": "taskTimeEstimateHours", "type": "string", "required": False, "frontendType": "text",
+ "description": t("Zeitschätzung (h)")},
+ {"name": "customFieldValues", "type": "object", "required": False, "frontendType": "json",
+ "description": t("Benutzerdefinierte Felder")},
+ {"name": "taskFields", "type": "string", "required": False, "frontendType": "json",
+ "description": t("Zusätzliches JSON")},
+ ],
+ "inputs": 1,
+ "outputs": 1,
+ "inputPorts": {0: {"accepts": ["Transit"]}},
+ "outputPorts": {0: {"schema": "TaskResult"}},
+ "meta": {"icon": "mdi-plus-circle-outline", "color": "#7B68EE"},
+ "_method": "clickup",
+ "_action": "createTask",
+ },
+ {
+ "id": "clickup.updateTask",
+ "category": "clickup",
+ "label": t("Aufgabe aktualisieren"),
+ "description": t("Felder der Aufgabe ändern"),
+ "parameters": [
+ {"name": "connectionReference", "type": "string", "required": True, "frontendType": "userConnection",
+ "description": t("ClickUp-Verbindung")},
+ {"name": "taskId", "type": "string", "required": False, "frontendType": "text",
+ "description": t("Task-ID")},
+ {"name": "path", "type": "string", "required": False, "frontendType": "text",
+ "description": t("Oder Pfad")},
+ {"name": "taskUpdateEntries", "type": "object", "required": False, "frontendType": "keyValueRows",
+ "description": t("Zu ändernde Felder")},
+ {"name": "taskUpdate", "type": "string", "required": False, "frontendType": "json",
+ "description": t("JSON für API")},
+ ],
+ "inputs": 1,
+ "outputs": 1,
+ "inputPorts": {0: {"accepts": ["TaskResult", "Transit"]}},
+ "outputPorts": {0: {"schema": "TaskResult"}},
+ "meta": {"icon": "mdi-pencil-outline", "color": "#7B68EE"},
+ "_method": "clickup",
+ "_action": "updateTask",
+ },
+ {
+ "id": "clickup.uploadAttachment",
+ "category": "clickup",
+ "label": t("Anhang hochladen"),
+ "description": t("Datei an Task anhängen"),
+ "parameters": [
+ {"name": "connectionReference", "type": "string", "required": True, "frontendType": "userConnection",
+ "description": t("ClickUp-Verbindung")},
+ {"name": "taskId", "type": "string", "required": False, "frontendType": "text",
+ "description": t("Task-ID")},
+ {"name": "path", "type": "string", "required": False, "frontendType": "text",
+ "description": t("Oder Pfad")},
+ {"name": "fileName", "type": "string", "required": False, "frontendType": "text",
+ "description": t("Dateiname")},
+ ],
+ "inputs": 1,
+ "outputs": 1,
+ "inputPorts": {0: {"accepts": ["DocumentList", "Transit"]}},
+ "outputPorts": {0: {"schema": "ActionResult"}},
+ "meta": {"icon": "mdi-attachment", "color": "#7B68EE"},
+ "_method": "clickup",
+ "_action": "uploadAttachment",
+ },
+]
diff --git a/modules/features/graphicalEditor/nodeDefinitions/data.py b/modules/features/graphicalEditor/nodeDefinitions/data.py
new file mode 100644
index 00000000..f5eceb16
--- /dev/null
+++ b/modules/features/graphicalEditor/nodeDefinitions/data.py
@@ -0,0 +1,56 @@
+# Copyright (c) 2025 Patrick Motsch
+# Data manipulation node definitions: aggregate, transform, filter.
+
+from modules.shared.i18nRegistry import t
+
+DATA_NODES = [
+ {
+ "id": "data.aggregate",
+ "category": "data",
+ "label": t("Sammeln"),
+ "description": t("Ergebnisse aus Schleifen-Iterationen sammeln"),
+ "parameters": [
+ {"name": "mode", "type": "string", "required": False, "frontendType": "select",
+ "frontendOptions": {"options": ["collect", "concat", "sum", "count"]},
+ "description": t("Aggregationsmodus"), "default": "collect"},
+ ],
+ "inputs": 1,
+ "outputs": 1,
+ "inputPorts": {0: {"accepts": ["Transit"]}},
+ "outputPorts": {0: {"schema": "AggregateResult"}},
+ "executor": "data",
+ "meta": {"icon": "mdi-playlist-plus", "color": "#607D8B"},
+ },
+ {
+ "id": "data.transform",
+ "category": "data",
+ "label": t("Umwandeln"),
+ "description": t("Daten umstrukturieren"),
+ "parameters": [
+ {"name": "mappings", "type": "json", "required": True, "frontendType": "mappingTable",
+ "description": t("Feld-Zuordnungen"), "default": []},
+ ],
+ "inputs": 1,
+ "outputs": 1,
+ "inputPorts": {0: {"accepts": ["Transit"]}},
+ "outputPorts": {0: {"schema": "ActionResult", "dynamic": True, "deriveFrom": "mappings"}},
+ "executor": "data",
+ "meta": {"icon": "mdi-swap-horizontal-bold", "color": "#607D8B"},
+ },
+ {
+ "id": "data.filter",
+ "category": "data",
+ "label": t("Filtern"),
+ "description": t("Elemente nach Bedingung filtern"),
+ "parameters": [
+ {"name": "condition", "type": "string", "required": True, "frontendType": "filterExpression",
+ "description": t("Filterbedingung")},
+ ],
+ "inputs": 1,
+ "outputs": 1,
+ "inputPorts": {0: {"accepts": ["AggregateResult", "FileList", "TaskList", "EmailList", "DocumentList"]}},
+ "outputPorts": {0: {"schema": "Transit"}},
+ "executor": "data",
+ "meta": {"icon": "mdi-filter-outline", "color": "#607D8B"},
+ },
+]
diff --git a/modules/features/graphicalEditor/nodeDefinitions/email.py b/modules/features/graphicalEditor/nodeDefinitions/email.py
new file mode 100644
index 00000000..1978fdfe
--- /dev/null
+++ b/modules/features/graphicalEditor/nodeDefinitions/email.py
@@ -0,0 +1,94 @@
+# Copyright (c) 2025 Patrick Motsch
+# Email node definitions - map to methodOutlook actions.
+
+from modules.shared.i18nRegistry import t
+
+EMAIL_NODES = [
+ {
+ "id": "email.checkEmail",
+ "category": "email",
+ "label": t("E-Mail prüfen"),
+ "description": t("Neue E-Mails prüfen"),
+ "parameters": [
+ {"name": "connectionReference", "type": "string", "required": True, "frontendType": "userConnection",
+ "description": t("E-Mail-Konto Verbindung")},
+ {"name": "folder", "type": "string", "required": False, "frontendType": "text",
+ "description": t("Ordner"), "default": "Inbox"},
+ {"name": "limit", "type": "number", "required": False, "frontendType": "number",
+ "description": t("Max E-Mails"), "default": 100},
+ {"name": "fromAddress", "type": "string", "required": False, "frontendType": "text",
+ "description": t("Nur von dieser Adresse"), "default": ""},
+ {"name": "subjectContains", "type": "string", "required": False, "frontendType": "text",
+ "description": t("Betreff muss enthalten"), "default": ""},
+ {"name": "hasAttachment", "type": "boolean", "required": False, "frontendType": "checkbox",
+ "description": t("Nur mit Anhängen"), "default": False},
+ {"name": "filter", "type": "string", "required": False, "frontendType": "text",
+ "description": t("Erweitert: Filter-Text"), "default": ""},
+ ],
+ "inputs": 1,
+ "outputs": 1,
+ "inputPorts": {0: {"accepts": ["Transit"]}},
+ "outputPorts": {0: {"schema": "EmailList"}},
+ "meta": {"icon": "mdi-email-check", "color": "#1976D2"},
+ "_method": "outlook",
+ "_action": "readEmails",
+ },
+ {
+ "id": "email.searchEmail",
+ "category": "email",
+ "label": t("E-Mail suchen"),
+ "description": t("E-Mails suchen"),
+ "parameters": [
+ {"name": "connectionReference", "type": "string", "required": True, "frontendType": "userConnection",
+ "description": t("E-Mail-Konto Verbindung")},
+ {"name": "query", "type": "string", "required": False, "frontendType": "text",
+ "description": t("Suchbegriff"), "default": ""},
+ {"name": "folder", "type": "string", "required": False, "frontendType": "text",
+ "description": t("Ordner"), "default": "Inbox"},
+ {"name": "limit", "type": "number", "required": False, "frontendType": "number",
+ "description": t("Max E-Mails"), "default": 100},
+ {"name": "fromAddress", "type": "string", "required": False, "frontendType": "text",
+ "description": t("Von Adresse"), "default": ""},
+ {"name": "toAddress", "type": "string", "required": False, "frontendType": "text",
+ "description": t("An Adresse"), "default": ""},
+ {"name": "subjectContains", "type": "string", "required": False, "frontendType": "text",
+ "description": t("Betreff enthält"), "default": ""},
+ {"name": "bodyContains", "type": "string", "required": False, "frontendType": "text",
+ "description": t("Inhalt enthält"), "default": ""},
+ {"name": "hasAttachment", "type": "boolean", "required": False, "frontendType": "checkbox",
+ "description": t("Mit Anhängen"), "default": False},
+ {"name": "filter", "type": "string", "required": False, "frontendType": "text",
+ "description": t("Erweitert: KQL-Filter"), "default": ""},
+ ],
+ "inputs": 1,
+ "outputs": 1,
+ "inputPorts": {0: {"accepts": ["Transit"]}},
+ "outputPorts": {0: {"schema": "EmailList"}},
+ "meta": {"icon": "mdi-email-search", "color": "#1976D2"},
+ "_method": "outlook",
+ "_action": "searchEmails",
+ },
+ {
+ "id": "email.draftEmail",
+ "category": "email",
+ "label": t("E-Mail entwerfen"),
+ "description": t("E-Mail-Entwurf erstellen"),
+ "parameters": [
+ {"name": "connectionReference", "type": "string", "required": True, "frontendType": "userConnection",
+ "description": t("E-Mail-Konto")},
+ {"name": "subject", "type": "string", "required": True, "frontendType": "text",
+ "description": t("Betreff")},
+ {"name": "body", "type": "string", "required": True, "frontendType": "textarea",
+ "description": t("Inhalt")},
+ {"name": "to", "type": "string", "required": False, "frontendType": "text",
+ "description": t("Empfänger"), "default": ""},
+ ],
+ "inputs": 1,
+ "outputs": 1,
+ "inputPorts": {0: {"accepts": ["EmailDraft", "AiResult", "Transit"]}},
+ "outputPorts": {0: {"schema": "ActionResult"}},
+ "meta": {"icon": "mdi-email-edit", "color": "#1976D2"},
+ "_method": "outlook",
+ "_action": "composeAndDraftEmailWithContext",
+ },
+]
diff --git a/modules/features/graphicalEditor/nodeDefinitions/file.py b/modules/features/graphicalEditor/nodeDefinitions/file.py
new file mode 100644
index 00000000..f3714741
--- /dev/null
+++ b/modules/features/graphicalEditor/nodeDefinitions/file.py
@@ -0,0 +1,35 @@
+# Copyright (c) 2025 Patrick Motsch
+# File node definitions - create files from context (e.g. from AI nodes).
+
+from modules.shared.i18nRegistry import t
+
+FILE_NODES = [
+ {
+ "id": "file.create",
+ "category": "file",
+ "label": t("Datei erstellen"),
+ "description": t("Erstellt eine Datei aus Kontext (Text/Markdown von KI)."),
+ "parameters": [
+ {"name": "contentSources", "type": "json", "required": False, "frontendType": "json",
+ "description": t("Kontext-Quellen"), "default": []},
+ {"name": "outputFormat", "type": "string", "required": True, "frontendType": "select",
+ "frontendOptions": {"options": ["docx", "pdf", "txt", "html", "md"]},
+ "description": t("Ausgabeformat"), "default": "docx"},
+ {"name": "title", "type": "string", "required": False, "frontendType": "text",
+ "description": t("Dokumenttitel")},
+ {"name": "templateName", "type": "string", "required": False, "frontendType": "select",
+ "frontendOptions": {"options": ["default", "corporate", "minimal"]},
+ "description": t("Stil-Vorlage")},
+ {"name": "language", "type": "string", "required": False, "frontendType": "select",
+ "frontendOptions": {"options": ["de", "en", "fr"]},
+ "description": t("Sprache"), "default": "de"},
+ ],
+ "inputs": 1,
+ "outputs": 1,
+ "inputPorts": {0: {"accepts": ["AiResult", "TextResult", "Transit"]}},
+ "outputPorts": {0: {"schema": "DocumentList"}},
+ "meta": {"icon": "mdi-file-plus-outline", "color": "#2196F3"},
+ "_method": "file",
+ "_action": "create",
+ },
+]
diff --git a/modules/features/graphicalEditor/nodeDefinitions/flow.py b/modules/features/graphicalEditor/nodeDefinitions/flow.py
new file mode 100644
index 00000000..91faa4e5
--- /dev/null
+++ b/modules/features/graphicalEditor/nodeDefinitions/flow.py
@@ -0,0 +1,101 @@
+# Copyright (c) 2025 Patrick Motsch
+# Flow control node definitions.
+
+from modules.shared.i18nRegistry import t
+
+FLOW_NODES = [
+ {
+ "id": "flow.ifElse",
+ "category": "flow",
+ "label": t("Wenn / Sonst"),
+ "description": t("Verzweigung nach Bedingung"),
+ "parameters": [
+ {
+ "name": "condition",
+ "type": "string",
+ "required": True,
+ "frontendType": "condition",
+ "description": t("Bedingung"),
+ },
+ ],
+ "inputs": 1,
+ "outputs": 2,
+ "outputLabels": [t("Ja"), t("Nein")],
+ "inputPorts": {0: {"accepts": ["Transit"]}},
+ "outputPorts": {0: {"schema": "Transit"}, 1: {"schema": "Transit"}},
+ "executor": "flow",
+ "meta": {"icon": "mdi-source-branch", "color": "#FF9800"},
+ },
+ {
+ "id": "flow.switch",
+ "category": "flow",
+ "label": t("Switch"),
+ "description": t("Mehrere Zweige nach Wert"),
+ "parameters": [
+ {
+ "name": "value",
+ "type": "string",
+ "required": True,
+ "frontendType": "text",
+ "description": t("Zu vergleichender Wert"),
+ },
+ {
+ "name": "cases",
+ "type": "array",
+ "required": False,
+ "frontendType": "caseList",
+ "description": t("Fälle"),
+ },
+ ],
+ "inputs": 1,
+ "outputs": 1,
+ "inputPorts": {0: {"accepts": ["Transit"]}},
+ "outputPorts": {0: {"schema": "Transit"}},
+ "executor": "flow",
+ "meta": {"icon": "mdi-swap-horizontal", "color": "#FF9800"},
+ },
+ {
+ "id": "flow.loop",
+ "category": "flow",
+ "label": t("Schleife / Für Jedes"),
+ "description": t("Über Array-Elemente iterieren"),
+ "parameters": [
+ {
+ "name": "items",
+ "type": "string",
+ "required": True,
+ "frontendType": "text",
+ "description": t("Pfad zum Array"),
+ },
+ ],
+ "inputs": 1,
+ "outputs": 1,
+ "inputPorts": {0: {"accepts": ["Transit"]}},
+ "outputPorts": {0: {"schema": "LoopItem"}},
+ "executor": "flow",
+ "meta": {"icon": "mdi-repeat", "color": "#FF9800"},
+ },
+ {
+ "id": "flow.merge",
+ "category": "flow",
+ "label": t("Zusammenführen"),
+ "description": t("Mehrere Zweige zusammenführen"),
+ "parameters": [
+ {
+ "name": "mode",
+ "type": "string",
+ "required": False,
+ "frontendType": "select",
+ "frontendOptions": {"options": ["first", "all", "append"]},
+ "description": t("Zusammenführungsmodus"),
+ "default": "first",
+ },
+ ],
+ "inputs": 2,
+ "outputs": 1,
+ "inputPorts": {0: {"accepts": ["Transit"]}, 1: {"accepts": ["Transit"]}},
+ "outputPorts": {0: {"schema": "MergeResult"}},
+ "executor": "flow",
+ "meta": {"icon": "mdi-call-merge", "color": "#FF9800"},
+ },
+]
diff --git a/modules/features/graphicalEditor/nodeDefinitions/input.py b/modules/features/graphicalEditor/nodeDefinitions/input.py
new file mode 100644
index 00000000..b90efaa2
--- /dev/null
+++ b/modules/features/graphicalEditor/nodeDefinitions/input.py
@@ -0,0 +1,148 @@
+# Copyright (c) 2025 Patrick Motsch
+# Input/Human node definitions - nodes that require user action.
+
+from modules.shared.i18nRegistry import t
+
+INPUT_NODES = [
+ {
+ "id": "input.form",
+ "category": "input",
+ "label": t("Formular"),
+ "description": t("Benutzer füllt ein Formular aus"),
+ "parameters": [
+ {
+ "name": "fields",
+ "type": "json",
+ "required": True,
+ "frontendType": "fieldBuilder",
+ "description": t("Formularfelder"),
+ "default": [],
+ },
+ ],
+ "inputs": 1,
+ "outputs": 1,
+ "inputPorts": {0: {"accepts": ["Transit"]}},
+ "outputPorts": {0: {"schema": "FormPayload", "dynamic": True, "deriveFrom": "fields"}},
+ "executor": "input",
+ "meta": {"icon": "mdi-form-textbox", "color": "#9C27B0"},
+ },
+ {
+ "id": "input.approval",
+ "category": "input",
+ "label": t("Genehmigung"),
+ "description": t("Benutzer genehmigt oder lehnt ab"),
+ "parameters": [
+ {"name": "title", "type": "string", "required": True, "frontendType": "text",
+ "description": t("Genehmigungstitel")},
+ {"name": "description", "type": "string", "required": False, "frontendType": "textarea",
+ "description": t("Was genehmigt werden soll")},
+ {"name": "approvalType", "type": "string", "required": False, "frontendType": "select",
+ "frontendOptions": {"options": ["generic", "document"]},
+ "description": t("Typ: document oder generic"), "default": "generic"},
+ ],
+ "inputs": 1,
+ "outputs": 1,
+ "inputPorts": {0: {"accepts": ["Transit"]}},
+ "outputPorts": {0: {"schema": "BoolResult"}},
+ "executor": "input",
+ "meta": {"icon": "mdi-check-decagram", "color": "#4CAF50"},
+ },
+ {
+ "id": "input.upload",
+ "category": "input",
+ "label": t("Upload"),
+ "description": t("Benutzer lädt Datei(en) hoch"),
+ "parameters": [
+ {"name": "accept", "type": "string", "required": False, "frontendType": "text",
+ "description": t("Accept-String"), "default": ""},
+ {"name": "allowedTypes", "type": "json", "required": False, "frontendType": "multiselect",
+ "frontendOptions": {"options": ["pdf", "docx", "xlsx", "pptx", "txt", "csv", "jpg", "png", "gif"]},
+ "description": t("Ausgewählte Dateitypen"), "default": []},
+ {"name": "maxSize", "type": "number", "required": False, "frontendType": "number",
+ "description": t("Max. Dateigröße in MB"), "default": 10},
+ {"name": "multiple", "type": "boolean", "required": False, "frontendType": "checkbox",
+ "description": t("Mehrere Dateien erlauben"), "default": False},
+ ],
+ "inputs": 1,
+ "outputs": 1,
+ "inputPorts": {0: {"accepts": ["Transit"]}},
+ "outputPorts": {0: {"schema": "DocumentList"}},
+ "executor": "input",
+ "meta": {"icon": "mdi-upload", "color": "#2196F3"},
+ },
+ {
+ "id": "input.comment",
+ "category": "input",
+ "label": t("Kommentar"),
+ "description": t("Benutzer fügt einen Kommentar hinzu"),
+ "parameters": [
+ {"name": "placeholder", "type": "string", "required": False, "frontendType": "text",
+ "description": t("Platzhalter"), "default": ""},
+ {"name": "required", "type": "boolean", "required": False, "frontendType": "checkbox",
+ "description": t("Kommentar erforderlich"), "default": True},
+ ],
+ "inputs": 1,
+ "outputs": 1,
+ "inputPorts": {0: {"accepts": ["Transit"]}},
+ "outputPorts": {0: {"schema": "TextResult"}},
+ "executor": "input",
+ "meta": {"icon": "mdi-comment-text", "color": "#FF9800"},
+ },
+ {
+ "id": "input.review",
+ "category": "input",
+ "label": t("Prüfung"),
+ "description": t("Benutzer prüft Inhalt"),
+ "parameters": [
+ {"name": "contentRef", "type": "string", "required": True, "frontendType": "text",
+ "description": t("Referenz auf Inhalt")},
+ {"name": "reviewType", "type": "string", "required": False, "frontendType": "select",
+ "frontendOptions": {"options": ["generic", "document"]},
+ "description": t("Art der Prüfung"), "default": "generic"},
+ ],
+ "inputs": 1,
+ "outputs": 1,
+ "inputPorts": {0: {"accepts": ["Transit"]}},
+ "outputPorts": {0: {"schema": "BoolResult"}},
+ "executor": "input",
+ "meta": {"icon": "mdi-magnify-scan", "color": "#673AB7"},
+ },
+ {
+ "id": "input.selection",
+ "category": "input",
+ "label": t("Auswahl"),
+ "description": t("Benutzer wählt aus Optionen"),
+ "parameters": [
+ {"name": "options", "type": "json", "required": True, "frontendType": "keyValueRows",
+ "description": t("Optionen"), "default": []},
+ {"name": "multiple", "type": "boolean", "required": False, "frontendType": "checkbox",
+ "description": t("Mehrfachauswahl erlauben"), "default": False},
+ ],
+ "inputs": 1,
+ "outputs": 1,
+ "inputPorts": {0: {"accepts": ["Transit"]}},
+ "outputPorts": {0: {"schema": "TextResult"}},
+ "executor": "input",
+ "meta": {"icon": "mdi-format-list-checks", "color": "#009688"},
+ },
+ {
+ "id": "input.confirmation",
+ "category": "input",
+ "label": t("Bestätigung"),
+ "description": t("Benutzer bestätigt Ja/Nein"),
+ "parameters": [
+ {"name": "question", "type": "string", "required": True, "frontendType": "text",
+ "description": t("Zu bestätigende Frage")},
+ {"name": "confirmLabel", "type": "string", "required": False, "frontendType": "text",
+ "description": t("Label für Bestätigen-Button"), "default": "Confirm"},
+ {"name": "rejectLabel", "type": "string", "required": False, "frontendType": "text",
+ "description": t("Label für Ablehnen-Button"), "default": "Reject"},
+ ],
+ "inputs": 1,
+ "outputs": 1,
+ "inputPorts": {0: {"accepts": ["Transit"]}},
+ "outputPorts": {0: {"schema": "BoolResult"}},
+ "executor": "input",
+ "meta": {"icon": "mdi-checkbox-marked-circle", "color": "#8BC34A"},
+ },
+]
diff --git a/modules/features/graphicalEditor/nodeDefinitions/sharepoint.py b/modules/features/graphicalEditor/nodeDefinitions/sharepoint.py
new file mode 100644
index 00000000..617354d3
--- /dev/null
+++ b/modules/features/graphicalEditor/nodeDefinitions/sharepoint.py
@@ -0,0 +1,133 @@
+# Copyright (c) 2025 Patrick Motsch
+# SharePoint node definitions - map to methodSharepoint actions.
+
+from modules.shared.i18nRegistry import t
+
# Static node-type definitions for the SharePoint category. Each entry is a
# frontend catalog record; the private "_method"/"_action" keys map the node
# to a methodSharepoint action at execution time and are stripped from the
# API payload (keys with a leading underscore are removed during
# localization in nodeRegistry).
SHAREPOINT_NODES = [
    # sharepoint.findFile → methodSharepoint.findDocumentPath
    {
        "id": "sharepoint.findFile",
        "category": "sharepoint",
        "label": t("Datei finden"),
        "description": t("Datei nach Pfad oder Suche finden"),
        "parameters": [
            {"name": "connectionReference", "type": "string", "required": True, "frontendType": "userConnection",
             "description": t("SharePoint-Verbindung")},
            {"name": "searchQuery", "type": "string", "required": True, "frontendType": "text",
             "description": t("Suchanfrage oder Pfad")},
            {"name": "site", "type": "string", "required": False, "frontendType": "text",
             "description": t("Optionaler Site-Hinweis"), "default": ""},
            {"name": "maxResults", "type": "number", "required": False, "frontendType": "number",
             "description": t("Max Ergebnisse"), "default": 1000},
        ],
        "inputs": 1,
        "outputs": 1,
        "inputPorts": {0: {"accepts": ["Transit"]}},
        "outputPorts": {0: {"schema": "FileList"}},
        "meta": {"icon": "mdi-file-search", "color": "#0078D4"},
        "_method": "sharepoint",
        "_action": "findDocumentPath",
    },
    # sharepoint.readFile → methodSharepoint.readDocuments
    {
        "id": "sharepoint.readFile",
        "category": "sharepoint",
        "label": t("Datei lesen"),
        "description": t("Inhalt aus Datei extrahieren"),
        "parameters": [
            {"name": "connectionReference", "type": "string", "required": True, "frontendType": "userConnection",
             "description": t("SharePoint-Verbindung")},
            {"name": "pathQuery", "type": "string", "required": True, "frontendType": "sharepointFile",
             "frontendOptions": {"dependsOn": "connectionReference"},
             "description": t("Dateipfad")},
        ],
        "inputs": 1,
        "outputs": 1,
        "inputPorts": {0: {"accepts": ["FileList", "Transit"]}},
        "outputPorts": {0: {"schema": "DocumentList"}},
        "meta": {"icon": "mdi-file-document", "color": "#0078D4"},
        "_method": "sharepoint",
        "_action": "readDocuments",
    },
    # sharepoint.uploadFile → methodSharepoint.uploadFile
    {
        "id": "sharepoint.uploadFile",
        "category": "sharepoint",
        "label": t("Datei hochladen"),
        "description": t("Datei zu SharePoint hochladen"),
        "parameters": [
            {"name": "connectionReference", "type": "string", "required": True, "frontendType": "userConnection",
             "description": t("SharePoint-Verbindung")},
            {"name": "pathQuery", "type": "string", "required": True, "frontendType": "sharepointFolder",
             "frontendOptions": {"dependsOn": "connectionReference"},
             "description": t("Zielordner-Pfad")},
        ],
        "inputs": 1,
        "outputs": 1,
        "inputPorts": {0: {"accepts": ["DocumentList", "Transit"]}},
        "outputPorts": {0: {"schema": "ActionResult"}},
        "meta": {"icon": "mdi-upload", "color": "#0078D4"},
        "_method": "sharepoint",
        "_action": "uploadFile",
    },
    # sharepoint.listFiles → methodSharepoint.listDocuments
    {
        "id": "sharepoint.listFiles",
        "category": "sharepoint",
        "label": t("Dateien auflisten"),
        "description": t("Dateien in Ordner auflisten"),
        "parameters": [
            {"name": "connectionReference", "type": "string", "required": True, "frontendType": "userConnection",
             "description": t("SharePoint-Verbindung")},
            {"name": "pathQuery", "type": "string", "required": False, "frontendType": "sharepointFolder",
             "frontendOptions": {"dependsOn": "connectionReference"},
             "description": t("Ordnerpfad"), "default": "/"},
        ],
        "inputs": 1,
        "outputs": 1,
        "inputPorts": {0: {"accepts": ["Transit"]}},
        "outputPorts": {0: {"schema": "FileList"}},
        "meta": {"icon": "mdi-folder-open", "color": "#0078D4"},
        "_method": "sharepoint",
        "_action": "listDocuments",
    },
    # sharepoint.downloadFile → methodSharepoint.downloadFileByPath
    {
        "id": "sharepoint.downloadFile",
        "category": "sharepoint",
        "label": t("Datei herunterladen"),
        "description": t("Datei vom Pfad herunterladen"),
        "parameters": [
            {"name": "connectionReference", "type": "string", "required": True, "frontendType": "userConnection",
             "description": t("SharePoint-Verbindung")},
            {"name": "pathQuery", "type": "string", "required": True, "frontendType": "sharepointFile",
             "frontendOptions": {"dependsOn": "connectionReference"},
             "description": t("Vollständiger Dateipfad")},
        ],
        "inputs": 1,
        "outputs": 1,
        "inputPorts": {0: {"accepts": ["FileList", "Transit"]}},
        "outputPorts": {0: {"schema": "DocumentList"}},
        "meta": {"icon": "mdi-download", "color": "#0078D4"},
        "_method": "sharepoint",
        "_action": "downloadFileByPath",
    },
    # sharepoint.copyFile → methodSharepoint.copyFile
    {
        "id": "sharepoint.copyFile",
        "category": "sharepoint",
        "label": t("Datei kopieren"),
        "description": t("Datei an Ziel kopieren"),
        "parameters": [
            {"name": "connectionReference", "type": "string", "required": True, "frontendType": "userConnection",
             "description": t("SharePoint-Verbindung")},
            {"name": "sourcePath", "type": "string", "required": True, "frontendType": "sharepointFile",
             "frontendOptions": {"dependsOn": "connectionReference"},
             "description": t("Quelldatei-Pfad")},
            {"name": "destPath", "type": "string", "required": True, "frontendType": "sharepointFolder",
             "frontendOptions": {"dependsOn": "connectionReference"},
             "description": t("Zielordner")},
        ],
        "inputs": 1,
        "outputs": 1,
        "inputPorts": {0: {"accepts": ["Transit"]}},
        "outputPorts": {0: {"schema": "ActionResult"}},
        "meta": {"icon": "mdi-content-copy", "color": "#0078D4"},
        "_method": "sharepoint",
        "_action": "copyFile",
    },
]
diff --git a/modules/features/graphicalEditor/nodeDefinitions/triggers.py b/modules/features/graphicalEditor/nodeDefinitions/triggers.py
new file mode 100644
index 00000000..69b1aa17
--- /dev/null
+++ b/modules/features/graphicalEditor/nodeDefinitions/triggers.py
@@ -0,0 +1,62 @@
+# Copyright (c) 2025 Patrick Motsch
+# Canvas start nodes — variant reflects workflow configuration (gear in editor).
+
+from modules.shared.i18nRegistry import t
+
# Canvas start nodes — one entry per workflow start variant. Trigger nodes
# have no inputs (inputs=0, empty inputPorts) and use the dedicated
# "trigger" executor instead of a _method/_action mapping.
TRIGGER_NODES = [
    # Manual / API / background start (webhook, e-mail, ...).
    {
        "id": "trigger.manual",
        "category": "trigger",
        "label": t("Start"),
        "description": t("Manuell, API oder Hintergrund-Starts (Webhook, E-Mail, …)."),
        "parameters": [],
        "inputs": 0,
        "outputs": 1,
        "inputPorts": {},
        "outputPorts": {0: {"schema": "ActionResult"}},
        "executor": "trigger",
        "meta": {"icon": "mdi-play", "color": "#4CAF50"},
    },
    # Form start: output schema is derived from the configured formFields
    # (dynamic=True, deriveFrom="formFields").
    {
        "id": "trigger.form",
        "category": "trigger",
        "label": t("Start (Formular)"),
        "description": t("Felder werden beim Start befüllt; konfigurieren Sie die Felder auf dieser Node."),
        "parameters": [
            {
                "name": "formFields",
                "type": "json",
                "required": False,
                "frontendType": "fieldBuilder",
                "description": t("Felddefinitionen"),
            },
        ],
        "inputs": 0,
        "outputs": 1,
        "inputPorts": {},
        "outputPorts": {0: {"schema": "FormPayload", "dynamic": True, "deriveFrom": "formFields"}},
        "executor": "trigger",
        "meta": {"icon": "mdi-form-select", "color": "#9C27B0"},
    },
    # Scheduled start driven by a cron expression.
    {
        "id": "trigger.schedule",
        "category": "trigger",
        "label": t("Start (Zeitplan)"),
        "description": t("Cron-Ausdruck für geplante Läufe."),
        "parameters": [
            {
                "name": "cron",
                "type": "string",
                "required": False,
                "frontendType": "cron",
                "description": t("Cron-Ausdruck"),
            },
        ],
        "inputs": 0,
        "outputs": 1,
        "inputPorts": {},
        "outputPorts": {0: {"schema": "ActionResult"}},
        "executor": "trigger",
        "meta": {"icon": "mdi-clock", "color": "#2196F3"},
    },
]
diff --git a/modules/features/graphicalEditor/nodeDefinitions/trustee.py b/modules/features/graphicalEditor/nodeDefinitions/trustee.py
new file mode 100644
index 00000000..5d8a0f21
--- /dev/null
+++ b/modules/features/graphicalEditor/nodeDefinitions/trustee.py
@@ -0,0 +1,92 @@
+# Copyright (c) 2025 Patrick Motsch
+# Trustee node definitions - map to methodTrustee actions.
+
+from modules.shared.i18nRegistry import t
+
# Static node-type definitions for the Trustee category. The private
# "_method"/"_action" keys map each node to a methodTrustee action at
# execution time and are stripped from API responses. "hidden" parameters
# (e.g. featureInstanceId, documentList) are filled programmatically rather
# than by the user.
TRUSTEE_NODES = [
    # trustee.refreshAccountingData → methodTrustee.refreshAccountingData
    {
        "id": "trustee.refreshAccountingData",
        "category": "trustee",
        "label": t("Buchhaltungsdaten aktualisieren"),
        "description": t("Buchhaltungsdaten aus externem System importieren/aktualisieren."),
        "parameters": [
            {"name": "featureInstanceId", "type": "string", "required": True, "frontendType": "hidden",
             "description": t("Trustee Feature-Instanz-ID")},
            {"name": "forceRefresh", "type": "boolean", "required": False, "frontendType": "checkbox",
             "description": t("Import erzwingen"), "default": False},
            {"name": "dateFrom", "type": "string", "required": False, "frontendType": "date",
             "description": t("Startdatum"), "default": ""},
            {"name": "dateTo", "type": "string", "required": False, "frontendType": "date",
             "description": t("Enddatum"), "default": ""},
        ],
        "inputs": 1,
        "outputs": 1,
        "inputPorts": {0: {"accepts": ["Transit"]}},
        "outputPorts": {0: {"schema": "ActionResult"}},
        "meta": {"icon": "mdi-database-refresh", "color": "#4CAF50"},
        "_method": "trustee",
        "_action": "refreshAccountingData",
    },
    # trustee.extractFromFiles → methodTrustee.extractFromFiles
    {
        "id": "trustee.extractFromFiles",
        "category": "trustee",
        "label": t("Dokumente extrahieren"),
        "description": t("Dokumenttyp und Daten aus PDF/JPG per AI extrahieren."),
        "parameters": [
            {"name": "connectionReference", "type": "string", "required": False, "frontendType": "userConnection",
             "description": t("SharePoint-Verbindung"), "default": ""},
            {"name": "sharepointFolder", "type": "string", "required": False, "frontendType": "sharepointFolder",
             "frontendOptions": {"dependsOn": "connectionReference"},
             "description": t("SharePoint-Ordnerpfad"), "default": ""},
            {"name": "featureInstanceId", "type": "string", "required": True, "frontendType": "hidden",
             "description": t("Trustee Feature-Instanz-ID")},
            {"name": "prompt", "type": "string", "required": False, "frontendType": "textarea",
             "description": t("AI-Prompt für Extraktion"), "default": ""},
        ],
        "inputs": 1,
        "outputs": 1,
        "inputPorts": {0: {"accepts": ["DocumentList", "Transit"]}},
        "outputPorts": {0: {"schema": "DocumentList"}},
        "meta": {"icon": "mdi-file-document-scan", "color": "#4CAF50"},
        "_method": "trustee",
        "_action": "extractFromFiles",
    },
    # trustee.processDocuments → methodTrustee.processDocuments
    {
        "id": "trustee.processDocuments",
        "category": "trustee",
        "label": t("Dokumente verarbeiten"),
        "description": t("TrusteeDocument + TrusteePosition aus Extraktionsergebnis erstellen."),
        "parameters": [
            {"name": "documentList", "type": "string", "required": False, "frontendType": "hidden",
             "description": t("Automatisch via Wire-Verbindung befüllt")},
            {"name": "featureInstanceId", "type": "string", "required": True, "frontendType": "hidden",
             "description": t("Trustee Feature-Instanz-ID")},
        ],
        "inputs": 1,
        "outputs": 1,
        "inputPorts": {0: {"accepts": ["DocumentList", "Transit"]}},
        "outputPorts": {0: {"schema": "ActionResult"}},
        "meta": {"icon": "mdi-file-document-check", "color": "#4CAF50"},
        "_method": "trustee",
        "_action": "processDocuments",
    },
    # trustee.syncToAccounting → methodTrustee.syncToAccounting
    {
        "id": "trustee.syncToAccounting",
        "category": "trustee",
        "label": t("In Buchhaltung synchronisieren"),
        "description": t("Trustee-Positionen in Buchhaltungssystem übertragen."),
        "parameters": [
            {"name": "documentList", "type": "string", "required": False, "frontendType": "hidden",
             "description": t("Automatisch via Wire-Verbindung befüllt")},
            {"name": "featureInstanceId", "type": "string", "required": True, "frontendType": "hidden",
             "description": t("Trustee Feature-Instanz-ID")},
        ],
        "inputs": 1,
        "outputs": 1,
        "inputPorts": {0: {"accepts": ["Transit"]}},
        "outputPorts": {0: {"schema": "ActionResult"}},
        "meta": {"icon": "mdi-calculator", "color": "#4CAF50"},
        "_method": "trustee",
        "_action": "syncToAccounting",
    },
]
diff --git a/modules/features/graphicalEditor/nodeRegistry.py b/modules/features/graphicalEditor/nodeRegistry.py
new file mode 100644
index 00000000..ea5b67bd
--- /dev/null
+++ b/modules/features/graphicalEditor/nodeRegistry.py
@@ -0,0 +1,125 @@
+# Copyright (c) 2025 Patrick Motsch
+# All rights reserved.
+"""
+Node Type Registry for graphicalEditor - static node definitions (ai, email, sharepoint, trigger, flow, data, input).
+Nodes are defined first; IO/method actions are used at execution time.
+"""
+
+import logging
+from typing import Dict, List, Any
+
+from modules.features.graphicalEditor.nodeDefinitions import STATIC_NODE_TYPES
+from modules.features.graphicalEditor.portTypes import PORT_TYPE_CATALOG, SYSTEM_VARIABLES
+from modules.shared.i18nRegistry import normalizePrimaryLanguageTag, resolveText
+
+logger = logging.getLogger(__name__)
+
+
def getNodeTypes(
    services: Any = None,
    language: str = "de",
) -> List[Dict[str, Any]]:
    """
    Return a shallow copy of the static node-type definitions.

    Both ``services`` and ``language`` are retained for API compatibility
    only; neither influences the result (no dynamic I/O derivation).
    """
    return [*STATIC_NODE_TYPES]
+
+
+def _pickFromLangMap(d: Any, lang: str) -> Any:
+ """Resolve multilingual dict: ``lang`` → ``xx`` → ``de`` → ``en`` → first non-empty value."""
+ if not isinstance(d, dict) or not d:
+ return None
+ for k in (lang, "xx", "de", "en"):
+ v = d.get(k)
+ if v is not None and v != "":
+ return v
+ for v in d.values():
+ if v is not None and v != "":
+ return v
+ return None
+
+
def _localizeNode(node: Dict[str, Any], language: str) -> Dict[str, Any]:
    """Apply request language via resolveText (t() keys + multilingual dicts).

    Returns a shallow copy of *node* with label, description, outputLabels and
    parameter descriptions resolved for the normalized language tag, and with
    all private keys (leading underscore, e.g. _method/_action) removed so
    execution metadata never reaches the API.
    """
    lang = normalizePrimaryLanguageTag(language, "en")
    out = dict(node)
    # Strip execution-only keys from the API copy.
    for key in list(out.keys()):
        if key.startswith("_"):
            del out[key]
    lbl = node.get("label")
    if lbl is not None:
        # Fall back to the node id when resolution yields an empty label.
        out["label"] = resolveText(lbl, lang) or node.get("id", "")
    desc = node.get("description")
    if desc is not None:
        out["description"] = resolveText(desc, lang)
    ol = node.get("outputLabels")
    if ol is not None:
        if isinstance(ol, list):
            out["outputLabels"] = [resolveText(x, lang) for x in ol]
        elif isinstance(ol, dict) and ol:
            # Multilingual dict whose values are lists of labels: pick the
            # best language entry, then resolve each label individually.
            # Assumes all values are lists/tuples — TODO confirm against
            # the node definitions that actually use outputLabels.
            first = next(iter(ol.values()), None)
            if isinstance(first, (list, tuple)):
                picked = _pickFromLangMap(ol, lang)
                raw = list(picked) if picked is not None else list(first)
                out["outputLabels"] = [resolveText(x, lang) for x in raw]
    # Copy parameters so the shared static definitions are never mutated.
    params = []
    for p in node.get("parameters", []):
        pc = dict(p)
        pd = p.get("description")
        if pd is not None:
            pc["description"] = resolveText(pd, lang)
        params.append(pc)
    out["parameters"] = params
    return out
+
+
def getNodeTypesForApi(
    services: Any,
    language: str = "de",
) -> Dict[str, Any]:
    """
    API-ready response: nodeTypes with localized strings, plus categories, portTypeCatalog, systemVariables.

    NOTE(review): the category labels below are hard-coded (mixed German/
    English) and are NOT run through the i18n layer like node labels —
    confirm whether the frontend localizes them itself.
    """
    nodes = getNodeTypes(services, language)
    localized = [_localizeNode(n, language) for n in nodes]
    categories = [
        {"id": "trigger", "label": "Trigger"},
        {"id": "input", "label": "Eingabe/Mensch"},
        {"id": "flow", "label": "Ablauf"},
        {"id": "data", "label": "Daten"},
        {"id": "ai", "label": "KI"},
        {"id": "file", "label": "Datei"},
        {"id": "email", "label": "E-Mail"},
        {"id": "sharepoint", "label": "SharePoint"},
        {"id": "clickup", "label": "ClickUp"},
        {"id": "trustee", "label": "Treuhand"},
    ]

    # Serialize the port-type catalog (pydantic models) to plain dicts.
    catalogSerialized = {}
    for name, schema in PORT_TYPE_CATALOG.items():
        catalogSerialized[name] = {
            "name": schema.name,
            "fields": [f.model_dump() for f in schema.fields],
        }

    return {
        "nodeTypes": localized,
        "categories": categories,
        "portTypeCatalog": catalogSerialized,
        "systemVariables": SYSTEM_VARIABLES,
    }
+
+
def getNodeTypeToMethodAction() -> Dict[str, tuple]:
    """
    Mapping from node type id to (method, action) for execution.

    Used by ActionNodeExecutor. Nodes without both _method and _action
    (e.g. triggers and flow nodes) are skipped.
    """
    return {
        node["id"]: (node["_method"], node["_action"])
        for node in STATIC_NODE_TYPES
        if node.get("_method") and node.get("_action")
    }
diff --git a/modules/features/graphicalEditor/portTypes.py b/modules/features/graphicalEditor/portTypes.py
new file mode 100644
index 00000000..d9ae2792
--- /dev/null
+++ b/modules/features/graphicalEditor/portTypes.py
@@ -0,0 +1,510 @@
+# Copyright (c) 2025 Patrick Motsch
+# All rights reserved.
+"""
+Typed Port System for the Graphical Editor.
+
+Defines PortSchema, PORT_TYPE_CATALOG, SYSTEM_VARIABLES,
+output normalizers, input extractors, and Transit helpers.
+"""
+
+import logging
+import time
+import uuid
+from typing import Any, Callable, Dict, List, Optional
+
+from pydantic import BaseModel, Field
+
+from modules.shared.i18nRegistry import resolveText
+
+logger = logging.getLogger(__name__)
+
+
+# ---------------------------------------------------------------------------
+# Pydantic models
+# ---------------------------------------------------------------------------
+
class PortField(BaseModel):
    """One named field of a port schema, serialized into the frontend catalog."""
    name: str
    type: str  # type expression as a string: str, int, bool, List[str], List[Document], Dict[str,Any]
    description: str = ""
    required: bool = True
+
+
class PortSchema(BaseModel):
    """Named schema describing the fields that flow through a port."""
    name: str  # e.g. "EmailDraft", "AiResult", "Transit"
    fields: List[PortField]
+
+
class InputPortDef(BaseModel):
    """Input port definition: which output schemas may be wired into this port."""
    accepts: List[str]  # list of accepted schema names
+
+
class OutputPortDef(BaseModel):
    """Output port definition.

    The field is stored as ``schema_`` with alias "schema" — presumably to
    avoid shadowing pydantic's BaseModel ``schema`` attribute; confirm.
    ``populate_by_name`` lets callers construct with either name.
    """
    model_config = {"populate_by_name": True}

    schema_: str = Field(alias="schema")  # schema name from PORT_TYPE_CATALOG
    dynamic: bool = False  # True when the schema is derived at edit time
    deriveFrom: Optional[str] = None  # parameter name a dynamic schema is derived from

    def model_dump(self, **kw):
        # Re-expose the aliased field under its public name "schema" so
        # serialized output matches the wire format regardless of by_alias.
        d = super().model_dump(**kw)
        d["schema"] = d.pop("schema_", d.get("schema"))
        return d
+
+
+# ---------------------------------------------------------------------------
+# PORT_TYPE_CATALOG
+# ---------------------------------------------------------------------------
+
# Catalog of all port schemas by name. Serialized for the frontend in
# nodeRegistry.getNodeTypesForApi; missing fields are backfilled at runtime
# by _normalizeToSchema. "Transit" is the untyped pass-through schema and
# deliberately declares no fields.
PORT_TYPE_CATALOG: Dict[str, PortSchema] = {
    "DocumentList": PortSchema(name="DocumentList", fields=[
        PortField(name="documents", type="List[Document]",
                  description="Dokumentenliste"),
    ]),
    "FileList": PortSchema(name="FileList", fields=[
        PortField(name="files", type="List[File]",
                  description="Dateiliste"),
    ]),
    "EmailDraft": PortSchema(name="EmailDraft", fields=[
        PortField(name="subject", type="str",
                  description="Betreff"),
        PortField(name="body", type="str",
                  description="Inhalt"),
        PortField(name="to", type="List[str]",
                  description="Empfänger"),
        PortField(name="cc", type="List[str]", required=False,
                  description="CC"),
        PortField(name="attachments", type="List[Document]", required=False,
                  description="Anhänge"),
    ]),
    "EmailList": PortSchema(name="EmailList", fields=[
        PortField(name="emails", type="List[Email]",
                  description="E-Mails"),
    ]),
    "TaskList": PortSchema(name="TaskList", fields=[
        PortField(name="tasks", type="List[Task]",
                  description="Aufgaben"),
    ]),
    "TaskResult": PortSchema(name="TaskResult", fields=[
        PortField(name="success", type="bool",
                  description="Erfolg"),
        PortField(name="taskId", type="str",
                  description="Aufgaben-ID"),
        PortField(name="task", type="Dict",
                  description="Aufgabendaten"),
    ]),
    "FormPayload": PortSchema(name="FormPayload", fields=[
        PortField(name="payload", type="Dict[str,Any]",
                  description="Formulardaten"),
    ]),
    "AiResult": PortSchema(name="AiResult", fields=[
        PortField(name="prompt", type="str",
                  description="Prompt"),
        PortField(name="response", type="str",
                  description="Antworttext"),
        PortField(name="responseData", type="Dict", required=False,
                  description="Strukturierte Antwort"),
        PortField(name="context", type="str",
                  description="Kontext"),
        PortField(name="documents", type="List[Document]",
                  description="Dokumente"),
    ]),
    "BoolResult": PortSchema(name="BoolResult", fields=[
        PortField(name="result", type="bool",
                  description="Ergebnis"),
        PortField(name="reason", type="str", required=False,
                  description="Begründung"),
    ]),
    "TextResult": PortSchema(name="TextResult", fields=[
        PortField(name="text", type="str",
                  description="Text"),
    ]),
    "LoopItem": PortSchema(name="LoopItem", fields=[
        PortField(name="currentItem", type="Any",
                  description="Aktuelles Element"),
        PortField(name="currentIndex", type="int",
                  description="Aktueller Index"),
        PortField(name="items", type="List[Any]",
                  description="Alle Elemente"),
        PortField(name="count", type="int",
                  description="Gesamtanzahl"),
    ]),
    "AggregateResult": PortSchema(name="AggregateResult", fields=[
        PortField(name="items", type="List[Any]",
                  description="Gesammelte Elemente"),
        PortField(name="count", type="int",
                  description="Anzahl"),
    ]),
    "MergeResult": PortSchema(name="MergeResult", fields=[
        PortField(name="inputs", type="Dict[int,Any]",
                  description="Eingaben nach Port"),
        PortField(name="first", type="Any",
                  description="Erstes verfügbares"),
        PortField(name="merged", type="Dict",
                  description="Zusammengeführte Daten"),
    ]),
    "ActionResult": PortSchema(name="ActionResult", fields=[
        PortField(name="success", type="bool",
                  description="Erfolg"),
        PortField(name="error", type="str", required=False,
                  description="Fehler"),
        PortField(name="data", type="Dict", required=False,
                  description="Ergebnisdaten"),
    ]),
    # Untyped pass-through: no declared fields, skipped by normalization.
    "Transit": PortSchema(name="Transit", fields=[]),
}
+
+
+# ---------------------------------------------------------------------------
+# SYSTEM_VARIABLES
+# ---------------------------------------------------------------------------
+
# System variables exposed to the editor (returned via getNodeTypesForApi)
# and resolved at execution time by _resolveSystemVariable against the run
# context. Keys here must stay in sync with the mapping in that function.
SYSTEM_VARIABLES: Dict[str, Dict[str, str]] = {
    "system.timestamp": {"type": "int", "description": "Unix timestamp (ms)"},
    "system.date": {"type": "str", "description": "ISO date (YYYY-MM-DD)"},
    "system.datetime": {"type": "str", "description": "ISO datetime"},
    "system.time": {"type": "str", "description": "HH:MM:SS"},
    "system.userId": {"type": "str", "description": "Current user ID"},
    "system.userName": {"type": "str", "description": "Current user name"},
    "system.userEmail": {"type": "str", "description": "Current user email"},
    "system.workflowId": {"type": "str", "description": "Workflow ID"},
    "system.runId": {"type": "str", "description": "Run ID"},
    "system.instanceId": {"type": "str", "description": "Feature instance ID"},
    "system.mandateId": {"type": "str", "description": "Mandate ID"},
    "system.loopIndex": {"type": "int", "description": "Current loop index (only in loop)"},
    "system.loopCount": {"type": "int", "description": "Loop item count (only in loop)"},
    "system.uuid": {"type": "str", "description": "Random UUID"},
}
+
+
+def _resolveSystemVariable(variable: str, context: Dict[str, Any]) -> Any:
+ """Resolve a system variable name to its runtime value."""
+ from datetime import datetime, timezone
+
+ now = datetime.now(timezone.utc)
+ mapping = {
+ "system.timestamp": lambda: int(now.timestamp() * 1000),
+ "system.date": lambda: now.strftime("%Y-%m-%d"),
+ "system.datetime": lambda: now.isoformat(),
+ "system.time": lambda: now.strftime("%H:%M:%S"),
+ "system.userId": lambda: context.get("userId", ""),
+ "system.userName": lambda: context.get("userName", ""),
+ "system.userEmail": lambda: context.get("userEmail", ""),
+ "system.workflowId": lambda: context.get("workflowId", ""),
+ "system.runId": lambda: context.get("_runId", ""),
+ "system.instanceId": lambda: context.get("instanceId", ""),
+ "system.mandateId": lambda: context.get("mandateId", ""),
+ "system.loopIndex": lambda: (context.get("_loopState") or {}).get("currentIndex", -1),
+ "system.loopCount": lambda: len((context.get("_loopState") or {}).get("items", [])),
+ "system.uuid": lambda: str(uuid.uuid4()),
+ }
+ resolver = mapping.get(variable)
+ if resolver:
+ return resolver()
+ logger.warning("Unknown system variable: %s", variable)
+ return None
+
+
+# ---------------------------------------------------------------------------
+# Output normalizers
+# ---------------------------------------------------------------------------
+
def _normalizeToSchema(raw: Any, schemaName: str) -> Dict[str, Any]:
    """
    Normalize raw executor output to match the declared port schema.

    Guarantees the _success/_error meta-fields are present (success is
    inferred from the absence of an "error" key) and backfills any schema
    fields the executor did not produce. Transit and unknown schemas pass
    through with only the meta-fields added.
    """
    if not isinstance(raw, dict):
        raw = {} if raw is None else {"value": raw}

    normalized = dict(raw)
    if "_success" not in normalized:
        normalized["_success"] = not bool(raw.get("error"))
    if "_error" not in normalized:
        normalized["_error"] = raw.get("error")

    schema = PORT_TYPE_CATALOG.get(schemaName)
    if not schema or schemaName == "Transit":
        return normalized

    for declared in schema.fields:
        normalized.setdefault(declared.name, _defaultForType(declared.type))
    return normalized
+
+
+def _defaultForType(typeStr: str) -> Any:
+ """Return a sensible default for a type string."""
+ if typeStr.startswith("List"):
+ return []
+ if typeStr.startswith("Dict"):
+ return {}
+ if typeStr == "bool":
+ return False
+ if typeStr == "int":
+ return 0
+ if typeStr == "str":
+ return ""
+ return None
+
+
def _normalizeError(error: Exception, schemaName: str) -> Dict[str, Any]:
    """Build an error envelope matching the schema with _success=False."""
    envelope: Dict[str, Any] = {"_success": False, "_error": str(error)}
    schema = PORT_TYPE_CATALOG.get(schemaName)
    if schema is not None:
        for declared in schema.fields:
            envelope.setdefault(declared.name, _defaultForType(declared.type))
    return envelope
+
+
+# ---------------------------------------------------------------------------
+# Input extractors (one per input port type)
+# ---------------------------------------------------------------------------
+
+def _extractEmailDraft(upstream: Dict[str, Any]) -> Dict[str, Any]:
+ """Extract EmailDraft fields from upstream output."""
+ result = {}
+ if upstream.get("responseData") and isinstance(upstream["responseData"], dict):
+ rd = upstream["responseData"]
+ for key in ("subject", "body", "to", "cc"):
+ if key in rd:
+ result[key] = rd[key]
+ if not result:
+ for key in ("subject", "body", "to", "cc"):
+ if key in upstream:
+ result[key] = upstream[key]
+ return result
+
+
+def _extractDocuments(upstream: Dict[str, Any]) -> Dict[str, Any]:
+ """Extract documents from upstream output."""
+ docs = upstream.get("documents") or upstream.get("documentList") or []
+ if not docs and isinstance(upstream.get("data"), dict):
+ docs = upstream["data"].get("documents") or upstream["data"].get("documentList") or []
+ # input.upload format
+ if not docs:
+ files = upstream.get("files") or []
+ fileObj = upstream.get("file")
+ fileIds = upstream.get("fileIds") or []
+ if fileObj:
+ docs = [fileObj]
+ elif files:
+ docs = files
+ elif fileIds:
+ docs = [{"validationMetadata": {"fileId": fid}} for fid in fileIds]
+ return {"documents": docs if isinstance(docs, list) else [docs]} if docs else {}
+
+
+def _extractText(upstream: Dict[str, Any]) -> Dict[str, Any]:
+ """Extract text from upstream output."""
+ text = upstream.get("text") or upstream.get("response") or upstream.get("context") or ""
+ if not text and upstream.get("payload"):
+ import json
+ payload = upstream["payload"]
+ text = json.dumps(payload, ensure_ascii=False) if isinstance(payload, dict) else str(payload)
+ return {"text": str(text)} if text else {}
+
+
+def _extractEmailList(upstream: Dict[str, Any]) -> Dict[str, Any]:
+ """Extract email list from upstream output."""
+ emails = upstream.get("emails") or []
+ if not emails:
+ docs = upstream.get("documents") or upstream.get("documentList") or []
+ if docs:
+ import json
+ for doc in docs:
+ raw = doc.get("documentData") if isinstance(doc, dict) else None
+ if raw:
+ try:
+ data = json.loads(raw) if isinstance(raw, str) else raw
+ if isinstance(data, dict):
+ found = (data.get("emails", {}).get("emails", [])
+ or data.get("searchResults", {}).get("results", []))
+ if found:
+ emails = found
+ break
+ except (json.JSONDecodeError, TypeError):
+ pass
+ return {"emails": emails} if emails else {}
+
+
+def _extractTaskList(upstream: Dict[str, Any]) -> Dict[str, Any]:
+ """Extract task list from upstream output."""
+ tasks = upstream.get("tasks") or []
+ if not tasks:
+ docs = upstream.get("documents") or upstream.get("documentList") or []
+ if docs:
+ import json
+ for doc in docs:
+ raw = doc.get("documentData") if isinstance(doc, dict) else None
+ if raw:
+ try:
+ data = json.loads(raw) if isinstance(raw, str) else raw
+ if isinstance(data, dict) and "tasks" in data:
+ tasks = data["tasks"]
+ break
+ except (json.JSONDecodeError, TypeError):
+ pass
+ return {"tasks": tasks} if tasks else {}
+
+
+def _extractFileList(upstream: Dict[str, Any]) -> Dict[str, Any]:
+ """Extract file list from upstream output."""
+ files = upstream.get("files") or []
+ return {"files": files} if files else {}
+
+
+def _extractFormPayload(upstream: Dict[str, Any]) -> Dict[str, Any]:
+ """Extract form payload from upstream output."""
+ payload = upstream.get("payload")
+ if payload and isinstance(payload, dict):
+ return {"payload": payload}
+ return {}
+
+
+def _extractAiResult(upstream: Dict[str, Any]) -> Dict[str, Any]:
+ """Extract AI result fields from upstream output."""
+ result = {}
+ for key in ("prompt", "response", "responseData", "context", "documents"):
+ if key in upstream:
+ result[key] = upstream[key]
+ return result
+
+
+def _extractBoolResult(upstream: Dict[str, Any]) -> Dict[str, Any]:
+ """Extract bool result from upstream output."""
+ result = upstream.get("result")
+ if isinstance(result, bool):
+ return {"result": result, "reason": upstream.get("reason", "")}
+ approved = upstream.get("approved")
+ if isinstance(approved, bool):
+ return {"result": approved, "reason": upstream.get("reason", "")}
+ return {}
+
+
+def _extractTaskResult(upstream: Dict[str, Any]) -> Dict[str, Any]:
+ """Extract task result from upstream output."""
+ result = {}
+ if "taskId" in upstream:
+ result["taskId"] = upstream["taskId"]
+ if "task" in upstream:
+ result["task"] = upstream["task"]
+ elif "clickupTask" in upstream:
+ result["task"] = upstream["clickupTask"]
+ if "success" in upstream:
+ result["success"] = upstream["success"]
+ return result
+
+
+def _extractAggregateResult(upstream: Dict[str, Any]) -> Dict[str, Any]:
+ """Extract aggregate result from upstream output."""
+ items = upstream.get("items") or []
+ return {"items": items, "count": len(items)}
+
+
+def _extractMergeResult(upstream: Dict[str, Any]) -> Dict[str, Any]:
+ """Extract merge result from upstream output."""
+ return {
+ "inputs": upstream.get("inputs", {}),
+ "first": upstream.get("first"),
+ "merged": upstream.get("merged", {}),
+ }
+
+
# Maps an input-port schema name to the extractor that pulls matching fields
# out of an upstream node's output dict. "Transit" has no extractor: transit
# envelopes are resolved via _resolveTransitChain instead.
INPUT_EXTRACTORS: Dict[str, Callable] = {
    "EmailDraft": _extractEmailDraft,
    "DocumentList": _extractDocuments,
    "TextResult": _extractText,
    "EmailList": _extractEmailList,
    "TaskList": _extractTaskList,
    "FileList": _extractFileList,
    "FormPayload": _extractFormPayload,
    "AiResult": _extractAiResult,
    "BoolResult": _extractBoolResult,
    "TaskResult": _extractTaskResult,
    "AggregateResult": _extractAggregateResult,
    "MergeResult": _extractMergeResult,
}
+
+
+# ---------------------------------------------------------------------------
+# Transit helpers
+# ---------------------------------------------------------------------------
+
+def _wrapTransit(data: Any, meta: Dict[str, Any]) -> Dict[str, Any]:
+ """Wrap data in a Transit envelope."""
+ return {"_transit": True, "_meta": meta, "data": data}
+
+
+def _unwrapTransit(output: Any) -> Any:
+ """Unwrap a Transit envelope, returning the inner data."""
+ if isinstance(output, dict) and output.get("_transit"):
+ return output.get("data")
+ return output
+
+
+def _resolveTransitChain(
+ nodeId: str,
+ nodeOutputs: Dict[str, Any],
+ connectionMap: Dict[str, list],
+) -> Any:
+ """
+ Follow _transit chain backwards until a real (non-transit) producer is found.
+ Returns the unwrapped output of the real producer.
+ """
+ visited = set()
+ current = nodeId
+ while current and current not in visited:
+ visited.add(current)
+ out = nodeOutputs.get(current)
+ if not isinstance(out, dict) or not out.get("_transit"):
+ return out
+ sources = connectionMap.get(current, [])
+ if not sources:
+ return _unwrapTransit(out)
+ srcId = sources[0][0] if sources else None
+ if not srcId:
+ return _unwrapTransit(out)
+ current = srcId
+ return nodeOutputs.get(nodeId)
+
+
+# ---------------------------------------------------------------------------
+# Schema derivation for dynamic outputs
+# ---------------------------------------------------------------------------
+
def _deriveFormPayloadSchema(node: Dict[str, Any]) -> Optional[PortSchema]:
    """Derive an output schema from a form node's field definitions.

    Reads the node's ``parameters.fields`` list; each entry with a "name"
    becomes a PortField whose description comes from the resolved label
    (falling back to the field name). Returns None when no usable field
    definitions exist.
    """
    fields_param = (node.get("parameters") or {}).get("fields")
    if not fields_param or not isinstance(fields_param, list):
        return None
    portFields = []
    for f in fields_param:
        if isinstance(f, dict) and f.get("name"):
            _lab = f.get("label")
            _desc = resolveText(_lab) if _lab is not None else f["name"]
            # resolveText may yield None or a blank string; fall back to the
            # field name (previously a None result crashed on .strip()).
            if not isinstance(_desc, str) or not _desc.strip():
                _desc = f["name"]
            portFields.append(PortField(
                name=f["name"],
                type=f.get("type", "str"),
                description=_desc,
                required=f.get("required", False),
            ))
    return PortSchema(name="FormPayload_dynamic", fields=portFields) if portFields else None
+
+
def _deriveTransformSchema(node: Dict[str, Any]) -> Optional[PortSchema]:
    """Derive an output schema from a transform node's mapping definitions.

    Each mapping dict with an "outputField" becomes a PortField; returns
    None when there are no usable mappings.
    """
    mappings = (node.get("parameters") or {}).get("mappings")
    if not isinstance(mappings, list) or not mappings:
        return None
    derived = [
        PortField(
            name=m["outputField"],
            type=m.get("type", "str"),
            description=str(m.get("label", m["outputField"])),
        )
        for m in mappings
        if isinstance(m, dict) and m.get("outputField")
    ]
    if not derived:
        return None
    return PortSchema(name="Transform_dynamic", fields=derived)
diff --git a/modules/features/graphicalEditor/routeFeatureGraphicalEditor.py b/modules/features/graphicalEditor/routeFeatureGraphicalEditor.py
new file mode 100644
index 00000000..4fc2293e
--- /dev/null
+++ b/modules/features/graphicalEditor/routeFeatureGraphicalEditor.py
@@ -0,0 +1,1168 @@
+# Copyright (c) 2025 Patrick Motsch
+# All rights reserved.
+"""
+GraphicalEditor routes - node-types, execute, workflows, runs, tasks, connections, browse.
+"""
+
+import asyncio
+import json
+import logging
+import math
+from typing import Any, Dict, List, Optional
+
+from fastapi import APIRouter, Depends, Path, Query, Body, Request, HTTPException
+from fastapi.responses import JSONResponse, StreamingResponse
+from modules.auth import limiter, getRequestContext, RequestContext
+from modules.datamodels.datamodelPagination import PaginationParams, PaginationMetadata, normalize_pagination_dict
+from modules.routes.routeDataUsers import _applyFiltersAndSort
+
+from modules.features.graphicalEditor.mainGraphicalEditor import getGraphicalEditorServices
+from modules.features.graphicalEditor.nodeRegistry import getNodeTypesForApi
+from modules.features.graphicalEditor.interfaceFeatureGraphicalEditor import getGraphicalEditorInterface
+from modules.workflows.automation2.executionEngine import executeGraph
+from modules.workflows.automation2.runEnvelope import (
+ default_run_envelope,
+ merge_run_envelope,
+ normalize_run_envelope,
+)
+from modules.features.graphicalEditor.entryPoints import find_invocation
+from modules.shared.i18nRegistry import apiRouteContext, resolveText
+routeApiMsg = apiRouteContext("routeFeatureGraphicalEditor")
+
+logger = logging.getLogger(__name__)
+
+
+def _build_execute_run_envelope(
+    body: Dict[str, Any],
+    workflow: Optional[Dict[str, Any]],
+    user_id: Optional[str],
+    requestLang: Optional[str] = None,
+) -> Dict[str, Any]:
+    """Build normalized run envelope from POST /execute body.
+
+    Precedence: an explicit dict body["runEnvelope"] wins; otherwise an
+    entryPointId (which requires a saved workflow) seeds a default
+    envelope from the matched invocation; otherwise a plain default
+    envelope is used. In every branch a dict body["payload"] is merged in.
+
+    Raises HTTPException(400) for an unknown/disabled entry point or a
+    missing saved workflow.
+
+    NOTE(review): requestLang is accepted but never used in this function
+    - confirm whether it should influence the envelope or be removed.
+    """
+    # Branch 1: caller supplied an envelope - normalize it and merge payload.
+    if isinstance(body.get("runEnvelope"), dict):
+        env = normalize_run_envelope(body["runEnvelope"], user_id=user_id)
+        pl = body.get("payload")
+        if isinstance(pl, dict):
+            env = merge_run_envelope(env, {"payload": pl})
+        return env
+
+    # Branch 2: envelope derived from a named entry point on a saved workflow.
+    entry_point_id = body.get("entryPointId")
+    if entry_point_id:
+        if not workflow:
+            raise HTTPException(
+                status_code=400,
+                detail=routeApiMsg("entryPointId requires a saved workflow (workflowId must refer to a stored workflow)"),
+            )
+        inv = find_invocation(workflow, entry_point_id)
+        if not inv:
+            raise HTTPException(status_code=400, detail=routeApiMsg("entryPointId not found on workflow"))
+        if not inv.get("enabled", True):
+            raise HTTPException(status_code=400, detail=routeApiMsg("entry point is disabled"))
+        kind = inv.get("kind", "manual")
+        # Map invocation kind -> trigger type; note "always_on" collapses
+        # into the "event" trigger, and unknown kinds default to "manual".
+        trig_map = {
+            "manual": "manual",
+            "form": "form",
+            "schedule": "schedule",
+            "always_on": "event",
+            "email": "email",
+            "webhook": "webhook",
+            "api": "api",
+            "event": "event",
+        }
+        trig = trig_map.get(kind, "manual")
+        title = inv.get("title") or {}
+        label = resolveText(title)
+        base = default_run_envelope(
+            trig,
+            entry_point_id=inv.get("id"),
+            entry_point_label=label or None,
+        )
+        pl = body.get("payload")
+        if isinstance(pl, dict):
+            base = merge_run_envelope(base, {"payload": pl})
+        return normalize_run_envelope(base, user_id=user_id)
+
+    # Branch 3: no envelope and no entry point - plain default envelope.
+    env = normalize_run_envelope(None, user_id=user_id)
+    pl = body.get("payload")
+    if isinstance(pl, dict):
+        env = merge_run_envelope(env, {"payload": pl})
+    return env
+
+# Router for all GraphicalEditor endpoints; every route below is mounted
+# under /api/workflows and scoped by a feature {instanceId} path segment.
+router = APIRouter(
+    prefix="/api/workflows",
+    tags=["GraphicalEditor"],
+    responses={404: {"description": "Not found"}, 403: {"description": "Forbidden"}},
+)
+
+
+def _validateInstanceAccess(instanceId: str, context: RequestContext) -> str:
+    """Validate user has access to the graphicalEditor feature instance. Returns mandateId.
+
+    Raises HTTPException 404 when the instance does not exist and 403 when
+    the user's feature access record is missing or disabled. Returns ""
+    when the instance has no mandateId.
+    """
+    from fastapi import HTTPException
+    from modules.interfaces.interfaceDbApp import getRootInterface
+
+    rootInterface = getRootInterface()
+    instance = rootInterface.getFeatureInstance(instanceId)
+    if not instance:
+        raise HTTPException(status_code=404, detail=f"Feature instance {instanceId} not found")
+    featureAccess = rootInterface.getFeatureAccess(str(context.user.id), instanceId)
+    if not featureAccess or not featureAccess.enabled:
+        raise HTTPException(status_code=403, detail=routeApiMsg("Access denied to this feature instance"))
+    return str(instance.mandateId) if instance.mandateId else ""
+
+
+@router.get("/{instanceId}/node-types")
+@limiter.limit("60/minute")
+def get_node_types(
+    request: Request,
+    instanceId: str = Path(..., description="Feature instance ID"),
+    language: str = Query("en", description="Localization (en, de, fr)"),
+    context: RequestContext = Depends(getRequestContext),
+) -> dict:
+    """Return node types for the flow builder: static + I/O from methodDiscovery."""
+    logger.info("graphicalEditor node-types request: instanceId=%s language=%s", instanceId, language)
+    mandateId = _validateInstanceAccess(instanceId, context)
+    # Services are scoped to the requesting user, mandate and instance so
+    # discovered node types reflect only what this user may use.
+    services = getGraphicalEditorServices(
+        context.user,
+        mandateId=mandateId,
+        featureInstanceId=instanceId,
+    )
+    result = getNodeTypesForApi(services, language=language)
+    logger.info(
+        "graphicalEditor node-types response: %d nodeTypes %d categories",
+        len(result.get("nodeTypes", [])),
+        len(result.get("categories", [])),
+    )
+    return result
+
+
+@router.post("/{instanceId}/execute")
+@limiter.limit("30/minute")
+async def post_execute(
+    request: Request,
+    instanceId: str = Path(..., description="Feature instance ID"),
+    body: dict = Body(..., description="{ workflowId?, graph: { nodes, connections } }"),
+    context: RequestContext = Depends(getRequestContext),
+) -> dict:
+    """Execute workflow graph. Body: { workflowId?, graph: { nodes, connections } }.
+
+    When workflowId refers to a stored workflow and the request carries no
+    nodes, the stored graph is executed. Without a workflowId a transient
+    id ("transient-<hex>") is generated for the run.
+    """
+    userId = str(context.user.id) if context.user else None
+    logger.info(
+        "graphicalEditor execute request: instanceId=%s userId=%s body_keys=%s",
+        instanceId,
+        userId,
+        list(body.keys()),
+    )
+    mandateId = _validateInstanceAccess(instanceId, context)
+    services = getGraphicalEditorServices(
+        context.user,
+        mandateId=mandateId,
+        featureInstanceId=instanceId,
+    )
+    # Populate the services with discovered I/O methods before execution.
+    from modules.workflows.processing.shared.methodDiscovery import discoverMethods
+    discoverMethods(services)
+
+    # Accept either {graph: {...}} or a bare graph as the request body.
+    graph = body.get("graph") or body
+    workflowId = body.get("workflowId")
+    req_nodes = graph.get("nodes") or []
+    workflow_for_envelope: Optional[Dict[str, Any]] = None
+    # NOTE(review): when workflowId is set and req_nodes is empty, the
+    # workflow is fetched twice (here and below) via two separate
+    # interfaces - confirm whether the second lookup can reuse the first.
+    if workflowId and not str(workflowId).startswith("transient-"):
+        iface = getGraphicalEditorInterface(context.user, mandateId, instanceId)
+        workflow_for_envelope = iface.getWorkflow(workflowId)
+    if workflowId and len(req_nodes) == 0:
+        iface = getGraphicalEditorInterface(context.user, mandateId, instanceId)
+        wf = iface.getWorkflow(workflowId)
+        if wf and wf.get("graph"):
+            graph = wf["graph"]
+            logger.info("graphicalEditor execute: loaded graph from workflow %s", workflowId)
+            workflow_for_envelope = wf
+    if not workflowId:
+        # No stored workflow: tag the run with a throwaway id so it can
+        # still be traced.
+        import uuid
+        workflowId = f"transient-{uuid.uuid4().hex[:12]}"
+        logger.info("graphicalEditor execute: using transient workflowId=%s", workflowId)
+    nodes_count = len(graph.get("nodes") or [])
+    connections_count = len(graph.get("connections") or [])
+    logger.info(
+        "graphicalEditor execute: graph nodes=%d connections=%d workflowId=%s mandateId=%s",
+        nodes_count,
+        connections_count,
+        workflowId,
+        mandateId,
+    )
+    run_env = _build_execute_run_envelope(
+        body,
+        workflow_for_envelope,
+        userId,
+        getattr(context.user, "language", None) if context.user else None,
+    )
+
+    # Workflow label for the run record; tolerate both dict and object forms.
+    _wfLabel = None
+    if workflow_for_envelope:
+        _wfLabel = workflow_for_envelope.get("label") if isinstance(workflow_for_envelope, dict) else getattr(workflow_for_envelope, "label", None)
+
+    ge_interface = getGraphicalEditorInterface(context.user, mandateId, instanceId)
+    result = await executeGraph(
+        graph=graph,
+        services=services,
+        workflowId=workflowId,
+        instanceId=instanceId,
+        userId=userId,
+        mandateId=mandateId,
+        automation2_interface=ge_interface,
+        run_envelope=run_env,
+        label=_wfLabel,
+    )
+    logger.info(
+        "graphicalEditor execute result: success=%s error=%s nodeOutputs_keys=%s failedNode=%s paused=%s",
+        result.get("success"),
+        result.get("error"),
+        list(result.get("nodeOutputs", {}).keys()) if result.get("nodeOutputs") else [],
+        result.get("failedNode"),
+        result.get("paused"),
+    )
+    return result
+
+
+# -------------------------------------------------------------------------
+# Run Tracing SSE Stream
+# -------------------------------------------------------------------------
+
+
+@router.get("/{instanceId}/runs/{runId}/stream")
+async def get_run_stream(
+    request: Request,
+    instanceId: str = Path(..., description="Feature instance ID"),
+    runId: str = Path(..., description="Run ID"),
+    context: RequestContext = Depends(getRequestContext),
+):
+    """SSE stream for live step-log updates during a workflow run.
+
+    Creates a per-run event queue ("run-trace-<runId>"), forwards queued
+    events as SSE "data:" frames, emits a keepalive every 30 s of silence,
+    and terminates on a None sentinel or a run_complete/run_failed event.
+    """
+    _validateInstanceAccess(instanceId, context)
+
+    from modules.serviceCenter.core.serviceStreaming.eventManager import get_event_manager
+    sseEventManager = get_event_manager()
+    queueId = f"run-trace-{runId}"
+    sseEventManager.create_queue(queueId)
+
+    async def _sseGenerator():
+        queue = sseEventManager.get_queue(queueId)
+        if not queue:
+            return
+        while True:
+            try:
+                event = await asyncio.wait_for(queue.get(), timeout=30)
+            except asyncio.TimeoutError:
+                # No event within 30 s: keep the connection alive.
+                yield "data: {\"type\": \"keepalive\"}\n\n"
+                continue
+            if event is None:
+                break
+            # Events may arrive wrapped as {"data": ...}; unwrap when dict.
+            payload = event.get("data", event) if isinstance(event, dict) else event
+            yield f"data: {json.dumps(payload, default=str)}\n\n"
+            eventType = payload.get("type", "") if isinstance(payload, dict) else ""
+            if eventType in ("run_complete", "run_failed"):
+                break
+        # Delayed cleanup so late events do not hit a missing queue.
+        await sseEventManager.cleanup(queueId, delay=10)
+
+    return StreamingResponse(
+        _sseGenerator(),
+        media_type="text/event-stream",
+        headers={
+            "Cache-Control": "no-cache",
+            "Connection": "keep-alive",
+            # Disable nginx proxy buffering so events flush immediately.
+            "X-Accel-Buffering": "no",
+        },
+    )
+
+
+# -------------------------------------------------------------------------
+# Versions (AutoVersion Lifecycle)
+# -------------------------------------------------------------------------
+
+
+@router.get("/{instanceId}/workflows/{workflowId}/versions")
+@limiter.limit("60/minute")
+def get_versions(
+    request: Request,
+    instanceId: str = Path(..., description="Feature instance ID"),
+    workflowId: str = Path(..., description="Workflow ID"),
+    context: RequestContext = Depends(getRequestContext),
+) -> dict:
+    """List all versions for a workflow.
+
+    Returns {"versions": [...]} as provided by the interface layer.
+    """
+    mandateId = _validateInstanceAccess(instanceId, context)
+    iface = getGraphicalEditorInterface(context.user, mandateId, instanceId)
+    versions = iface.getVersions(workflowId)
+    return {"versions": versions}
+
+
+@router.post("/{instanceId}/workflows/{workflowId}/versions/draft")
+@limiter.limit("30/minute")
+def create_draft_version(
+    request: Request,
+    instanceId: str = Path(..., description="Feature instance ID"),
+    workflowId: str = Path(..., description="Workflow ID"),
+    context: RequestContext = Depends(getRequestContext),
+) -> dict:
+    """Create a new draft version from the workflow's current graph.
+
+    Raises HTTPException(404) when the workflow does not exist.
+    """
+    mandateId = _validateInstanceAccess(instanceId, context)
+    iface = getGraphicalEditorInterface(context.user, mandateId, instanceId)
+    version = iface.createDraftVersion(workflowId)
+    if not version:
+        raise HTTPException(status_code=404, detail=routeApiMsg("Workflow not found"))
+    return version
+
+
+@router.post("/{instanceId}/versions/{versionId}/publish")
+@limiter.limit("30/minute")
+def publish_version(
+    request: Request,
+    instanceId: str = Path(..., description="Feature instance ID"),
+    versionId: str = Path(..., description="Version ID"),
+    context: RequestContext = Depends(getRequestContext),
+) -> dict:
+    """Publish a draft version. Archives the previously published version.
+
+    Raises HTTPException(400) when the version is missing or not a draft.
+    """
+    mandateId = _validateInstanceAccess(instanceId, context)
+    iface = getGraphicalEditorInterface(context.user, mandateId, instanceId)
+    # userId is recorded as the publisher on the version record.
+    userId = str(context.user.id) if context.user else None
+    version = iface.publishVersion(versionId, userId=userId)
+    if not version:
+        raise HTTPException(status_code=400, detail=routeApiMsg("Version not found or not in draft status"))
+    return version
+
+
+@router.post("/{instanceId}/versions/{versionId}/unpublish")
+@limiter.limit("30/minute")
+def unpublish_version(
+    request: Request,
+    instanceId: str = Path(..., description="Feature instance ID"),
+    versionId: str = Path(..., description="Version ID"),
+    context: RequestContext = Depends(getRequestContext),
+) -> dict:
+    """Unpublish a version (revert to draft).
+
+    Raises HTTPException(400) when the version is missing or not published.
+    """
+    mandateId = _validateInstanceAccess(instanceId, context)
+    iface = getGraphicalEditorInterface(context.user, mandateId, instanceId)
+    version = iface.unpublishVersion(versionId)
+    if not version:
+        raise HTTPException(status_code=400, detail=routeApiMsg("Version not found or not published"))
+    return version
+
+
+@router.post("/{instanceId}/versions/{versionId}/archive")
+@limiter.limit("30/minute")
+def archive_version(
+    request: Request,
+    instanceId: str = Path(..., description="Feature instance ID"),
+    versionId: str = Path(..., description="Version ID"),
+    context: RequestContext = Depends(getRequestContext),
+) -> dict:
+    """Archive a version.
+
+    Raises HTTPException(404) when the version does not exist.
+    """
+    mandateId = _validateInstanceAccess(instanceId, context)
+    iface = getGraphicalEditorInterface(context.user, mandateId, instanceId)
+    version = iface.archiveVersion(versionId)
+    if not version:
+        raise HTTPException(status_code=404, detail=routeApiMsg("Version not found"))
+    return version
+
+
+# -------------------------------------------------------------------------
+# Templates
+# -------------------------------------------------------------------------
+
+
+@router.get("/{instanceId}/templates")
+@limiter.limit("60/minute")
+def get_templates(
+    request: Request,
+    instanceId: str = Path(..., description="Feature instance ID"),
+    scope: Optional[str] = Query(None, description="Filter by scope: user, instance, mandate, system"),
+    pagination: Optional[str] = Query(None, description="JSON-encoded PaginationParams object"),
+    context: RequestContext = Depends(getRequestContext),
+):
+    """List workflow templates with optional pagination.
+
+    Without a pagination parameter the full list is returned as
+    {"templates": [...]}; with one, a paged {"items", "pagination"}
+    envelope is returned instead (note the differing response shapes).
+    """
+    mandateId = _validateInstanceAccess(instanceId, context)
+    iface = getGraphicalEditorInterface(context.user, mandateId, instanceId)
+    templates = iface.getTemplates(scope=scope)
+
+    paginationParams = None
+    if pagination:
+        try:
+            paginationDict = json.loads(pagination)
+            if paginationDict:
+                paginationDict = normalize_pagination_dict(paginationDict)
+                paginationParams = PaginationParams(**paginationDict)
+        except (json.JSONDecodeError, ValueError) as e:
+            raise HTTPException(status_code=400, detail=f"Invalid pagination parameter: {str(e)}")
+
+    if paginationParams:
+        # Filter/sort in memory, then slice out the requested page.
+        filtered = _applyFiltersAndSort(templates, paginationParams)
+        totalItems = len(filtered)
+        totalPages = math.ceil(totalItems / paginationParams.pageSize) if totalItems > 0 else 0
+        startIdx = (paginationParams.page - 1) * paginationParams.pageSize
+        endIdx = startIdx + paginationParams.pageSize
+        return {
+            "items": filtered[startIdx:endIdx],
+            "pagination": PaginationMetadata(
+                currentPage=paginationParams.page, pageSize=paginationParams.pageSize,
+                totalItems=totalItems, totalPages=totalPages,
+                sort=paginationParams.sort, filters=paginationParams.filters,
+            ).model_dump(),
+        }
+    return {"templates": templates}
+
+
+@router.post("/{instanceId}/templates/from-workflow")
+@limiter.limit("30/minute")
+def create_template_from_workflow(
+    request: Request,
+    instanceId: str = Path(..., description="Feature instance ID"),
+    body: dict = Body(..., description="{ workflowId, scope? }"),
+    context: RequestContext = Depends(getRequestContext),
+) -> dict:
+    """Create a template from an existing workflow.
+
+    Body requires workflowId; scope defaults to "user". Raises 400 when
+    workflowId is missing and 404 when the workflow does not exist.
+    """
+    mandateId = _validateInstanceAccess(instanceId, context)
+    workflowId = body.get("workflowId")
+    scope = body.get("scope", "user")
+    if not workflowId:
+        raise HTTPException(status_code=400, detail=routeApiMsg("workflowId required"))
+    iface = getGraphicalEditorInterface(context.user, mandateId, instanceId)
+    template = iface.createTemplateFromWorkflow(workflowId, scope=scope)
+    if not template:
+        raise HTTPException(status_code=404, detail=routeApiMsg("Workflow not found"))
+    return template
+
+
+@router.post("/{instanceId}/templates/{templateId}/copy")
+@limiter.limit("30/minute")
+def copy_template(
+    request: Request,
+    instanceId: str = Path(..., description="Feature instance ID"),
+    templateId: str = Path(..., description="Template ID"),
+    context: RequestContext = Depends(getRequestContext),
+) -> dict:
+    """Copy a template to a new user-owned workflow.
+
+    Raises HTTPException(404) when the template does not exist.
+    """
+    mandateId = _validateInstanceAccess(instanceId, context)
+    iface = getGraphicalEditorInterface(context.user, mandateId, instanceId)
+    workflow = iface.copyTemplateToUser(templateId)
+    if not workflow:
+        raise HTTPException(status_code=404, detail=routeApiMsg("Template not found"))
+    return workflow
+
+
+@router.post("/{instanceId}/templates/{templateId}/share")
+@limiter.limit("30/minute")
+def share_template(
+    request: Request,
+    instanceId: str = Path(..., description="Feature instance ID"),
+    templateId: str = Path(..., description="Template ID"),
+    body: dict = Body(..., description="{ scope }"),
+    context: RequestContext = Depends(getRequestContext),
+) -> dict:
+    """Share a template by changing its scope.
+
+    Body scope must be one of user/instance/mandate/system; raises 400
+    otherwise, and 404 when the template does not exist.
+    """
+    mandateId = _validateInstanceAccess(instanceId, context)
+    scope = body.get("scope")
+    if not scope or scope not in ("user", "instance", "mandate", "system"):
+        raise HTTPException(status_code=400, detail=routeApiMsg("scope must be user, instance, mandate, or system"))
+    iface = getGraphicalEditorInterface(context.user, mandateId, instanceId)
+    template = iface.shareTemplate(templateId, scope=scope)
+    if not template:
+        raise HTTPException(status_code=404, detail=routeApiMsg("Template not found"))
+    return template
+
+
+# -------------------------------------------------------------------------
+# AI Chat for Editor
+# -------------------------------------------------------------------------
+
+
+@router.post("/{instanceId}/{workflowId}/chat/stream")
+@limiter.limit("30/minute")
+async def post_editor_chat(
+    request: Request,
+    instanceId: str = Path(..., description="Feature instance ID"),
+    workflowId: str = Path(..., description="Workflow ID"),
+    body: dict = Body(..., description="{ message, conversationHistory?, userLanguage? }"),
+    context: RequestContext = Depends(getRequestContext),
+):
+    """AI chat endpoint for the editor with SSE streaming. Uses workflow tools to mutate the graph.
+
+    Spawns the agent as a background task that feeds a per-request SSE
+    queue; this handler only streams the queue back to the client.
+    Raises 400 without a message and 404 for an unknown workflow.
+    """
+    mandateId = _validateInstanceAccess(instanceId, context)
+    message = body.get("message", "")
+    if not message:
+        raise HTTPException(status_code=400, detail=routeApiMsg("message required"))
+
+    iface = getGraphicalEditorInterface(context.user, mandateId, instanceId)
+    wf = iface.getWorkflow(workflowId)
+    if not wf:
+        raise HTTPException(status_code=404, detail=routeApiMsg("Workflow not found"))
+
+    userLanguage = body.get("userLanguage", "de")
+    conversationHistory = body.get("conversationHistory") or []
+    fileIds = body.get("fileIds") or []
+    dataSourceIds = body.get("dataSourceIds") or []
+    featureDataSourceIds = body.get("featureDataSourceIds") or []
+
+    from modules.serviceCenter.core.serviceStreaming import get_event_manager
+    sseEventManager = get_event_manager()
+    # id(request) disambiguates concurrent chats on the same workflow.
+    queueId = f"ge-chat-{workflowId}-{id(request)}"
+    sseEventManager.create_queue(queueId)
+
+    # Run the agent concurrently; it emits events into the queue consumed
+    # by the SSE generator below.
+    agentTask = asyncio.ensure_future(
+        _runEditorAgent(
+            workflowId=workflowId,
+            queueId=queueId,
+            prompt=message,
+            instanceId=instanceId,
+            user=context.user,
+            mandateId=mandateId,
+            sseEventManager=sseEventManager,
+            userLanguage=userLanguage,
+            conversationHistory=conversationHistory,
+            fileIds=fileIds,
+            dataSourceIds=dataSourceIds,
+            featureDataSourceIds=featureDataSourceIds,
+        )
+    )
+    sseEventManager.register_agent_task(queueId, agentTask)
+
+    async def _sseGenerator():
+        queue = sseEventManager.get_queue(queueId)
+        if not queue:
+            return
+        while True:
+            try:
+                event = await asyncio.wait_for(queue.get(), timeout=120)
+            except asyncio.TimeoutError:
+                # No agent output within 120 s: keep the connection alive.
+                yield "data: {\"type\": \"keepalive\"}\n\n"
+                continue
+            if event is None:
+                break
+            ssePayload = event.get("data", event) if isinstance(event, dict) else event
+            yield f"data: {json.dumps(ssePayload, default=str)}\n\n"
+            eventType = ssePayload.get("type", "") if isinstance(ssePayload, dict) else ""
+            if eventType in ("complete", "error", "stopped"):
+                break
+        # Delayed cleanup so trailing agent events do not hit a dead queue.
+        await sseEventManager.cleanup(queueId, delay=30)
+
+    return StreamingResponse(
+        _sseGenerator(),
+        media_type="text/event-stream",
+        headers={
+            "Cache-Control": "no-cache",
+            "Connection": "keep-alive",
+            # Disable nginx proxy buffering so events flush immediately.
+            "X-Accel-Buffering": "no",
+        },
+    )
+
+
+async def _runEditorAgent(
+    workflowId: str,
+    queueId: str,
+    prompt: str,
+    instanceId: str,
+    user=None,
+    mandateId: str = "",
+    sseEventManager=None,
+    userLanguage: str = "de",
+    conversationHistory: List[Dict[str, Any]] = None,
+    fileIds: List[str] = None,
+    dataSourceIds: List[str] = None,
+    featureDataSourceIds: List[str] = None,
+):
+    """Run the serviceAgent loop with workflow toolbox and forward events to the SSE queue.
+
+    Emits a terminal "complete" (normal end), "stopped" (task cancelled)
+    or "error" event so the SSE consumer knows when to close. List
+    parameters default to None and are replaced with [] at the call sites
+    below, avoiding shared mutable defaults.
+    """
+    try:
+        from modules.serviceCenter import getService
+        from modules.serviceCenter.context import ServiceCenterContext
+        from modules.serviceCenter.services.serviceAgent.datamodelAgent import AgentEventTypeEnum
+
+        ctx = ServiceCenterContext(
+            user=user,
+            mandate_id=mandateId,
+            feature_instance_id=instanceId,
+            workflow_id=workflowId,
+            feature_code="graphicalEditor",
+        )
+        agentService = getService("agent", ctx)
+
+        systemPrompt = (
+            "You are a workflow editor assistant. The user describes changes to a workflow graph. "
+            "Use the available workflow tools (readWorkflowGraph, addNode, removeNode, connectNodes, "
+            "setNodeParameter, listAvailableNodeTypes, validateGraph) to modify the graph. "
+            "Always read the current graph first before making changes. "
+            "Respond concisely and confirm what you changed."
+        )
+
+        # Append data-source context sections to the prompt when provided.
+        enrichedPrompt = prompt
+        if dataSourceIds:
+            from modules.features.workspace.routeFeatureWorkspace import _buildDataSourceContext
+            chatSvc = getService("chat", ctx)
+            dsInfo = _buildDataSourceContext(chatSvc, dataSourceIds)
+            if dsInfo:
+                enrichedPrompt = f"{prompt}\n\n[Active Data Sources]\n{dsInfo}"
+
+        if featureDataSourceIds:
+            from modules.features.workspace.routeFeatureWorkspace import _buildFeatureDataSourceContext
+            fdsInfo = _buildFeatureDataSourceContext(featureDataSourceIds)
+            if fdsInfo:
+                enrichedPrompt = f"{enrichedPrompt}\n\n[Attached Feature Data Sources]\n{fdsInfo}"
+
+        # NOTE(review): accumulatedText collects CHUNK content but is never
+        # read afterwards - confirm whether it is intentionally unused.
+        accumulatedText = ""
+
+        async for event in agentService.runAgent(
+            prompt=enrichedPrompt,
+            fileIds=fileIds or [],
+            workflowId=workflowId,
+            userLanguage=userLanguage,
+            conversationHistory=conversationHistory or [],
+            toolSet="core",
+            additionalTools=None,
+            systemPromptOverride=systemPrompt,
+        ):
+            # Client may cancel via the event manager; stop forwarding then.
+            if sseEventManager.is_cancelled(queueId):
+                logger.info("Editor chat agent cancelled for workflow %s", workflowId)
+                break
+
+            if event.type == AgentEventTypeEnum.CHUNK and event.content:
+                accumulatedText += event.content
+
+            sseEvent = {
+                "type": event.type.value if hasattr(event.type, "value") else event.type,
+                "workflowId": workflowId,
+            }
+            if event.content:
+                sseEvent["content"] = event.content
+            if event.data:
+                sseEvent["item"] = event.data
+
+            await sseEventManager.emit_event(queueId, sseEvent["type"], sseEvent)
+
+            if event.type in (AgentEventTypeEnum.FINAL, AgentEventTypeEnum.ERROR):
+                break
+
+        await sseEventManager.emit_event(queueId, "complete", {
+            "type": "complete",
+            "workflowId": workflowId,
+        })
+
+    except asyncio.CancelledError:
+        logger.info("Editor chat agent task cancelled for workflow %s", workflowId)
+        await sseEventManager.emit_event(queueId, "stopped", {
+            "type": "stopped",
+            "workflowId": workflowId,
+        })
+
+    except Exception as e:
+        logger.error("Editor chat agent error: %s", e, exc_info=True)
+        await sseEventManager.emit_event(queueId, "error", {
+            "type": "error",
+            "content": str(e),
+            "workflowId": workflowId,
+        })
+    finally:
+        # NOTE(review): calls a private method of the event manager
+        # (_unregister_agent_task) - confirm a public API exists for this.
+        sseEventManager._unregister_agent_task(queueId)
+
+
+# -------------------------------------------------------------------------
+# Connections and Browse (for Email/SharePoint node config)
+# -------------------------------------------------------------------------
+
+
+def _buildResolverDbInterface(chatService):
+    """Build a DB adapter that ConnectorResolver can use to load UserConnections.
+
+    Wraps the chat service's interfaceDbApp (when present) in a minimal
+    adapter exposing getUserConnection; otherwise falls back to the
+    service's interfaceDbComponent (which may be None).
+    """
+    class _ResolverDbAdapter:
+        def __init__(self, appInterface):
+            self._app = appInterface
+
+        def getUserConnection(self, connectionId: str):
+            # Delegate only when the app interface actually provides the lookup.
+            if hasattr(self._app, "getUserConnectionById"):
+                return self._app.getUserConnectionById(connectionId)
+            return None
+
+    appIf = getattr(chatService, "interfaceDbApp", None)
+    if appIf:
+        return _ResolverDbAdapter(appIf)
+    return getattr(chatService, "interfaceDbComponent", None)
+
+
+@router.get("/{instanceId}/connections")
+@limiter.limit("300/minute")
+def list_connections(
+    request: Request,
+    instanceId: str = Path(..., description="Feature instance ID"),
+    context: RequestContext = Depends(getRequestContext),
+) -> dict:
+    """Return the user's active connections (UserConnections) for Email/SharePoint node config.
+
+    Serializes each connection to a small dict, converting enum-valued
+    authority/status fields to their plain values.
+    """
+    mandateId = _validateInstanceAccess(instanceId, context)
+    from modules.serviceCenter import getService
+    from modules.serviceCenter.context import ServiceCenterContext
+    ctx = ServiceCenterContext(
+        user=context.user,
+        # Prefer the mandate from the request context; fall back to the
+        # instance's mandate resolved above.
+        mandate_id=str(context.mandateId) if context.mandateId else mandateId,
+        feature_instance_id=instanceId,
+    )
+    chatService = getService("chat", ctx)
+    connections = chatService.getUserConnections()
+    items = []
+    for c in connections or []:
+        # Connections may be dicts or pydantic models; normalize to a dict.
+        conn = c if isinstance(c, dict) else (c.model_dump() if hasattr(c, "model_dump") else {})
+        authority = conn.get("authority")
+        if hasattr(authority, "value"):
+            authority = authority.value
+        status = conn.get("status")
+        if hasattr(status, "value"):
+            status = status.value
+        items.append({
+            "id": conn.get("id"),
+            "authority": authority,
+            "externalUsername": conn.get("externalUsername"),
+            "externalEmail": conn.get("externalEmail"),
+            "status": status,
+        })
+    return {"connections": items}
+
+
+@router.get("/{instanceId}/connections/{connectionId}/services")
+@limiter.limit("120/minute")
+async def list_connection_services(
+    request: Request,
+    instanceId: str = Path(..., description="Feature instance ID"),
+    connectionId: str = Path(..., description="Connection ID"),
+    context: RequestContext = Depends(getRequestContext),
+) -> dict:
+    """Return the available services for a specific UserConnection.
+
+    On any resolver/provider failure, responds 400 with
+    {"services": [], "error": ...} instead of raising.
+    """
+    mandateId = _validateInstanceAccess(instanceId, context)
+    try:
+        from modules.connectors.connectorResolver import ConnectorResolver
+        from modules.serviceCenter import getService as getSvc
+        from modules.serviceCenter.context import ServiceCenterContext
+        ctx = ServiceCenterContext(
+            user=context.user,
+            mandate_id=str(context.mandateId) if context.mandateId else mandateId,
+            feature_instance_id=instanceId,
+        )
+        chatService = getSvc("chat", ctx)
+        securityService = getSvc("security", ctx)
+        dbInterface = _buildResolverDbInterface(chatService)
+        resolver = ConnectorResolver(securityService, dbInterface)
+        provider = await resolver.resolve(connectionId)
+        services = provider.getAvailableServices()
+        # Display labels/icons for known services; unknown ones fall back
+        # to the raw service name and a generic folder icon.
+        _serviceLabels = {
+            "sharepoint": "SharePoint",
+            "clickup": "ClickUp",
+            "outlook": "Outlook",
+            "teams": "Teams",
+            "onedrive": "OneDrive",
+            "drive": "Google Drive",
+            "gmail": "Gmail",
+            "files": "Files (FTP)",
+        }
+        _serviceIcons = {
+            "sharepoint": "sharepoint",
+            "clickup": "folder",
+            "outlook": "mail",
+            "teams": "chat",
+            "onedrive": "cloud",
+            "drive": "cloud",
+            "gmail": "mail",
+            "files": "folder",
+        }
+        items = [
+            {"service": s, "label": _serviceLabels.get(s, s), "icon": _serviceIcons.get(s, "folder")}
+            for s in services
+        ]
+        return {"services": items}
+    except Exception as e:
+        logger.error(f"Error listing services for connection {connectionId}: {e}")
+        return JSONResponse({"services": [], "error": str(e)}, status_code=400)
+
+
+@router.get("/{instanceId}/connections/{connectionId}/browse")
+@limiter.limit("300/minute")
+async def browse_connection_service(
+    request: Request,
+    instanceId: str = Path(..., description="Feature instance ID"),
+    connectionId: str = Path(..., description="Connection ID"),
+    service: str = Query(..., description="Service name (e.g. sharepoint, onedrive, outlook)"),
+    path: str = Query("/", description="Path within the service to browse"),
+    context: RequestContext = Depends(getRequestContext),
+) -> dict:
+    """Browse folders/items within a connection's service at a given path.
+
+    On any resolver/adapter failure, responds 400 with
+    {"items": [], "error": ...} instead of raising.
+    """
+    mandateId = _validateInstanceAccess(instanceId, context)
+    try:
+        from modules.connectors.connectorResolver import ConnectorResolver
+        from modules.serviceCenter import getService as getSvc
+        from modules.serviceCenter.context import ServiceCenterContext
+        ctx = ServiceCenterContext(
+            user=context.user,
+            mandate_id=str(context.mandateId) if context.mandateId else mandateId,
+            feature_instance_id=instanceId,
+        )
+        chatService = getSvc("chat", ctx)
+        securityService = getSvc("security", ctx)
+        dbInterface = _buildResolverDbInterface(chatService)
+        resolver = ConnectorResolver(securityService, dbInterface)
+        adapter = await resolver.resolveService(connectionId, service)
+        entries = await adapter.browse(path, filter=None)
+        items = []
+        for entry in (entries or []):
+            items.append({
+                "name": entry.name,
+                "path": entry.path,
+                "isFolder": entry.isFolder,
+                "size": entry.size,
+                "mimeType": entry.mimeType,
+                "metadata": entry.metadata if hasattr(entry, "metadata") else {},
+            })
+        return {"items": items, "path": path, "service": service}
+    except Exception as e:
+        logger.error(f"Error browsing {service} for connection {connectionId} at '{path}': {e}")
+        return JSONResponse({"items": [], "error": str(e)}, status_code=400)
+
+
+# -------------------------------------------------------------------------
+# Workflow CRUD
+# -------------------------------------------------------------------------
+
+
+def _get_node_label_from_graph(graph: dict, nodeId: str) -> str:
+    """Extract human-readable label for a node from graph.
+
+    Tries, in order: node title, parameters.config.title/label,
+    parameters.title/label, node type, then the nodeId itself. Returns
+    "" only when both graph and nodeId are falsy.
+    """
+    if not graph or not nodeId:
+        return nodeId or ""
+    nodes = graph.get("nodes") or []
+    for n in nodes:
+        if n.get("id") == nodeId:
+            params = n.get("parameters") or {}
+            config = params.get("config") or {}
+            # "config" may legitimately be a non-dict value; guard before lookup.
+            if isinstance(config, dict):
+                label = config.get("title") or config.get("label")
+            else:
+                label = None
+            return (
+                n.get("title")
+                or label
+                or params.get("title")
+                or params.get("label")
+                or n.get("type", "")
+                or nodeId
+            )
+    # Node not found in the graph: fall back to its id.
+    return nodeId or ""
+
+
+@router.get("/{instanceId}/workflows")
+@limiter.limit("60/minute")
+def get_workflows(
+    request: Request,
+    instanceId: str = Path(..., description="Feature instance ID"),
+    active: Optional[bool] = Query(None, description="Filter by active: true|false"),
+    pagination: Optional[str] = Query(None, description="JSON-encoded PaginationParams object"),
+    context: RequestContext = Depends(getRequestContext),
+):
+    """List all workflows for this feature instance.
+
+    Each workflow is enriched with run statistics (runCount, isRunning,
+    runStatus, stuck-node info, lastStartedAt). Without pagination the
+    response is {"workflows": [...]}; with pagination it is a paged
+    {"items", "pagination"} envelope (note the differing shapes).
+    """
+    mandateId = _validateInstanceAccess(instanceId, context)
+    iface = getGraphicalEditorInterface(context.user, mandateId, instanceId)
+    items = iface.getWorkflows(active=active)
+    enriched = []
+    for wf in items:
+        wf_id = wf.get("id")
+        # One run-list query per workflow; aggregates are computed in memory.
+        runs = iface.getRunsByWorkflow(wf_id) if wf_id else []
+        run_count = len(runs)
+        active_run = None
+        last_started_at = None
+        for r in runs:
+            ts = r.get("sysCreatedAt")
+            if ts and (last_started_at is None or ts > last_started_at):
+                last_started_at = ts
+            # Last matching run wins when several are running/paused.
+            if r.get("status") in ("running", "paused"):
+                active_run = r
+        stuck_at_node_id = active_run.get("currentNodeId") if active_run else None
+        stuck_at_node_label = ""
+        if stuck_at_node_id and wf.get("graph"):
+            stuck_at_node_label = _get_node_label_from_graph(wf["graph"], stuck_at_node_id)
+        enriched.append({
+            **wf,
+            "runCount": run_count,
+            "isRunning": active_run is not None,
+            "runStatus": active_run.get("status") if active_run else None,
+            "stuckAtNodeId": stuck_at_node_id,
+            "stuckAtNodeLabel": stuck_at_node_label or stuck_at_node_id or "",
+            "createdAt": wf.get("sysCreatedAt"),
+            "lastStartedAt": last_started_at,
+        })
+
+    paginationParams = None
+    if pagination:
+        try:
+            paginationDict = json.loads(pagination)
+            if paginationDict:
+                paginationDict = normalize_pagination_dict(paginationDict)
+                paginationParams = PaginationParams(**paginationDict)
+        except (json.JSONDecodeError, ValueError) as e:
+            raise HTTPException(status_code=400, detail=f"Invalid pagination parameter: {str(e)}")
+
+    if paginationParams:
+        # Filter/sort in memory, then slice out the requested page.
+        filtered = _applyFiltersAndSort(enriched, paginationParams)
+        totalItems = len(filtered)
+        totalPages = math.ceil(totalItems / paginationParams.pageSize) if totalItems > 0 else 0
+        startIdx = (paginationParams.page - 1) * paginationParams.pageSize
+        endIdx = startIdx + paginationParams.pageSize
+        return {
+            "items": filtered[startIdx:endIdx],
+            "pagination": PaginationMetadata(
+                currentPage=paginationParams.page, pageSize=paginationParams.pageSize,
+                totalItems=totalItems, totalPages=totalPages,
+                sort=paginationParams.sort, filters=paginationParams.filters,
+            ).model_dump(),
+        }
+    return {"workflows": enriched}
+
+
+@router.get("/{instanceId}/workflows/{workflowId}")
+@limiter.limit("60/minute")
+def get_workflow(
+ request: Request,
+ instanceId: str = Path(..., description="Feature instance ID"),
+ workflowId: str = Path(..., description="Workflow ID"),
+ context: RequestContext = Depends(getRequestContext),
+) -> dict:
+ """Get a single workflow by ID."""
+ mandateId = _validateInstanceAccess(instanceId, context)
+ iface = getGraphicalEditorInterface(context.user, mandateId, instanceId)
+ wf = iface.getWorkflow(workflowId)
+ if not wf:
+ raise HTTPException(status_code=404, detail=routeApiMsg("Workflow not found"))
+ return wf
+
+
+@router.post("/{instanceId}/workflows")
+@limiter.limit("30/minute")
+def create_workflow(
+ request: Request,
+ instanceId: str = Path(..., description="Feature instance ID"),
+ body: dict = Body(..., description="{ label, graph }"),
+ context: RequestContext = Depends(getRequestContext),
+) -> dict:
+ """Create a new workflow."""
+ mandateId = _validateInstanceAccess(instanceId, context)
+ iface = getGraphicalEditorInterface(context.user, mandateId, instanceId)
+ created = iface.createWorkflow(body)
+ return created
+
+
+@router.put("/{instanceId}/workflows/{workflowId}")
+@limiter.limit("30/minute")
+def update_workflow(
+ request: Request,
+ instanceId: str = Path(..., description="Feature instance ID"),
+ workflowId: str = Path(..., description="Workflow ID"),
+ body: dict = Body(..., description="{ label?, graph? }"),
+ context: RequestContext = Depends(getRequestContext),
+) -> dict:
+ """Update a workflow."""
+ mandateId = _validateInstanceAccess(instanceId, context)
+ iface = getGraphicalEditorInterface(context.user, mandateId, instanceId)
+ updated = iface.updateWorkflow(workflowId, body)
+ if not updated:
+ raise HTTPException(status_code=404, detail=routeApiMsg("Workflow not found"))
+ return updated
+
+
+@router.delete("/{instanceId}/workflows/{workflowId}")
+@limiter.limit("30/minute")
+def delete_workflow(
+ request: Request,
+ instanceId: str = Path(..., description="Feature instance ID"),
+ workflowId: str = Path(..., description="Workflow ID"),
+ context: RequestContext = Depends(getRequestContext),
+) -> dict:
+ """Delete a workflow."""
+ mandateId = _validateInstanceAccess(instanceId, context)
+ iface = getGraphicalEditorInterface(context.user, mandateId, instanceId)
+ if not iface.deleteWorkflow(workflowId):
+ raise HTTPException(status_code=404, detail=routeApiMsg("Workflow not found"))
+ return {"success": True}
+
+
+# -------------------------------------------------------------------------
+# Runs and Resume
+# -------------------------------------------------------------------------
+
+
+@router.get("/{instanceId}/runs/completed")
+@limiter.limit("60/minute")
+def get_completed_runs(
+ request: Request,
+ instanceId: str = Path(..., description="Feature instance ID"),
+ limit: int = Query(20, ge=1, le=50),
+ context: RequestContext = Depends(getRequestContext),
+) -> dict:
+ """Get recently completed runs with output."""
+ mandateId = _validateInstanceAccess(instanceId, context)
+ iface = getGraphicalEditorInterface(context.user, mandateId, instanceId)
+ runs = iface.getRecentCompletedRuns(limit=limit)
+ return {"runs": runs}
+
+
+@router.get("/{instanceId}/workflows/{workflowId}/runs")
+@limiter.limit("60/minute")
+def get_workflow_runs(
+ request: Request,
+ instanceId: str = Path(..., description="Feature instance ID"),
+ workflowId: str = Path(..., description="Workflow ID"),
+ context: RequestContext = Depends(getRequestContext),
+) -> dict:
+ """Get runs for a workflow."""
+ mandateId = _validateInstanceAccess(instanceId, context)
+ iface = getGraphicalEditorInterface(context.user, mandateId, instanceId)
+ if not iface.getWorkflow(workflowId):
+ raise HTTPException(status_code=404, detail=routeApiMsg("Workflow not found"))
+ runs = iface.getRunsByWorkflow(workflowId)
+ return {"runs": runs}
+
+
+@router.get("/{instanceId}/runs/{runId}/steps")
+@limiter.limit("60/minute")
+def get_run_steps(
+ request: Request,
+ instanceId: str = Path(..., description="Feature instance ID"),
+ runId: str = Path(..., description="Run ID"),
+ context: RequestContext = Depends(getRequestContext),
+) -> dict:
+ """Get step logs for a run (AutoStepLog entries)."""
+ mandateId = _validateInstanceAccess(instanceId, context)
+ iface = getGraphicalEditorInterface(context.user, mandateId, instanceId)
+ from modules.features.graphicalEditor.datamodelFeatureGraphicalEditor import AutoStepLog
+ if not iface.db._ensureTableExists(AutoStepLog):
+ return {"steps": []}
+ records = iface.db.getRecordset(AutoStepLog, recordFilter={"runId": runId})
+ steps = [dict(r) for r in records] if records else []
+ steps.sort(key=lambda s: s.get("startedAt") or 0)
+ return {"steps": steps}
+
+
+# -------------------------------------------------------------------------
+# Tasks
+# -------------------------------------------------------------------------
+
+
+@router.get("/{instanceId}/tasks")
+@limiter.limit("60/minute")
+def get_tasks(
+ request: Request,
+ instanceId: str = Path(..., description="Feature instance ID"),
+ workflowId: str = Query(None, description="Filter by workflow ID"),
+ status: str = Query(None, description="Filter: pending, completed, rejected"),
+ context: RequestContext = Depends(getRequestContext),
+) -> dict:
+ """Get tasks assigned to current user."""
+ mandateId = _validateInstanceAccess(instanceId, context)
+ iface = getGraphicalEditorInterface(context.user, mandateId, instanceId)
+ assigneeId = str(context.user.id) if context.user else None
+ items = iface.getTasks(workflowId=workflowId, status=status, assigneeId=assigneeId)
+ workflows = {w["id"]: w for w in iface.getWorkflows()}
+ enriched = []
+ for t in items:
+ wf = workflows.get(t.get("workflowId") or "")
+ enriched.append({
+ **t,
+ "workflowLabel": wf.get("label", t.get("workflowId", "")) if wf else t.get("workflowId", ""),
+ "createdAt": t.get("sysCreatedAt"),
+ })
+ return {"tasks": enriched}
+
+
+@router.post("/{instanceId}/tasks/{taskId}/complete")
+@limiter.limit("30/minute")
+async def complete_task(
+ request: Request,
+ instanceId: str = Path(..., description="Feature instance ID"),
+ taskId: str = Path(..., description="Task ID"),
+ body: dict = Body(..., description="{ result }"),
+ context: RequestContext = Depends(getRequestContext),
+) -> dict:
+ """Complete a task and resume the run."""
+ mandateId = _validateInstanceAccess(instanceId, context)
+ iface = getGraphicalEditorInterface(context.user, mandateId, instanceId)
+ task = iface.getTask(taskId)
+ if not task:
+ raise HTTPException(status_code=404, detail=routeApiMsg("Task not found"))
+ runId = task.get("runId")
+ result = body.get("result")
+ if result is None:
+ raise HTTPException(status_code=400, detail=routeApiMsg("result required"))
+ run = iface.getRun(runId)
+ if not run:
+ raise HTTPException(status_code=404, detail=routeApiMsg("Run not found"))
+ if task.get("status") != "pending":
+ raise HTTPException(status_code=400, detail=routeApiMsg("Task already completed"))
+ iface.updateTask(taskId, status="completed", result=result)
+ nodeId = task.get("nodeId")
+ nodeOutputs = dict(run.get("nodeOutputs") or {})
+ nodeOutputs[nodeId] = result
+ workflowId = run.get("workflowId")
+ wf = iface.getWorkflow(workflowId) if workflowId else None
+ if not wf or not wf.get("graph"):
+ raise HTTPException(status_code=400, detail=routeApiMsg("Workflow graph not found"))
+ graph = wf["graph"]
+ services = getGraphicalEditorServices(context.user, mandateId=mandateId, featureInstanceId=instanceId)
+ return await executeGraph(
+ graph=graph,
+ services=services,
+ workflowId=workflowId,
+ instanceId=instanceId,
+ userId=str(context.user.id) if context.user else None,
+ mandateId=mandateId,
+ automation2_interface=iface,
+ initialNodeOutputs=nodeOutputs,
+ startAfterNodeId=nodeId,
+ runId=runId,
+ )
+
+
+# -------------------------------------------------------------------------
+# Monitoring / Metrics
+# -------------------------------------------------------------------------
+
+
+@router.get("/{instanceId}/metrics")
+@limiter.limit("60/minute")
+def get_metrics(
+ request: Request,
+ instanceId: str = Path(..., description="Feature instance ID"),
+ context: RequestContext = Depends(getRequestContext),
+) -> dict:
+ """Aggregated metrics for the monitoring dashboard."""
+ mandateId = _validateInstanceAccess(instanceId, context)
+ iface = getGraphicalEditorInterface(context.user, mandateId, instanceId)
+
+ from modules.features.graphicalEditor.datamodelFeatureGraphicalEditor import (
+ AutoWorkflow, AutoRun, AutoStepLog, AutoTask,
+ )
+
+ workflows = iface.db.getRecordset(AutoWorkflow, recordFilter={
+ "mandateId": mandateId, "featureInstanceId": instanceId, "isTemplate": False,
+ }) or []
+ runs = iface.db.getRecordset(AutoRun, recordFilter={
+ "workflowId": {"$in": [w.get("id") for w in workflows]} if workflows else "__none__",
+ }) or []
+ tasks = iface.db.getRecordset(AutoTask, recordFilter={
+ "workflowId": {"$in": [w.get("id") for w in workflows]} if workflows else "__none__",
+ }) or []
+
+ runsByStatus = {}
+ totalTokens = 0
+ totalCredits = 0.0
+ for r in runs:
+ s = r.get("status", "unknown")
+ runsByStatus[s] = runsByStatus.get(s, 0) + 1
+ totalTokens += r.get("costTokens", 0) or 0
+ totalCredits += r.get("costCredits", 0.0) or 0.0
+
+ tasksByStatus = {}
+ for t in tasks:
+ s = t.get("status", "unknown")
+ tasksByStatus[s] = tasksByStatus.get(s, 0) + 1
+
+ return {
+ "workflowCount": len(workflows),
+ "activeWorkflows": sum(1 for w in workflows if w.get("active")),
+ "totalRuns": len(runs),
+ "runsByStatus": runsByStatus,
+ "totalTasks": len(tasks),
+ "tasksByStatus": tasksByStatus,
+ "totalTokens": totalTokens,
+ "totalCredits": round(totalCredits, 4),
+ }
diff --git a/modules/features/neutralization/datamodelFeatureNeutralizer.py b/modules/features/neutralization/datamodelFeatureNeutralizer.py
index cc111950..cbaae3c4 100644
--- a/modules/features/neutralization/datamodelFeatureNeutralizer.py
+++ b/modules/features/neutralization/datamodelFeatureNeutralizer.py
@@ -7,7 +7,7 @@ from enum import Enum
from typing import Optional
from pydantic import BaseModel, Field
from modules.datamodels.datamodelBase import PowerOnModel
-from modules.shared.attributeUtils import registerModelLabels
+from modules.shared.i18nRegistry import i18nModel
class DataScope(str, Enum):
@@ -17,83 +17,128 @@ class DataScope(str, Enum):
GLOBAL = "global"
+@i18nModel("Daten-Neutralisierung Konfiguration")
class DataNeutraliserConfig(PowerOnModel):
- id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Unique ID of the configuration", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False})
- mandateId: str = Field(description="ID of the mandate this configuration belongs to", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": True})
- featureInstanceId: str = Field(description="ID of the feature instance this configuration belongs to", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": True})
- userId: str = Field(description="ID of the user who created this configuration", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": True})
- enabled: bool = Field(default=True, description="Whether data neutralization is enabled", json_schema_extra={"frontend_type": "checkbox", "frontend_readonly": False, "frontend_required": False})
- scope: str = Field(default="personal", description="Data visibility scope: personal, featureInstance, mandate, global", json_schema_extra={"frontend_type": "select", "frontend_readonly": False, "frontend_required": False, "frontend_options": [
- {"value": "personal", "label": {"en": "Personal", "de": "Persönlich"}},
- {"value": "featureInstance", "label": {"en": "Feature Instance", "de": "Feature-Instanz"}},
- {"value": "mandate", "label": {"en": "Mandate", "de": "Mandant"}},
- {"value": "global", "label": {"en": "Global", "de": "Global"}},
- ]})
- neutralizationStatus: str = Field(default="not_required", description="Status of neutralization: pending, completed, failed, not_required", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False})
- namesToParse: str = Field(default="", description="Multiline list of names to parse for neutralization", json_schema_extra={"frontend_type": "textarea", "frontend_readonly": False, "frontend_required": False})
- sharepointSourcePath: str = Field(default="", description="SharePoint path to read files for neutralization", json_schema_extra={"frontend_type": "text", "frontend_readonly": False, "frontend_required": False})
- sharepointTargetPath: str = Field(default="", description="SharePoint path to store neutralized files", json_schema_extra={"frontend_type": "text", "frontend_readonly": False, "frontend_required": False})
-registerModelLabels(
- "DataNeutraliserConfig",
- {"en": "Data Neutralization Config", "fr": "Configuration de neutralisation des données"},
- {
- "id": {"en": "ID", "fr": "ID"},
- "mandateId": {"en": "Mandate ID", "fr": "ID de mandat"},
- "featureInstanceId": {"en": "Feature Instance ID", "fr": "ID de l'instance de fonctionnalité"},
- "userId": {"en": "User ID", "fr": "ID utilisateur"},
- "enabled": {"en": "Enabled", "fr": "Activé"},
- "scope": {"en": "Scope", "fr": "Portée"},
- "neutralizationStatus": {"en": "Neutralization Status", "fr": "Statut de neutralisation"},
- "namesToParse": {"en": "Names to Parse", "fr": "Noms à analyser"},
- "sharepointSourcePath": {"en": "Source Path", "fr": "Chemin source"},
- "sharepointTargetPath": {"en": "Target Path", "fr": "Chemin cible"},
- },
-)
+    """Konfiguration für die Daten-Neutralisierung."""
+ id: str = Field(
+ default_factory=lambda: str(uuid.uuid4()),
+ description="Unique ID of the configuration",
+ json_schema_extra={"label": "ID", "frontend_type": "text", "frontend_readonly": True, "frontend_required": False},
+ )
+ mandateId: str = Field(
+ description="ID of the mandate this configuration belongs to",
+ json_schema_extra={"label": "Mandanten-ID", "frontend_type": "text", "frontend_readonly": True, "frontend_required": True},
+ )
+ featureInstanceId: str = Field(
+ description="ID of the feature instance this configuration belongs to",
+ json_schema_extra={"label": "Feature-Instanz-ID", "frontend_type": "text", "frontend_readonly": True, "frontend_required": True},
+ )
+ userId: str = Field(
+ description="ID of the user who created this configuration",
+ json_schema_extra={"label": "Benutzer-ID", "frontend_type": "text", "frontend_readonly": True, "frontend_required": True},
+ )
+ enabled: bool = Field(
+ default=True,
+ description="Whether data neutralization is enabled",
+ json_schema_extra={"label": "Aktiviert", "frontend_type": "checkbox", "frontend_readonly": False, "frontend_required": False},
+ )
+ scope: str = Field(
+ default="personal",
+ description="Data visibility scope: personal, featureInstance, mandate, global",
+ json_schema_extra={"label": "Sichtbarkeit", "frontend_type": "select", "frontend_readonly": False, "frontend_required": False, "frontend_options": [
+ {"value": "personal", "label": "Persönlich"},
+ {"value": "featureInstance", "label": "Feature-Instanz"},
+ {"value": "mandate", "label": "Mandant"},
+ {"value": "global", "label": "Global"},
+ ]},
+ )
+ neutralizationStatus: str = Field(
+ default="not_required",
+ description="Status of neutralization: pending, completed, failed, not_required",
+ json_schema_extra={"label": "Neutralisierungsstatus", "frontend_type": "text", "frontend_readonly": True, "frontend_required": False},
+ )
+ namesToParse: str = Field(
+ default="",
+ description="Multiline list of names to parse for neutralization",
+ json_schema_extra={"label": "Zu parsende Namen", "frontend_type": "textarea", "frontend_readonly": False, "frontend_required": False},
+ )
+ sharepointSourcePath: str = Field(
+ default="",
+ description="SharePoint path to read files for neutralization",
+ json_schema_extra={"label": "SharePoint Quellpfad", "frontend_type": "text", "frontend_readonly": False, "frontend_required": False},
+ )
+ sharepointTargetPath: str = Field(
+ default="",
+ description="SharePoint path to store neutralized files",
+ json_schema_extra={"label": "SharePoint Zielpfad", "frontend_type": "text", "frontend_readonly": False, "frontend_required": False},
+ )
+
+@i18nModel("Neutralisiertes Datenattribut")
class DataNeutralizerAttributes(BaseModel):
- id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Unique ID of the attribute mapping (used as UID in neutralized files)", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False})
- mandateId: str = Field(description="ID of the mandate this attribute belongs to", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": True})
- featureInstanceId: str = Field(description="ID of the feature instance this attribute belongs to", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": True})
- userId: str = Field(description="ID of the user who created this attribute", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": True})
- originalText: str = Field(description="Original text that was neutralized", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": True})
- fileId: Optional[str] = Field(default=None, description="ID of the file this attribute belongs to", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False})
- patternType: str = Field(description="Type of pattern that matched (email, phone, name, etc.)", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": True})
+    """Zuordnung Originaltext zu Platzhalter für neutralisierte Daten."""
+ id: str = Field(
+ default_factory=lambda: str(uuid.uuid4()),
+ description="Unique ID of the attribute mapping (used as UID in neutralized files)",
+ json_schema_extra={"label": "ID", "frontend_type": "text", "frontend_readonly": True, "frontend_required": False},
+ )
+ mandateId: str = Field(
+ description="ID of the mandate this attribute belongs to",
+ json_schema_extra={"label": "Mandanten-ID", "frontend_type": "text", "frontend_readonly": True, "frontend_required": True},
+ )
+ featureInstanceId: str = Field(
+ description="ID of the feature instance this attribute belongs to",
+ json_schema_extra={"label": "Feature-Instanz-ID", "frontend_type": "text", "frontend_readonly": True, "frontend_required": True},
+ )
+ userId: str = Field(
+ description="ID of the user who created this attribute",
+ json_schema_extra={"label": "Benutzer-ID", "frontend_type": "text", "frontend_readonly": True, "frontend_required": True},
+ )
+ originalText: str = Field(
+ description="Original text that was neutralized",
+ json_schema_extra={"label": "Originaltext", "frontend_type": "text", "frontend_readonly": True, "frontend_required": True},
+ )
+ fileId: Optional[str] = Field(
+ default=None,
+ description="ID of the file this attribute belongs to",
+ json_schema_extra={"label": "Datei-ID", "frontend_type": "text", "frontend_readonly": True, "frontend_required": False},
+ )
+ patternType: str = Field(
+ description="Type of pattern that matched (email, phone, name, etc.)",
+ json_schema_extra={"label": "Mustertyp", "frontend_type": "text", "frontend_readonly": True, "frontend_required": True},
+ )
+@i18nModel("Neutralisierungs-Snapshot")
class DataNeutralizationSnapshot(BaseModel):
- """Stores the full neutralized text (with embedded placeholders) per source."""
- id: str = Field(default_factory=lambda: str(uuid.uuid4()))
- mandateId: str = Field(description="Mandate scope")
- featureInstanceId: str = Field(default="", description="Feature instance scope")
- userId: str = Field(description="User who triggered neutralization")
- sourceLabel: str = Field(description="Human label, e.g. 'Prompt', 'Kontext', 'Nachricht 3'")
- neutralizedText: str = Field(description="Full text with [type.uuid] placeholders embedded")
- placeholderCount: int = Field(default=0, description="Number of placeholders in the text")
-registerModelLabels(
- "DataNeutralizerAttributes",
- {"en": "Neutralized Data Attribute", "fr": "Attribut de données neutralisées"},
- {
- "id": {"en": "ID", "fr": "ID"},
- "mandateId": {"en": "Mandate ID", "fr": "ID de mandat"},
- "featureInstanceId": {"en": "Feature Instance ID", "fr": "ID de l'instance de fonctionnalité"},
- "userId": {"en": "User ID", "fr": "ID utilisateur"},
- "originalText": {"en": "Original Text", "fr": "Texte original"},
- "fileId": {"en": "File ID", "fr": "ID de fichier"},
- "patternType": {"en": "Pattern Type", "fr": "Type de modèle"},
- },
-)
-registerModelLabels(
- "DataNeutralizationSnapshot",
- {"en": "Neutralization Snapshot", "de": "Neutralisierungs-Snapshot"},
- {
- "id": {"en": "ID"},
- "mandateId": {"en": "Mandate ID"},
- "featureInstanceId": {"en": "Feature Instance ID"},
- "userId": {"en": "User ID"},
- "sourceLabel": {"en": "Source", "de": "Quelle"},
- "neutralizedText": {"en": "Neutralized Text", "de": "Neutralisierter Text"},
- "placeholderCount": {"en": "Placeholders", "de": "Platzhalter"},
- },
-)
-
-
+    """Speichert den vollständigen neutralisierten Text (mit Platzhaltern) pro Quelle."""
+ id: str = Field(
+ default_factory=lambda: str(uuid.uuid4()),
+ json_schema_extra={"label": "ID"},
+ )
+ mandateId: str = Field(
+ description="Mandate scope",
+ json_schema_extra={"label": "Mandanten-ID"},
+ )
+ featureInstanceId: str = Field(
+ default="",
+ description="Feature instance scope",
+ json_schema_extra={"label": "Feature-Instanz-ID"},
+ )
+ userId: str = Field(
+ description="User who triggered neutralization",
+ json_schema_extra={"label": "Benutzer-ID"},
+ )
+ sourceLabel: str = Field(
+ description="Human label, e.g. 'Prompt', 'Kontext', 'Nachricht 3'",
+ json_schema_extra={"label": "Quelle"},
+ )
+ neutralizedText: str = Field(
+ description="Full text with [type.uuid] placeholders embedded",
+ json_schema_extra={"label": "Neutralisierter Text"},
+ )
+ placeholderCount: int = Field(
+ default=0,
+ description="Number of placeholders in the text",
+ json_schema_extra={"label": "Platzhalter"},
+ )
diff --git a/modules/features/neutralization/mainNeutralization.py b/modules/features/neutralization/mainNeutralization.py
index bfe97a13..2c69fe7b 100644
--- a/modules/features/neutralization/mainNeutralization.py
+++ b/modules/features/neutralization/mainNeutralization.py
@@ -12,14 +12,14 @@ logger = logging.getLogger(__name__)
# Feature metadata
FEATURE_CODE = "neutralization"
-FEATURE_LABEL = {"en": "Neutralization", "de": "Neutralisierung", "fr": "Neutralisation"}
+FEATURE_LABEL = "Neutralisierung"
FEATURE_ICON = "mdi-shield-check"
# UI Objects for RBAC catalog
UI_OBJECTS = [
{
"objectKey": "ui.feature.neutralization.playground",
- "label": {"en": "Playground", "de": "Spielwiese", "fr": "Bac à sable"},
+ "label": "Spielwiese",
"meta": {"area": "playground"}
}
]
@@ -28,17 +28,17 @@ UI_OBJECTS = [
RESOURCE_OBJECTS = [
{
"objectKey": "resource.feature.neutralization.process.text",
- "label": {"en": "Process Text", "de": "Text verarbeiten", "fr": "Traiter texte"},
+ "label": "Text verarbeiten",
"meta": {"endpoint": "/api/neutralization/process/text", "method": "POST"}
},
{
"objectKey": "resource.feature.neutralization.process.files",
- "label": {"en": "Process Files", "de": "Dateien verarbeiten", "fr": "Traiter fichiers"},
+ "label": "Dateien verarbeiten",
"meta": {"endpoint": "/api/neutralization/process/files", "method": "POST"}
},
{
"objectKey": "resource.feature.neutralization.config.update",
- "label": {"en": "Update Config", "de": "Konfiguration aktualisieren", "fr": "Mettre à jour config"},
+ "label": "Konfiguration aktualisieren",
"meta": {"endpoint": "/api/neutralization/config", "method": "PUT"}
},
]
@@ -47,11 +47,7 @@ RESOURCE_OBJECTS = [
TEMPLATE_ROLES = [
{
"roleLabel": "neutralization-viewer",
- "description": {
- "en": "Neutralization Viewer - View neutralization data (read-only)",
- "de": "Neutralisierungs-Betrachter - Neutralisierungsdaten einsehen (nur lesen)",
- "fr": "Visualiseur neutralisation - Consulter les données de neutralisation (lecture seule)",
- },
+ "description": "Neutralisierungs-Betrachter - Neutralisierungsdaten einsehen (nur lesen)",
"accessRules": [
{"context": "UI", "item": "ui.feature.neutralization.playground", "view": True},
{"context": "DATA", "item": None, "view": True, "read": "m", "create": "n", "update": "n", "delete": "n"},
@@ -59,11 +55,7 @@ TEMPLATE_ROLES = [
},
{
"roleLabel": "neutralization-user",
- "description": {
- "en": "Neutralization User - Use neutralization tools and manage own data",
- "de": "Neutralisierungs-Benutzer - Neutralisierungstools nutzen und eigene Daten verwalten",
- "fr": "Utilisateur neutralisation - Utiliser les outils et gérer ses propres données",
- },
+ "description": "Neutralisierungs-Benutzer - Neutralisierungstools nutzen und eigene Daten verwalten",
"accessRules": [
{"context": "UI", "item": "ui.feature.neutralization.playground", "view": True},
{"context": "UI", "item": "ui.feature.neutralization.attributes", "view": True},
@@ -72,11 +64,7 @@ TEMPLATE_ROLES = [
},
{
"roleLabel": "neutralization-admin",
- "description": {
- "en": "Neutralization Administrator - Full access to neutralization settings and data",
- "de": "Neutralisierungs-Administrator - Vollzugriff auf Neutralisierungs-Einstellungen und Daten",
- "fr": "Administrateur neutralisation - Accès complet aux paramètres et données",
- },
+ "description": "Neutralisierungs-Administrator - Vollzugriff auf Neutralisierungs-Einstellungen und Daten",
"accessRules": [
{"context": "UI", "item": None, "view": True},
{"context": "DATA", "item": None, "view": True, "read": "a", "create": "a", "update": "a", "delete": "a"},
@@ -84,11 +72,7 @@ TEMPLATE_ROLES = [
},
{
"roleLabel": "neutralization-analyst",
- "description": {
- "en": "Neutralization Analyst - Analyze and process neutralization data",
- "de": "Neutralisierungs-Analyst - Neutralisierungsdaten analysieren und verarbeiten",
- "fr": "Analyste neutralisation - Analyser et traiter les données de neutralisation",
- },
+ "description": "Neutralisierungs-Analyst - Neutralisierungsdaten analysieren und verarbeiten",
"accessRules": [
{"context": "UI", "item": "ui.feature.neutralization.playground", "view": True},
{"context": "UI", "item": "ui.feature.neutralization.attributes", "view": True},
@@ -163,7 +147,8 @@ def _syncTemplateRolesToDb() -> int:
try:
from modules.interfaces.interfaceDbApp import getRootInterface
from modules.datamodels.datamodelRbac import Role, AccessRule, AccessRuleContext
-
+ from modules.datamodels.datamodelUtils import coerce_text_multilingual
+
rootInterface = getRootInterface()
existingRoles = rootInterface.getRolesByFeatureCode(FEATURE_CODE)
@@ -180,7 +165,7 @@ def _syncTemplateRolesToDb() -> int:
else:
newRole = Role(
roleLabel=roleLabel,
- description=roleTemplate.get("description", {}),
+ description=coerce_text_multilingual(roleTemplate.get("description", {})),
featureCode=FEATURE_CODE,
mandateId=None,
featureInstanceId=None,
diff --git a/modules/features/neutralization/routeFeatureNeutralizer.py b/modules/features/neutralization/routeFeatureNeutralizer.py
index 2f36efef..bf396e3b 100644
--- a/modules/features/neutralization/routeFeatureNeutralizer.py
+++ b/modules/features/neutralization/routeFeatureNeutralizer.py
@@ -10,6 +10,8 @@ from modules.auth import limiter, getRequestContext, RequestContext
# Import interfaces
from .datamodelFeatureNeutralizer import DataNeutraliserConfig, DataNeutralizerAttributes, DataNeutralizationSnapshot
from .neutralizePlayground import NeutralizationPlayground
+from modules.shared.i18nRegistry import apiRouteContext
+routeApiMsg = apiRouteContext("routeFeatureNeutralizer")
# Configure logger
logger = logging.getLogger(__name__)
@@ -22,7 +24,7 @@ def _assertFeatureInstancePathMatchesContext(featureInstanceIdFromPath: str, con
if ctxId and pathId and pathId != ctxId:
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
- detail="Feature instance id in URL does not match request context (X-Instance-Id)",
+ detail=routeApiMsg("Feature instance id in URL does not match request context (X-Instance-Id)"),
)
@@ -123,13 +125,13 @@ async def neutralize_file(
if not file.filename or not file.filename.strip():
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
- detail="File name is required"
+ detail=routeApiMsg("File name is required")
)
content = await file.read()
if not content:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
- detail="File is empty"
+ detail=routeApiMsg("File is empty")
)
service = NeutralizationPlayground(
context.user,
@@ -164,7 +166,7 @@ def neutralize_text(
if not text:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
- detail="Text content is required"
+ detail=routeApiMsg("Text content is required")
)
service = NeutralizationPlayground(
@@ -199,7 +201,7 @@ def resolve_text(
if not text:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
- detail="Text content is required"
+ detail=routeApiMsg("Text content is required")
)
service = NeutralizationPlayground(
@@ -320,7 +322,7 @@ async def process_sharepoint_files(
if not source_path or not target_path:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
- detail="Both source and target paths are required"
+ detail=routeApiMsg("Both source and target paths are required")
)
service = NeutralizationPlayground(
@@ -353,7 +355,7 @@ def batch_process_files(
if not files_data:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
- detail="Files data is required"
+ detail=routeApiMsg("Files data is required")
)
service = NeutralizationPlayground(
@@ -453,7 +455,7 @@ def _retriggerNeutralizationBody(context: RequestContext, fileId: str) -> Dict[s
if not fileId:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
- detail="fileId is required",
+ detail=routeApiMsg("fileId is required"),
)
service = NeutralizationPlayground(
context.user,
@@ -521,7 +523,7 @@ def cleanup_file_attributes(
else:
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
- detail="Failed to cleanup file attributes"
+ detail=routeApiMsg("Failed to cleanup file attributes")
)
except HTTPException:
diff --git a/modules/features/neutralization/serviceNeutralization/mainServiceNeutralization.py b/modules/features/neutralization/serviceNeutralization/mainServiceNeutralization.py
index 4c0842d4..7b680edc 100644
--- a/modules/features/neutralization/serviceNeutralization/mainServiceNeutralization.py
+++ b/modules/features/neutralization/serviceNeutralization/mainServiceNeutralization.py
@@ -20,7 +20,7 @@ from modules.features.neutralization.interfaceFeatureNeutralizer import Interfac
# Import all necessary classes and functions for neutralization
from .subProcessCommon import CommonUtils, NeutralizationResult, NeutralizationAttribute
from .subProcessText import TextProcessor, PlainText
-from .subProcessList import ListProcessor, TableData
+from .subProcessList import ListProcessor, NeutralizationTableData
from .subProcessBinary import BinaryProcessor
from .subProcessPdfInPlace import neutralize_pdf_in_place
from .subPatterns import HeaderPatterns, DataPatterns, TextTablePatterns
@@ -90,16 +90,24 @@ class NeutralizationService:
_NEUT_INSTRUCTION = (
"Analyze the following text and identify ALL sensitive content that must be neutralized:\n"
- "1. Personal data (PII): names of persons, email addresses, phone numbers, "
- "physical addresses, ID numbers, dates of birth, financial data (IBAN, account numbers), "
- "social security numbers\n"
+ "1. Personal data (PII):\n"
+ " - Full names of persons\n"
+ " - Email addresses\n"
+ " - Phone numbers\n"
+ " - Physical addresses (street, city, postal code)\n"
+ " - ID numbers (passport, driver license, AHV/SSN)\n"
+ " - Dates of birth (e.g. '14.03.1982', '1982-03-14', 'March 14, 1982', 'born in 1982')\n"
+ " - Age when it identifies a person\n"
+ " - Financial data (IBAN, account numbers, salary, balances)\n"
+ " - Nationality, citizenship, place of origin\n"
"2. Protected business logic: proprietary algorithms, trade secrets, confidential "
"processes, internal procedures, code snippets that reveal implementation details\n"
"3. Named entities: company names, product names, project names, brand names\n\n"
"Return ONLY a JSON array (no markdown, no explanation):\n"
- '[{"text":"exact substring","type":"name|email|phone|address|id|financial|logic|company|product|location|other"}]\n\n'
+ '[{"text":"exact substring","type":"name|email|phone|address|id|dob|financial|nationality|logic|company|product|location|other"}]\n\n'
"Rules:\n"
"- Every entry's 'text' must be an exact, verbatim substring of the input.\n"
+ "- Dates of birth MUST always be captured — use type 'dob'.\n"
"- Do NOT include generic words, common language constructs or non-sensitive terms.\n"
"- If nothing is sensitive, return [].\n\n"
)
diff --git a/modules/features/neutralization/serviceNeutralization/subProcessList.py b/modules/features/neutralization/serviceNeutralization/subProcessList.py
index 97721535..8f815e1e 100644
--- a/modules/features/neutralization/serviceNeutralization/subProcessList.py
+++ b/modules/features/neutralization/serviceNeutralization/subProcessList.py
@@ -15,7 +15,7 @@ from .subParseString import StringParser
from .subPatterns import getPatternForHeader, HeaderPatterns
@dataclass
-class TableData:
+class NeutralizationTableData:
"""Repräsentiert Tabellendaten"""
headers: List[str]
rows: List[List[str]]
@@ -34,17 +34,17 @@ class ListProcessor:
self.string_parser = StringParser(NamesToParse)
self.header_patterns = HeaderPatterns.patterns
- def _anonymizeTable(self, table: TableData) -> TableData:
+ def _anonymizeTable(self, table: NeutralizationTableData) -> NeutralizationTableData:
"""
Anonymize table data based on headers
Args:
- table: TableData object to anonymize
+ table: NeutralizationTableData object to anonymize
Returns:
- TableData: Anonymized table
+ NeutralizationTableData: Anonymized table
"""
- anonymizedTable = TableData(
+ anonymizedTable = NeutralizationTableData(
headers=table.headers.copy(),
rows=[row.copy() for row in table.rows],
source_type=table.source_type
@@ -76,7 +76,7 @@ class ListProcessor:
Tuple of (processed_data, mapping, replaced_fields, processed_info)
"""
df = pd.read_csv(StringIO(content), encoding='utf-8')
- table = TableData(
+ table = NeutralizationTableData(
headers=df.columns.tolist(),
rows=df.values.tolist(),
source_type='csv'
diff --git a/modules/features/realEstate/datamodelFeatureRealEstate.py b/modules/features/realEstate/datamodelFeatureRealEstate.py
index 8f136056..c12090d1 100644
--- a/modules/features/realEstate/datamodelFeatureRealEstate.py
+++ b/modules/features/realEstate/datamodelFeatureRealEstate.py
@@ -8,7 +8,7 @@ from typing import List, Dict, Any, Optional, ForwardRef
from enum import Enum
from pydantic import BaseModel, Field
from modules.datamodels.datamodelBase import PowerOnModel
-from modules.shared.attributeUtils import registerModelLabels
+from modules.shared.i18nRegistry import i18nModel
from modules.shared.timeUtils import getUtcTimestamp
import uuid
@@ -109,6 +109,7 @@ class GeoPolylinie(BaseModel):
)
+@i18nModel("Dokument")
class Dokument(BaseModel):
"""Supporting data object for file and URL management with versioning."""
id: str = Field(
@@ -117,24 +118,28 @@ class Dokument(BaseModel):
frontend_type="text",
frontend_readonly=True,
frontend_required=False,
+ label="ID",
)
mandateId: str = Field(
description="ID of the mandate this document belongs to",
frontend_type="text",
frontend_readonly=True,
frontend_required=False,
+ label="Mandats-ID",
)
featureInstanceId: str = Field(
description="ID of the feature instance this document belongs to",
frontend_type="text",
frontend_readonly=True,
frontend_required=False,
+ label="Feature-Instanz-ID",
)
label: str = Field(
description="Document label",
frontend_type="text",
frontend_readonly=False,
frontend_required=True,
+ label="Bezeichnung",
)
versionsbezeichnung: Optional[str] = Field(
None,
@@ -369,6 +374,7 @@ class Gemeinde(BaseModel):
ParzelleRef = ForwardRef('Parzelle')
+@i18nModel("Parzelle")
class Parzelle(PowerOnModel):
"""Represents a plot with all building law properties."""
id: str = Field(
@@ -377,18 +383,21 @@ class Parzelle(PowerOnModel):
frontend_type="text",
frontend_readonly=True,
frontend_required=False,
+ label="ID",
)
mandateId: str = Field(
description="ID of the mandate",
frontend_type="text",
frontend_readonly=True,
frontend_required=False,
+ label="Mandats-ID",
)
featureInstanceId: str = Field(
description="ID of the feature instance",
frontend_type="text",
frontend_readonly=True,
frontend_required=False,
+ label="Feature-Instanz-ID",
)
# Grunddaten
@@ -397,6 +406,7 @@ class Parzelle(PowerOnModel):
frontend_type="text",
frontend_readonly=False,
frontend_required=True,
+ label="Bezeichnung",
)
parzellenAliasTags: List[str] = Field(
default_factory=list,
@@ -595,6 +605,7 @@ class Parzelle(PowerOnModel):
)
+@i18nModel("Projekt")
class Projekt(PowerOnModel):
"""Core object representing a construction project."""
id: str = Field(
@@ -603,24 +614,28 @@ class Projekt(PowerOnModel):
frontend_type="text",
frontend_readonly=True,
frontend_required=False,
+ label="ID",
)
mandateId: str = Field(
description="ID of the mandate",
frontend_type="text",
frontend_readonly=True,
frontend_required=False,
+ label="Mandats-ID",
)
featureInstanceId: str = Field(
description="ID of the feature instance",
frontend_type="text",
frontend_readonly=True,
frontend_required=False,
+ label="Feature-Instanz-ID",
)
label: str = Field(
description="Project designation",
frontend_type="text",
frontend_readonly=False,
frontend_required=True,
+ label="Bezeichnung",
)
statusProzess: Optional[StatusProzess] = Field(
None,
@@ -628,6 +643,7 @@ class Projekt(PowerOnModel):
frontend_type="select",
frontend_readonly=False,
frontend_required=False,
+ label="Prozessstatus",
)
perimeter: Optional[GeoPolylinie] = Field(
None,
@@ -670,39 +686,3 @@ class Projekt(PowerOnModel):
Parzelle.model_rebuild()
Projekt.model_rebuild()
-
-# Register labels for frontend
-registerModelLabels(
- "Projekt",
- {"en": "Project", "fr": "Projet", "de": "Projekt"},
- {
- "id": {"en": "ID", "fr": "ID", "de": "ID"},
- "label": {"en": "Label", "fr": "Libellé", "de": "Bezeichnung"},
- "statusProzess": {"en": "Process Status", "fr": "Statut du processus", "de": "Prozessstatus"},
- "mandateId": {"en": "Mandate ID", "fr": "ID du mandat", "de": "Mandats-ID"},
- "featureInstanceId": {"en": "Feature Instance ID", "fr": "ID de l'instance", "de": "Feature-Instanz-ID"},
- },
-)
-
-registerModelLabels(
- "Parzelle",
- {"en": "Plot", "fr": "Parcelle", "de": "Parzelle"},
- {
- "id": {"en": "ID", "fr": "ID", "de": "ID"},
- "label": {"en": "Label", "fr": "Libellé", "de": "Bezeichnung"},
- "mandateId": {"en": "Mandate ID", "fr": "ID du mandat", "de": "Mandats-ID"},
- "featureInstanceId": {"en": "Feature Instance ID", "fr": "ID de l'instance", "de": "Feature-Instanz-ID"},
- },
-)
-
-registerModelLabels(
- "Dokument",
- {"en": "Document", "fr": "Document", "de": "Dokument"},
- {
- "id": {"en": "ID", "fr": "ID", "de": "ID"},
- "label": {"en": "Label", "fr": "Libellé", "de": "Bezeichnung"},
- "mandateId": {"en": "Mandate ID", "fr": "ID du mandat", "de": "Mandats-ID"},
- "featureInstanceId": {"en": "Feature Instance ID", "fr": "ID de l'instance", "de": "Feature-Instanz-ID"},
- },
-)
-
diff --git a/modules/features/realEstate/mainRealEstate.py b/modules/features/realEstate/mainRealEstate.py
index dfe310d5..0ae29159 100644
--- a/modules/features/realEstate/mainRealEstate.py
+++ b/modules/features/realEstate/mainRealEstate.py
@@ -10,14 +10,14 @@ import logging
# Feature metadata for RBAC catalog
FEATURE_CODE = "realestate"
-FEATURE_LABEL = {"en": "Real Estate", "de": "Immobilien", "fr": "Immobilier"}
+FEATURE_LABEL = "Immobilien"
FEATURE_ICON = "mdi-home-city"
# UI Objects for RBAC catalog (only map view)
UI_OBJECTS = [
{
"objectKey": "ui.feature.realestate.dashboard",
- "label": {"en": "Map", "de": "Karte", "fr": "Carte"},
+ "label": "Karte",
"meta": {"area": "dashboard"}
},
]
@@ -26,12 +26,12 @@ UI_OBJECTS = [
RESOURCE_OBJECTS = [
{
"objectKey": "resource.feature.realestate.project.create",
- "label": {"en": "Create Project", "de": "Projekt erstellen", "fr": "Créer projet"},
+ "label": "Projekt erstellen",
"meta": {"endpoint": "/api/realestate/project", "method": "POST"}
},
{
"objectKey": "resource.feature.realestate.project.delete",
- "label": {"en": "Delete Project", "de": "Projekt löschen", "fr": "Supprimer projet"},
+ "label": "Projekt löschen",
"meta": {"endpoint": "/api/realestate/project/{projectId}", "method": "DELETE"}
},
]
@@ -41,11 +41,7 @@ RESOURCE_OBJECTS = [
TEMPLATE_ROLES = [
{
"roleLabel": "realestate-viewer",
- "description": {
- "en": "Real Estate Viewer - View property information (read-only)",
- "de": "Immobilien-Betrachter - Immobilien-Informationen einsehen (nur lesen)",
- "fr": "Visualiseur immobilier - Consulter les informations immobilières (lecture seule)",
- },
+ "description": "Immobilien-Betrachter - Immobilien-Informationen einsehen (nur lesen)",
"accessRules": [
{"context": "UI", "item": "ui.feature.realestate.dashboard", "view": True},
{"context": "DATA", "item": None, "view": True, "read": "m", "create": "n", "update": "n", "delete": "n"},
@@ -53,11 +49,7 @@ TEMPLATE_ROLES = [
},
{
"roleLabel": "realestate-user",
- "description": {
- "en": "Real Estate User - Create and manage own property records",
- "de": "Immobilien-Benutzer - Eigene Immobilien-Daten erstellen und verwalten",
- "fr": "Utilisateur immobilier - Créer et gérer ses propres données immobilières",
- },
+ "description": "Immobilien-Benutzer - Eigene Immobilien-Daten erstellen und verwalten",
"accessRules": [
{"context": "UI", "item": "ui.feature.realestate.dashboard", "view": True},
{"context": "DATA", "item": None, "view": True, "read": "m", "create": "m", "update": "m", "delete": "n"},
@@ -66,11 +58,7 @@ TEMPLATE_ROLES = [
},
{
"roleLabel": "realestate-admin",
- "description": {
- "en": "Real Estate Administrator - Full access to all property data and settings",
- "de": "Immobilien-Administrator - Vollzugriff auf alle Immobiliendaten und Einstellungen",
- "fr": "Administrateur immobilier - Accès complet aux données et paramètres",
- },
+ "description": "Immobilien-Administrator - Vollzugriff auf alle Immobiliendaten und Einstellungen",
"accessRules": [
{"context": "UI", "item": None, "view": True},
{"context": "DATA", "item": None, "view": True, "read": "a", "create": "a", "update": "a", "delete": "a"},
@@ -80,11 +68,7 @@ TEMPLATE_ROLES = [
},
{
"roleLabel": "realestate-manager",
- "description": {
- "en": "Real Estate Manager - Manage properties and tenants",
- "de": "Immobilien-Verwalter - Immobilien und Mieter verwalten",
- "fr": "Gestionnaire immobilier - Gérer les propriétés et locataires",
- },
+ "description": "Immobilien-Verwalter - Immobilien und Mieter verwalten",
"accessRules": [
{"context": "UI", "item": "ui.feature.realestate.dashboard", "view": True},
{"context": "DATA", "item": None, "view": True, "read": "g", "create": "g", "update": "g", "delete": "g"},
@@ -154,6 +138,7 @@ def _syncTemplateRolesToDb() -> int:
try:
from modules.interfaces.interfaceDbApp import getRootInterface
from modules.datamodels.datamodelRbac import Role, AccessRule, AccessRuleContext
+ from modules.datamodels.datamodelUtils import coerce_text_multilingual
rootInterface = getRootInterface()
db = rootInterface.db
@@ -174,7 +159,7 @@ def _syncTemplateRolesToDb() -> int:
else:
newRole = Role(
roleLabel=roleLabel,
- description=roleTemplate.get("description", {}),
+ description=coerce_text_multilingual(roleTemplate.get("description", {})),
featureCode=FEATURE_CODE,
mandateId=None,
featureInstanceId=None,
diff --git a/modules/features/realEstate/routeFeatureRealEstate.py b/modules/features/realEstate/routeFeatureRealEstate.py
index 82fa55ba..58faca8e 100644
--- a/modules/features/realEstate/routeFeatureRealEstate.py
+++ b/modules/features/realEstate/routeFeatureRealEstate.py
@@ -59,6 +59,8 @@ from modules.aicore.aicorePluginTavily import AiTavily
# Import attribute utilities for model schema
from modules.shared.attributeUtils import getModelAttributeDefinitions
+from modules.shared.i18nRegistry import apiRouteContext
+routeApiMsg = apiRouteContext("routeFeatureRealEstate")
# Configure logger
logger = logging.getLogger(__name__)
@@ -339,7 +341,7 @@ def update_project(
raise HTTPException(status_code=404, detail=f"Project '{projectId}' not found")
updated = interface.updateProjekt(projectId, data)
if not updated:
- raise HTTPException(status_code=500, detail="Update failed")
+ raise HTTPException(status_code=500, detail=routeApiMsg("Update failed"))
return updated
@@ -360,7 +362,7 @@ def delete_project(
if not projekt or str(getattr(projekt, "featureInstanceId", None)) != instanceId:
raise HTTPException(status_code=404, detail=f"Project '{projectId}' not found")
if not interface.deleteProjekt(projectId):
- raise HTTPException(status_code=500, detail="Delete failed")
+ raise HTTPException(status_code=500, detail=routeApiMsg("Delete failed"))
# ----- Parcels CRUD -----
@@ -496,7 +498,7 @@ def update_parcel(
raise HTTPException(status_code=404, detail=f"Parcel '{parcelId}' not found")
updated = interface.updateParzelle(parcelId, data)
if not updated:
- raise HTTPException(status_code=500, detail="Update failed")
+ raise HTTPException(status_code=500, detail=routeApiMsg("Update failed"))
return updated
@@ -517,7 +519,7 @@ def delete_parcel(
if not parzelle or str(getattr(parzelle, "featureInstanceId", None)) != instanceId:
raise HTTPException(status_code=404, detail=f"Parcel '{parcelId}' not found")
if not interface.deleteParzelle(parcelId):
- raise HTTPException(status_code=500, detail="Delete failed")
+ raise HTTPException(status_code=500, detail=routeApiMsg("Delete failed"))
# ===== Helpers for Gemeinde/BZO routes =====
@@ -885,7 +887,7 @@ async def process_command(
logger.warning(f"CSRF token missing for POST /api/realestate/command from user {context.user.id}")
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
- detail="CSRF token missing. Please include X-CSRF-Token header."
+ detail=routeApiMsg("CSRF token missing. Please include X-CSRF-Token header.")
)
# Basic CSRF token format validation
@@ -893,7 +895,7 @@ async def process_command(
logger.warning(f"Invalid CSRF token format for POST /api/realestate/command from user {context.user.id}")
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
- detail="Invalid CSRF token format"
+ detail=routeApiMsg("Invalid CSRF token format")
)
# Validate token is hex string
@@ -903,7 +905,7 @@ async def process_command(
logger.warning(f"CSRF token is not a valid hex string for POST /api/realestate/command from user {context.user.id}")
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
- detail="Invalid CSRF token format"
+ detail=routeApiMsg("Invalid CSRF token format")
)
logger.info(f"Processing command request from user {context.user.id} (mandate: {context.mandateId})")
@@ -957,7 +959,7 @@ def get_available_tables(
logger.warning(f"CSRF token missing for GET /api/realestate/tables from user {context.user.id}")
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
- detail="CSRF token missing. Please include X-CSRF-Token header."
+ detail=routeApiMsg("CSRF token missing. Please include X-CSRF-Token header.")
)
# Basic CSRF token format validation
@@ -965,7 +967,7 @@ def get_available_tables(
logger.warning(f"Invalid CSRF token format for GET /api/realestate/tables from user {context.user.id}")
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
- detail="Invalid CSRF token format"
+ detail=routeApiMsg("Invalid CSRF token format")
)
# Validate token is hex string
@@ -975,7 +977,7 @@ def get_available_tables(
logger.warning(f"CSRF token is not a valid hex string for GET /api/realestate/tables from user {context.user.id}")
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
- detail="Invalid CSRF token format"
+ detail=routeApiMsg("Invalid CSRF token format")
)
logger.info(f"Getting available tables for user {context.user.id} (mandate: {context.mandateId})")
@@ -1066,7 +1068,7 @@ def get_table_data(
logger.warning(f"CSRF token missing for GET /api/realestate/table/{table} from user {context.user.id}")
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
- detail="CSRF token missing. Please include X-CSRF-Token header."
+ detail=routeApiMsg("CSRF token missing. Please include X-CSRF-Token header.")
)
# Basic CSRF token format validation
@@ -1074,7 +1076,7 @@ def get_table_data(
logger.warning(f"Invalid CSRF token format for GET /api/realestate/table/{table} from user {context.user.id}")
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
- detail="Invalid CSRF token format"
+ detail=routeApiMsg("Invalid CSRF token format")
)
# Validate token is hex string
@@ -1084,7 +1086,7 @@ def get_table_data(
logger.warning(f"CSRF token is not a valid hex string for GET /api/realestate/table/{table} from user {context.user.id}")
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
- detail="Invalid CSRF token format"
+ detail=routeApiMsg("Invalid CSRF token format")
)
logger.info(f"Getting table data for '{table}' from user {context.user.id} (mandate: {context.mandateId})")
@@ -1235,7 +1237,7 @@ async def create_table_record(
logger.warning(f"CSRF token missing for POST /api/realestate/table/{table} from user {context.user.id}")
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
- detail="CSRF token missing. Please include X-CSRF-Token header."
+ detail=routeApiMsg("CSRF token missing. Please include X-CSRF-Token header.")
)
# Basic CSRF token format validation
@@ -1243,7 +1245,7 @@ async def create_table_record(
logger.warning(f"Invalid CSRF token format for POST /api/realestate/table/{table} from user {context.user.id}")
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
- detail="Invalid CSRF token format"
+ detail=routeApiMsg("Invalid CSRF token format")
)
# Validate token is hex string
@@ -1253,7 +1255,7 @@ async def create_table_record(
logger.warning(f"CSRF token is not a valid hex string for POST /api/realestate/table/{table} from user {context.user.id}")
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
- detail="Invalid CSRF token format"
+ detail=routeApiMsg("Invalid CSRF token format")
)
# Special handling for Projekt with parcel data
@@ -1265,7 +1267,7 @@ async def create_table_record(
if not label:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
- detail="label is required"
+ detail=routeApiMsg("label is required")
)
status_prozess = data.get("statusProzess", "Eingang")
@@ -1278,7 +1280,7 @@ async def create_table_record(
if not isinstance(parzellen_data, list):
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
- detail="parzellen must be an array"
+ detail=routeApiMsg("parzellen must be an array")
)
elif "parzelle" in data:
# Single parcel
@@ -1289,7 +1291,7 @@ async def create_table_record(
if not parzellen_data:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
- detail="parzelle or parzellen data is required"
+ detail=routeApiMsg("parzelle or parzellen data is required")
)
# Use helper function to create project with parcel data
@@ -1402,7 +1404,7 @@ def get_parcels_wfs(
logger.error(f"Error fetching WFS parcels: {e}", exc_info=True)
raise HTTPException(
status_code=status.HTTP_502_BAD_GATEWAY,
- detail="Failed to fetch parcel data from WFS"
+ detail=routeApiMsg("Failed to fetch parcel data from WFS")
)
@@ -1441,7 +1443,7 @@ async def search_parcel(
logger.warning(f"CSRF token missing for GET /api/realestate/parcel/search from user {context.user.id}")
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
- detail="CSRF token missing. Please include X-CSRF-Token header."
+ detail=routeApiMsg("CSRF token missing. Please include X-CSRF-Token header.")
)
logger.info(f"Searching parcel for user {context.user.id} (mandate: {context.mandateId}) with location: {location}")
@@ -1817,7 +1819,7 @@ async def parcel_selection_summary(
if not csrf_token:
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
- detail="CSRF token missing. Please include X-CSRF-Token header."
+ detail=routeApiMsg("CSRF token missing. Please include X-CSRF-Token header.")
)
parcels = body.get("parcels", [])
if not parcels:
@@ -1868,19 +1870,19 @@ async def add_adjacent_parcel(
if not csrf_token:
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
- detail="CSRF token missing. Please include X-CSRF-Token header."
+ detail=routeApiMsg("CSRF token missing. Please include X-CSRF-Token header.")
)
location = body.get("location")
selected_parcels = body.get("selected_parcels", [])
if not location or "x" not in location or "y" not in location:
- raise HTTPException(status_code=400, detail="location with x,y required")
+ raise HTTPException(status_code=400, detail=routeApiMsg("location with x,y required"))
loc_str = f"{location['x']},{location['y']}"
connector = SwissTopoMapServerConnector()
parcel_data = await connector.search_parcel(loc_str)
if not parcel_data:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
- detail="No parcel found at this location"
+ detail=routeApiMsg("No parcel found at this location")
)
extracted = connector.extract_parcel_attributes(parcel_data)
attributes = parcel_data.get("attributes", {})
@@ -1932,7 +1934,7 @@ async def add_adjacent_parcel(
if not is_parcel_adjacent_to_selection(new_parcel_response, selected_parcels):
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
- detail="Nur angrenzende Parzellen können hinzugefügt werden"
+ detail=routeApiMsg("Nur angrenzende Parzellen können hinzugefügt werden")
)
bbox = parcel_data.get("bbox", [])
map_view["zoom_bounds"] = {
@@ -2020,21 +2022,21 @@ async def add_parcel_to_project(
logger.warning(f"CSRF token missing for POST /api/realestate/projekt/{projekt_id}/add-parcel from user {context.user.id}")
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
- detail="CSRF token missing. Please include X-CSRF-Token header."
+ detail=routeApiMsg("CSRF token missing. Please include X-CSRF-Token header.")
)
# Validate CSRF token format
if not isinstance(csrf_token, str) or len(csrf_token) < 16 or len(csrf_token) > 64:
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
- detail="Invalid CSRF token format"
+ detail=routeApiMsg("Invalid CSRF token format")
)
try:
int(csrf_token, 16)
except ValueError:
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
- detail="Invalid CSRF token format"
+ detail=routeApiMsg("Invalid CSRF token format")
)
logger.info(f"Adding parcel to project {projekt_id} for user {context.user.id} (mandate: {context.mandateId})")
diff --git a/modules/features/teamsbot/mainTeamsbot.py b/modules/features/teamsbot/mainTeamsbot.py
index ea6d3b01..02d7c333 100644
--- a/modules/features/teamsbot/mainTeamsbot.py
+++ b/modules/features/teamsbot/mainTeamsbot.py
@@ -12,24 +12,24 @@ logger = logging.getLogger(__name__)
# Feature metadata
FEATURE_CODE = "teamsbot"
-FEATURE_LABEL = {"en": "Teams Bot", "de": "Teams Bot", "fr": "Teams Bot"}
+FEATURE_LABEL = "Teams Bot"
FEATURE_ICON = "mdi-headset"
# UI Objects for RBAC catalog
UI_OBJECTS = [
{
"objectKey": "ui.feature.teamsbot.dashboard",
- "label": {"en": "Dashboard", "de": "Dashboard", "fr": "Tableau de bord"},
+ "label": "Dashboard",
"meta": {"area": "dashboard"}
},
{
"objectKey": "ui.feature.teamsbot.sessions",
- "label": {"en": "Sessions", "de": "Sitzungen", "fr": "Sessions"},
+ "label": "Sitzungen",
"meta": {"area": "sessions"}
},
{
"objectKey": "ui.feature.teamsbot.settings",
- "label": {"en": "Settings", "de": "Einstellungen", "fr": "Paramètres"},
+ "label": "Einstellungen",
"meta": {"area": "settings", "admin_only": True}
},
]
@@ -38,7 +38,7 @@ UI_OBJECTS = [
DATA_OBJECTS = [
{
"objectKey": "data.feature.teamsbot.TeamsbotSession",
- "label": {"en": "Session", "de": "Sitzung", "fr": "Session"},
+ "label": "Sitzung",
"meta": {
"table": "TeamsbotSession",
"fields": ["id", "meetingLink", "botName", "status", "startedAt", "endedAt"],
@@ -48,7 +48,7 @@ DATA_OBJECTS = [
},
{
"objectKey": "data.feature.teamsbot.TeamsbotTranscript",
- "label": {"en": "Transcript", "de": "Transkript", "fr": "Transcription"},
+ "label": "Transkript",
"meta": {
"table": "TeamsbotTranscript",
"fields": ["id", "sessionId", "speaker", "text", "timestamp"],
@@ -58,7 +58,7 @@ DATA_OBJECTS = [
},
{
"objectKey": "data.feature.teamsbot.TeamsbotBotResponse",
- "label": {"en": "Bot Response", "de": "Bot-Antwort", "fr": "Réponse du bot"},
+ "label": "Bot-Antwort",
"meta": {
"table": "TeamsbotBotResponse",
"fields": ["id", "sessionId", "responseText", "detectedIntent"],
@@ -68,7 +68,7 @@ DATA_OBJECTS = [
},
{
"objectKey": "data.feature.teamsbot.*",
- "label": {"en": "All Teams Bot Data", "de": "Alle Teams Bot Daten", "fr": "Toutes les données Teams Bot"},
+ "label": "Alle Teams Bot Daten",
"meta": {"wildcard": True, "description": "Wildcard for all teamsbot data tables"}
},
]
@@ -77,22 +77,22 @@ DATA_OBJECTS = [
RESOURCE_OBJECTS = [
{
"objectKey": "resource.feature.teamsbot.session.start",
- "label": {"en": "Start Session", "de": "Sitzung starten", "fr": "Démarrer session"},
+ "label": "Sitzung starten",
"meta": {"endpoint": "/api/teamsbot/{instanceId}/sessions", "method": "POST"}
},
{
"objectKey": "resource.feature.teamsbot.session.stop",
- "label": {"en": "Stop Session", "de": "Sitzung beenden", "fr": "Arrêter session"},
+ "label": "Sitzung beenden",
"meta": {"endpoint": "/api/teamsbot/{instanceId}/sessions/{sessionId}/stop", "method": "POST"}
},
{
"objectKey": "resource.feature.teamsbot.session.delete",
- "label": {"en": "Delete Session", "de": "Sitzung löschen", "fr": "Supprimer session"},
+ "label": "Sitzung löschen",
"meta": {"endpoint": "/api/teamsbot/{instanceId}/sessions/{sessionId}", "method": "DELETE"}
},
{
"objectKey": "resource.feature.teamsbot.config.edit",
- "label": {"en": "Edit Configuration", "de": "Konfiguration bearbeiten", "fr": "Modifier configuration"},
+ "label": "Konfiguration bearbeiten",
"meta": {"endpoint": "/api/teamsbot/{instanceId}/config", "method": "PUT", "admin_only": True}
},
]
@@ -101,11 +101,7 @@ RESOURCE_OBJECTS = [
TEMPLATE_ROLES = [
{
"roleLabel": "teamsbot-admin",
- "description": {
- "en": "Teams Bot Administrator - Full access to all sessions and settings",
- "de": "Teams Bot Administrator - Vollzugriff auf alle Sitzungen und Einstellungen",
- "fr": "Administrateur Teams Bot - Accès complet aux sessions et paramètres"
- },
+ "description": "Teams Bot Administrator - Vollzugriff auf alle Sitzungen und Einstellungen",
"accessRules": [
# Full UI access (all views including settings)
{"context": "UI", "item": None, "view": True},
@@ -120,11 +116,7 @@ TEMPLATE_ROLES = [
},
{
"roleLabel": "teamsbot-viewer",
- "description": {
- "en": "Teams Bot Viewer - View sessions and transcripts (read-only)",
- "de": "Teams Bot Betrachter - Sitzungen und Transkripte ansehen (nur lesen)",
- "fr": "Visualiseur Teams Bot - Consulter les sessions et transcriptions (lecture seule)",
- },
+ "description": "Teams Bot Betrachter - Sitzungen und Transkripte ansehen (nur lesen)",
"accessRules": [
{"context": "UI", "item": "ui.feature.teamsbot.dashboard", "view": True},
{"context": "UI", "item": "ui.feature.teamsbot.sessions", "view": True},
@@ -133,11 +125,7 @@ TEMPLATE_ROLES = [
},
{
"roleLabel": "teamsbot-user",
- "description": {
- "en": "Teams Bot User - Can start/stop sessions and view transcripts",
- "de": "Teams Bot Benutzer - Kann Sitzungen starten/stoppen und Transkripte einsehen",
- "fr": "Utilisateur Teams Bot - Peut démarrer/arrêter des sessions et voir les transcriptions",
- },
+ "description": "Teams Bot Benutzer - Kann Sitzungen starten/stoppen und Transkripte einsehen",
"accessRules": [
{"context": "UI", "item": "ui.feature.teamsbot.dashboard", "view": True},
{"context": "UI", "item": "ui.feature.teamsbot.sessions", "view": True},
@@ -223,7 +211,8 @@ def _syncTemplateRolesToDb() -> int:
try:
from modules.interfaces.interfaceDbApp import getRootInterface
from modules.datamodels.datamodelRbac import Role, AccessRule, AccessRuleContext
-
+ from modules.datamodels.datamodelUtils import coerce_text_multilingual
+
rootInterface = getRootInterface()
existingRoles = rootInterface.getRolesByFeatureCode(FEATURE_CODE)
templateRoles = [r for r in existingRoles if r.mandateId is None]
@@ -239,7 +228,7 @@ def _syncTemplateRolesToDb() -> int:
else:
newRole = Role(
roleLabel=roleLabel,
- description=roleTemplate.get("description", {}),
+ description=coerce_text_multilingual(roleTemplate.get("description", {})),
featureCode=FEATURE_CODE,
mandateId=None,
featureInstanceId=None,
diff --git a/modules/features/teamsbot/routeFeatureTeamsbot.py b/modules/features/teamsbot/routeFeatureTeamsbot.py
index c498a790..d316bde2 100644
--- a/modules/features/teamsbot/routeFeatureTeamsbot.py
+++ b/modules/features/teamsbot/routeFeatureTeamsbot.py
@@ -40,6 +40,8 @@ from .datamodelTeamsbot import (
# Import service
from .service import TeamsbotService
+from modules.shared.i18nRegistry import apiRouteContext
+routeApiMsg = apiRouteContext("routeFeatureTeamsbot")
logger = logging.getLogger(__name__)
@@ -71,7 +73,7 @@ def _extractTeamsMeetingUrl(rawInput: str) -> str:
urls = re.findall(urlPattern, rawInput)
if not urls:
- raise HTTPException(status_code=400, detail="Kein gültiger Meeting-Link gefunden. Bitte einen Teams-Link eingeben.")
+ raise HTTPException(status_code=400, detail=routeApiMsg("Kein gültiger Meeting-Link gefunden. Bitte einen Teams-Link eingeben."))
# Step 2: Find the Teams URL (prefer direct teams.microsoft.com, then SafeLinks)
teamsUrl = None
@@ -101,7 +103,7 @@ def _extractTeamsMeetingUrl(rawInput: str) -> str:
if not teamsUrl or "teams.microsoft.com" not in teamsUrl:
raise HTTPException(
status_code=400,
- detail="Kein gültiger Teams-Meeting-Link gefunden. Der Link muss 'teams.microsoft.com' enthalten."
+ detail=routeApiMsg("Kein gültiger Teams-Meeting-Link gefunden. Der Link muss 'teams.microsoft.com' enthalten.")
)
logger.info(f"Extracted meeting URL: {teamsUrl[:80]}... (from input length {len(rawInput)})")
@@ -129,7 +131,7 @@ def _validateInstanceAccess(instanceId: str, context: RequestContext) -> str:
mandateId = instance.get("mandateId") if isinstance(instance, dict) else getattr(instance, "mandateId", None)
if not mandateId:
- raise HTTPException(status_code=500, detail="Feature instance has no mandateId")
+ raise HTTPException(status_code=500, detail=routeApiMsg("Feature instance has no mandateId"))
return str(mandateId)
@@ -463,7 +465,7 @@ async def deleteSession(
# Don't delete active sessions
currentStatus = session.get("status")
if currentStatus in [TeamsbotSessionStatus.ACTIVE.value, TeamsbotSessionStatus.JOINING.value]:
- raise HTTPException(status_code=400, detail="Cannot delete an active session. Stop it first.")
+ raise HTTPException(status_code=400, detail=routeApiMsg("Cannot delete an active session. Stop it first."))
interface.deleteSession(sessionId)
logger.info(f"Teamsbot session {sessionId} deleted")
@@ -639,7 +641,7 @@ async def listSystemBots(
):
"""List all system bot accounts for this mandate. Passwords are never returned."""
if not context.isSysAdmin:
- raise HTTPException(status_code=403, detail="SysAdmin privileges required to manage system bots")
+ raise HTTPException(status_code=403, detail=routeApiMsg("SysAdmin privileges required to manage system bots"))
mandateId = _validateInstanceAccess(instanceId, context)
interface = _getInterface(context, instanceId)
bots = interface.getSystemBots(mandateId)
@@ -655,7 +657,7 @@ async def createSystemBot(
):
"""Create a new system bot account. Password is encrypted before storage."""
if not context.isSysAdmin:
- raise HTTPException(status_code=403, detail="SysAdmin privileges required to manage system bots")
+ raise HTTPException(status_code=403, detail=routeApiMsg("SysAdmin privileges required to manage system bots"))
mandateId = _validateInstanceAccess(instanceId, context)
interface = _getInterface(context, instanceId)
@@ -666,7 +668,7 @@ async def createSystemBot(
if not email or not password:
from fastapi import HTTPException
- raise HTTPException(status_code=400, detail="Email and password are required")
+ raise HTTPException(status_code=400, detail=routeApiMsg("Email and password are required"))
# Encrypt the password
from modules.shared.configuration import encryptValue
@@ -698,7 +700,7 @@ async def deleteSystemBot(
):
"""Delete a system bot account."""
if not context.isSysAdmin:
- raise HTTPException(status_code=403, detail="SysAdmin privileges required to manage system bots")
+ raise HTTPException(status_code=403, detail=routeApiMsg("SysAdmin privileges required to manage system bots"))
_validateInstanceAccess(instanceId, context)
interface = _getInterface(context, instanceId)
@@ -750,7 +752,7 @@ async def saveUserAccount(
displayName = body.get("displayName")
if not email or not password:
- raise HTTPException(status_code=400, detail="Email and password are required")
+ raise HTTPException(status_code=400, detail=routeApiMsg("Email and password are required"))
from modules.shared.configuration import encryptValue
encryptedPassword = encryptValue(password, userId=userId, keyName="userAccountPassword")
@@ -827,7 +829,7 @@ async def submitMfaCode(
await queue.put({"action": mfaAction, "code": mfaCode})
return {"submitted": True}
else:
- raise HTTPException(status_code=404, detail="No active MFA challenge for this session")
+ raise HTTPException(status_code=404, detail=routeApiMsg("No active MFA challenge for this session"))
# =========================================================================
@@ -925,7 +927,7 @@ async def testAuth(
Does NOT join the meeting — only checks which page Teams serves.
"""
if not context.isSysAdmin:
- raise HTTPException(status_code=403, detail="SysAdmin privileges required for auth testing (uses system bot credentials)")
+ raise HTTPException(status_code=403, detail=routeApiMsg("SysAdmin privileges required for auth testing (uses system bot credentials)"))
import aiohttp
mandateId = _validateInstanceAccess(instanceId, context)
@@ -935,7 +937,7 @@ async def testAuth(
body = await request.json()
meetingUrl = body.get("meetingUrl")
if not meetingUrl:
- raise HTTPException(status_code=400, detail="meetingUrl is required")
+ raise HTTPException(status_code=400, detail=routeApiMsg("meetingUrl is required"))
# Load system bot credentials:
# 1. Use email/password from request body (direct override)
@@ -1000,7 +1002,7 @@ async def testAuth(
# Forward to browser bot service (single all-in-one call — may timeout with many variants)
browserBotUrl = effectiveConfig._getEffectiveBrowserBotUrl()
if not browserBotUrl:
- raise HTTPException(status_code=503, detail="Browser Bot URL not configured")
+ raise HTTPException(status_code=503, detail=routeApiMsg("Browser Bot URL not configured"))
browserBotUrl = browserBotUrl.rstrip("/")
payload = {
@@ -1037,14 +1039,14 @@ async def getTestAuthVariants(
Frontend calls this once, then runs each variant individually.
"""
if not context.isSysAdmin:
- raise HTTPException(status_code=403, detail="SysAdmin privileges required for auth testing")
+ raise HTTPException(status_code=403, detail=routeApiMsg("SysAdmin privileges required for auth testing"))
import aiohttp
_validateInstanceAccess(instanceId, context)
effectiveConfig = _getInstanceConfig(instanceId)
browserBotUrl = effectiveConfig._getEffectiveBrowserBotUrl()
if not browserBotUrl:
- raise HTTPException(status_code=503, detail="Browser Bot URL not configured")
+ raise HTTPException(status_code=503, detail=routeApiMsg("Browser Bot URL not configured"))
browserBotUrl = browserBotUrl.rstrip("/")
try:
@@ -1073,7 +1075,7 @@ async def testAuthSingleVariant(
Each call stays within Azure's 240s timeout.
"""
if not context.isSysAdmin:
- raise HTTPException(status_code=403, detail="SysAdmin privileges required for auth testing (uses system bot credentials)")
+ raise HTTPException(status_code=403, detail=routeApiMsg("SysAdmin privileges required for auth testing (uses system bot credentials)"))
import aiohttp
mandateId = _validateInstanceAccess(instanceId, context)
@@ -1084,7 +1086,7 @@ async def testAuthSingleVariant(
variantId = body.get("variantId")
meetingUrl = body.get("meetingUrl")
if not variantId or not meetingUrl:
- raise HTTPException(status_code=400, detail="variantId and meetingUrl are required")
+ raise HTTPException(status_code=400, detail=routeApiMsg("variantId and meetingUrl are required"))
# Load credentials (same logic as testAuth)
email = body.get("botEmail")
@@ -1116,7 +1118,7 @@ async def testAuthSingleVariant(
browserBotUrl = effectiveConfig._getEffectiveBrowserBotUrl()
if not browserBotUrl:
- raise HTTPException(status_code=503, detail="Browser Bot URL not configured")
+ raise HTTPException(status_code=503, detail=routeApiMsg("Browser Bot URL not configured"))
browserBotUrl = browserBotUrl.rstrip("/")
payload = {
@@ -1157,12 +1159,12 @@ async def listSessionScreenshots(
):
"""List debug screenshots for a session. Proxied from Browser Bot filesystem."""
if not context.isSysAdmin:
- raise HTTPException(status_code=403, detail="SysAdmin privileges required")
+ raise HTTPException(status_code=403, detail=routeApiMsg("SysAdmin privileges required"))
_validateInstanceAccess(instanceId, context)
effectiveConfig = _getInstanceConfig(instanceId)
browserBotUrl = effectiveConfig._getEffectiveBrowserBotUrl()
if not browserBotUrl:
- raise HTTPException(status_code=503, detail="Browser Bot URL not configured")
+ raise HTTPException(status_code=503, detail=routeApiMsg("Browser Bot URL not configured"))
import aiohttp
browserBotUrl = browserBotUrl.rstrip("/")
@@ -1194,16 +1196,16 @@ async def getScreenshotFile(
):
"""Serve a single debug screenshot image. Proxied from Browser Bot."""
if not context.isSysAdmin:
- raise HTTPException(status_code=403, detail="SysAdmin privileges required")
+ raise HTTPException(status_code=403, detail=routeApiMsg("SysAdmin privileges required"))
_validateInstanceAccess(instanceId, context)
if not filename.endswith(".png") or ".." in filename or "/" in filename or "\\" in filename:
- raise HTTPException(status_code=400, detail="Invalid filename")
+ raise HTTPException(status_code=400, detail=routeApiMsg("Invalid filename"))
effectiveConfig = _getInstanceConfig(instanceId)
browserBotUrl = effectiveConfig._getEffectiveBrowserBotUrl()
if not browserBotUrl:
- raise HTTPException(status_code=503, detail="Browser Bot URL not configured")
+ raise HTTPException(status_code=503, detail=routeApiMsg("Browser Bot URL not configured"))
import aiohttp
from fastapi.responses import Response as FastAPIResponse
@@ -1216,7 +1218,7 @@ async def getScreenshotFile(
imageBytes = await resp.read()
return FastAPIResponse(content=imageBytes, media_type="image/png")
else:
- raise HTTPException(status_code=resp.status, detail="Screenshot not found")
+ raise HTTPException(status_code=resp.status, detail=routeApiMsg("Screenshot not found"))
except aiohttp.ClientError as e:
logger.error(f"Screenshot file error: {e}")
raise HTTPException(status_code=503, detail=f"Browser Bot connection failed: {str(e)}")
@@ -1250,18 +1252,16 @@ async def postTranscript(
config = _getInstanceConfig(instanceId)
# Load original user context from session
- from modules.datamodels.datamodelUam import User
-
- systemUser = User(id="system", username="system", email="system@poweron.swiss")
- sessionInterface = interfaceDb.getInterface(systemUser, featureInstanceId=instanceId)
+ rootInterface = getRootInterface()
+ rootUser = rootInterface.currentUser
+ sessionInterface = interfaceDb.getInterface(rootUser, featureInstanceId=instanceId)
session = sessionInterface.getSession(sessionId)
mandateId = session.get("mandateId") if session else None
startedByUserId = session.get("startedByUserId") if session else None
- rootInterface = getRootInterface()
originalUser = rootInterface.getUser(startedByUserId) if startedByUserId else None
if not originalUser:
- originalUser = systemUser
+ originalUser = rootUser
# Process transcript through the service pipeline
from .service import TeamsbotService
@@ -1306,18 +1306,16 @@ async def postBotStatus(
try:
config = _getInstanceConfig(instanceId)
- from modules.datamodels.datamodelUam import User
-
- systemUser = User(id="system", username="system", email="system@poweron.swiss")
- sessionInterface = interfaceDb.getInterface(systemUser, featureInstanceId=instanceId)
+ rootInterface = getRootInterface()
+ rootUser = rootInterface.currentUser
+ sessionInterface = interfaceDb.getInterface(rootUser, featureInstanceId=instanceId)
session = sessionInterface.getSession(sessionId)
mandateId = session.get("mandateId") if session else None
startedByUserId = session.get("startedByUserId") if session else None
- rootInterface = getRootInterface()
originalUser = rootInterface.getUser(startedByUserId) if startedByUserId else None
if not originalUser:
- originalUser = systemUser
+ originalUser = rootUser
from .service import TeamsbotService
service = TeamsbotService(originalUser, mandateId, instanceId, config)
@@ -1359,22 +1357,20 @@ async def botWebsocket(
# Load the original user who started the session (has RBAC roles in mandate)
# Bot callbacks have no HTTP auth, so we reconstruct the user context from the session record.
- from modules.datamodels.datamodelUam import User
from modules.interfaces.interfaceDbApp import getRootInterface
- systemUser = User(id="system", username="system", email="system@poweron.swiss")
- sessionInterface = interfaceDb.getInterface(systemUser, featureInstanceId=instanceId)
+ rootInterface = getRootInterface()
+ rootUser = rootInterface.currentUser
+ sessionInterface = interfaceDb.getInterface(rootUser, featureInstanceId=instanceId)
session = sessionInterface.getSession(sessionId)
mandateId = session.get("mandateId") if session else None
startedByUserId = session.get("startedByUserId") if session else None
- # Look up the original user (getRootInterface uses admin context, can load any user)
- rootInterface = getRootInterface()
originalUser = rootInterface.getUser(startedByUserId) if startedByUserId else None
if not originalUser:
- logger.warning(f"Could not load original user {startedByUserId}, falling back to system user")
- originalUser = systemUser
+ logger.warning(f"Could not load original user {startedByUserId}, falling back to root user")
+ originalUser = rootUser
# Build effective config with the session's actual bot name.
# The session stores the resolved bot name (from system bot or user override).
diff --git a/modules/features/trustee/accounting/accountingConnectorBase.py b/modules/features/trustee/accounting/accountingConnectorBase.py
index 44044729..355b6f34 100644
--- a/modules/features/trustee/accounting/accountingConnectorBase.py
+++ b/modules/features/trustee/accounting/accountingConnectorBase.py
@@ -4,7 +4,7 @@
from abc import ABC, abstractmethod
from typing import List, Optional, Dict, Any
-from pydantic import BaseModel, Field
+from pydantic import BaseModel
class AccountingBookingLine(BaseModel):
@@ -51,7 +51,7 @@ class SyncResult(BaseModel):
class ConnectorConfigField(BaseModel):
"""Describes a configuration field required by a connector."""
key: str
- label: Dict[str, str]
+ label: str
fieldType: str = "text"
secret: bool = False
required: bool = True
@@ -70,8 +70,8 @@ class BaseAccountingConnector(ABC):
"""Unique type identifier, e.g. 'rma', 'bexio', 'abacus'."""
@abstractmethod
- def getConnectorLabel(self) -> Dict[str, str]:
- """I18n display label."""
+ def getConnectorLabel(self) -> str:
+ """German plaintext label (used as i18n key)."""
@abstractmethod
def getRequiredConfigFields(self) -> List[ConnectorConfigField]:
diff --git a/modules/features/trustee/accounting/accountingDataSync.py b/modules/features/trustee/accounting/accountingDataSync.py
index d393de76..e0584a02 100644
--- a/modules/features/trustee/accounting/accountingDataSync.py
+++ b/modules/features/trustee/accounting/accountingDataSync.py
@@ -5,10 +5,13 @@
Flow: load config → resolve connector → fetch data → clear old records → write new records → compute balances.
"""
+import json as _json
import logging
+import os
import time
from collections import defaultdict
-from typing import Dict, Any, Optional
+from pathlib import Path
+from typing import Dict, Any, List, Optional
from .accountingConnectorBase import BaseAccountingConnector
from .accountingRegistry import _getAccountingRegistry
@@ -16,6 +19,45 @@ from .accountingRegistry import _getAccountingRegistry
logger = logging.getLogger(__name__)
+_DEBUG_SYNC_DIR = Path("D:/Athi/Local/Web/poweron/local/debug/sync")
+
+
+def _debugSyncDir() -> Path:
+ _DEBUG_SYNC_DIR.mkdir(parents=True, exist_ok=True)
+ return _DEBUG_SYNC_DIR
+
+
+def _isDebugEnabled() -> bool:
+ try:
+ from modules.shared.configuration import APP_CONFIG
+ return APP_CONFIG.get("APP_LOGGING_FILE_ENABLED", False) is True or str(APP_CONFIG.get("APP_LOGGING_FILE_ENABLED", "")).lower() == "true"
+ except Exception:
+ return False
+
+
+def _dumpSyncData(tag: str, rows: list):
+ """Write raw connector data to a timestamped JSON file in local/debug/sync/."""
+ if not _isDebugEnabled():
+ return
+ try:
+ d = _debugSyncDir()
+ ts = time.strftime("%Y%m%d-%H%M%S")
+ path = d / f"{ts}_{tag}.json"
+ serializable = []
+ for r in rows:
+ if isinstance(r, dict):
+ serializable.append(r)
+ elif hasattr(r, "__dict__"):
+ serializable.append({k: v for k, v in r.__dict__.items() if not k.startswith("_")})
+ else:
+ serializable.append(str(r))
+ with open(path, "w", encoding="utf-8") as f:
+ _json.dump({"count": len(serializable), "rows": serializable}, f, ensure_ascii=False, indent=2, default=str)
+ logger.info(f"Debug sync dump: {path.name} ({len(serializable)} rows)")
+ except Exception as e:
+ logger.warning(f"Failed to write debug sync dump for {tag}: {e}")
+
+
class AccountingDataSync:
"""Imports accounting data (read-only) from an external system into local TrusteeData* tables."""
@@ -86,6 +128,7 @@ class AccountingDataSync:
# 1) Chart of accounts
try:
charts = await connector.getChartOfAccounts(connConfig)
+ _dumpSyncData("accounts", charts)
fetchedAccountNumbers = [acc.accountNumber for acc in charts if acc.accountNumber]
self._clearTable(TrusteeDataAccount, featureInstanceId)
for acc in charts:
@@ -105,6 +148,7 @@ class AccountingDataSync:
# 2) Journal entries + lines (pass already-fetched chart to avoid redundant API call)
try:
rawEntries = await connector.getJournalEntries(connConfig, dateFrom=dateFrom, dateTo=dateTo, accountNumbers=fetchedAccountNumbers or None)
+ _dumpSyncData("journalEntries", rawEntries)
self._clearTable(TrusteeDataJournalEntry, featureInstanceId)
self._clearTable(TrusteeDataJournalLine, featureInstanceId)
lineCount = 0
@@ -146,11 +190,13 @@ class AccountingDataSync:
contactCount = 0
customers = await connector.getCustomers(connConfig)
+ _dumpSyncData("customers", customers)
for c in customers:
self._if.db.recordCreate(TrusteeDataContact, self._mapContact(c, "customer", scope))
contactCount += 1
vendors = await connector.getVendors(connConfig)
+ _dumpSyncData("vendors", vendors)
for v in vendors:
self._if.db.recordCreate(TrusteeDataContact, self._mapContact(v, "vendor", scope))
contactCount += 1
diff --git a/modules/features/trustee/accounting/accountingRegistry.py b/modules/features/trustee/accounting/accountingRegistry.py
index 4d0a3c1c..ca5e27d9 100644
--- a/modules/features/trustee/accounting/accountingRegistry.py
+++ b/modules/features/trustee/accounting/accountingRegistry.py
@@ -58,10 +58,15 @@ class AccountingRegistry:
self.discoverConnectors()
result = []
for connectorType, connector in self._connectors.items():
+ fields = []
+ for f in connector.getRequiredConfigFields():
+ fd = f.model_dump()
+ fd["label"] = f.label
+ fields.append(fd)
result.append({
"connectorType": connectorType,
"label": connector.getConnectorLabel(),
- "configFields": [f.model_dump() for f in connector.getRequiredConfigFields()],
+ "configFields": fields,
})
return result
diff --git a/modules/features/trustee/accounting/connectors/accountingConnectorAbacus.py b/modules/features/trustee/accounting/connectors/accountingConnectorAbacus.py
index eec3fef0..0269a654 100644
--- a/modules/features/trustee/accounting/connectors/accountingConnectorAbacus.py
+++ b/modules/features/trustee/accounting/connectors/accountingConnectorAbacus.py
@@ -22,6 +22,7 @@ from ..accountingConnectorBase import (
ConnectorConfigField,
SyncResult,
)
+from modules.shared.i18nRegistry import t
logger = logging.getLogger(__name__)
@@ -34,34 +35,34 @@ class AccountingConnectorAbacus(BaseAccountingConnector):
def getConnectorType(self) -> str:
return "abacus"
- def getConnectorLabel(self) -> Dict[str, str]:
- return {"en": "Abacus ERP", "de": "Abacus ERP", "fr": "Abacus ERP"}
+ def getConnectorLabel(self) -> str:
+ return "Abacus ERP"
def getRequiredConfigFields(self) -> List[ConnectorConfigField]:
return [
ConnectorConfigField(
key="apiBaseUrl",
- label={"en": "API Base URL", "de": "API Base URL", "fr": "URL de base API"},
+ label=t("API Base URL"),
fieldType="text",
secret=False,
placeholder="e.g. https://abacus.meinefirma.ch/api/entity/v1/",
),
ConnectorConfigField(
key="clientName",
- label={"en": "Client Name", "de": "Mandantenname", "fr": "Nom du client"},
+ label=t("Mandantenname"),
fieldType="text",
secret=False,
placeholder="e.g. 7777",
),
ConnectorConfigField(
key="clientId",
- label={"en": "Client ID", "de": "Client-ID", "fr": "ID Client"},
+ label=t("Client-ID"),
fieldType="text",
secret=False,
),
ConnectorConfigField(
key="clientSecret",
- label={"en": "Client Secret", "de": "Client-Secret", "fr": "Secret Client"},
+ label=t("Client-Secret"),
fieldType="password",
secret=True,
),
diff --git a/modules/features/trustee/accounting/connectors/accountingConnectorBexio.py b/modules/features/trustee/accounting/connectors/accountingConnectorBexio.py
index a1e588d6..dcb3233d 100644
--- a/modules/features/trustee/accounting/connectors/accountingConnectorBexio.py
+++ b/modules/features/trustee/accounting/connectors/accountingConnectorBexio.py
@@ -21,6 +21,7 @@ from ..accountingConnectorBase import (
ConnectorConfigField,
SyncResult,
)
+from modules.shared.i18nRegistry import t
logger = logging.getLogger(__name__)
@@ -35,28 +36,28 @@ class AccountingConnectorBexio(BaseAccountingConnector):
def getConnectorType(self) -> str:
return "bexio"
- def getConnectorLabel(self) -> Dict[str, str]:
- return {"en": "Bexio", "de": "Bexio", "fr": "Bexio"}
+ def getConnectorLabel(self) -> str:
+ return "Bexio"
def getRequiredConfigFields(self) -> List[ConnectorConfigField]:
return [
ConnectorConfigField(
key="apiBaseUrl",
- label={"en": "API Base URL", "de": "API Base URL", "fr": "URL de base API"},
+ label=t("API Base URL"),
fieldType="text",
secret=False,
placeholder="https://api.bexio.com/",
),
ConnectorConfigField(
key="clientName",
- label={"en": "Client Name", "de": "Mandantenname", "fr": "Nom du client"},
+ label=t("Mandantenname"),
fieldType="text",
secret=False,
placeholder="e.g. poweronag",
),
ConnectorConfigField(
key="accessToken",
- label={"en": "Personal Access Token", "de": "Persönlicher Zugriffstoken", "fr": "Jeton d'accès personnel"},
+ label=t("Persönlicher Zugriffstoken"),
fieldType="password",
secret=True,
placeholder="PAT from developer.bexio.com",
diff --git a/modules/features/trustee/accounting/connectors/accountingConnectorRma.py b/modules/features/trustee/accounting/connectors/accountingConnectorRma.py
index 15aa7ca9..bcf52561 100644
--- a/modules/features/trustee/accounting/connectors/accountingConnectorRma.py
+++ b/modules/features/trustee/accounting/connectors/accountingConnectorRma.py
@@ -24,6 +24,7 @@ from ..accountingConnectorBase import (
ConnectorConfigField,
SyncResult,
)
+from modules.shared.i18nRegistry import t
logger = logging.getLogger(__name__)
@@ -35,28 +36,28 @@ class AccountingConnectorRma(BaseAccountingConnector):
def getConnectorType(self) -> str:
return "rma"
- def getConnectorLabel(self) -> Dict[str, str]:
- return {"en": "Run My Accounts", "de": "Run My Accounts", "fr": "Run My Accounts"}
+ def getConnectorLabel(self) -> str:
+ return "Run My Accounts"
def getRequiredConfigFields(self) -> List[ConnectorConfigField]:
return [
ConnectorConfigField(
key="apiBaseUrl",
- label={"en": "API Base URL", "de": "API Base URL", "fr": "URL de base API"},
+ label=t("API Base URL"),
fieldType="text",
secret=False,
placeholder="https://service.runmyaccounts.com/api/latest/clients/",
),
ConnectorConfigField(
key="clientName",
- label={"en": "Client Name", "de": "Mandantenname", "fr": "Nom du client"},
+ label=t("Mandantenname"),
fieldType="text",
secret=False,
placeholder="e.g. meinefirma",
),
ConnectorConfigField(
key="apiKey",
- label={"en": "API Key", "de": "API-Schlüssel", "fr": "Clé API"},
+ label=t("API-Schlüssel"),
fieldType="password",
secret=True,
),
@@ -227,6 +228,10 @@ class AccountingConnectorRma(BaseAccountingConnector):
if rawDesc and len(rawDesc) > 80:
payload["notes"] = rawDesc[:2000]
+ logger.debug("RMA pushBooking payload: batch=%s transdate=%s accounts=%s",
+ batchNumber, transdate,
+ [(t.get("accno"), t.get("debit_amount"), t.get("credit_amount")) for t in glTransactions])
+
async with aiohttp.ClientSession() as session:
url = self._buildUrl(config, "gl")
async with session.post(url, headers=self._buildHeaders(config), json=payload, timeout=aiohttp.ClientTimeout(total=30)) as resp:
diff --git a/modules/features/trustee/datamodelFeatureTrustee.py b/modules/features/trustee/datamodelFeatureTrustee.py
index 0889e361..ccb5a407 100644
--- a/modules/features/trustee/datamodelFeatureTrustee.py
+++ b/modules/features/trustee/datamodelFeatureTrustee.py
@@ -7,15 +7,16 @@ from typing import Optional
from pydantic import BaseModel, Field
from modules.datamodels.datamodelBase import PowerOnModel
-from modules.shared.attributeUtils import registerModelLabels
+from modules.shared.i18nRegistry import i18nModel
import uuid
-
+@i18nModel("Organisation")
class TrusteeOrganisation(PowerOnModel):
"""Represents trustee organisations (companies) within the Trustee feature."""
id: str = Field( # Unique string label (PK), not UUID
description="Unique organisation identifier (label)",
json_schema_extra={
+ "label": "ID",
"frontend_type": "text",
"frontend_readonly": False, # Editable at creation, then readonly
"frontend_required": True
@@ -24,6 +25,7 @@ class TrusteeOrganisation(PowerOnModel):
label: str = Field(
description="Company name",
json_schema_extra={
+ "label": "Bezeichnung",
"frontend_type": "text",
"frontend_readonly": False,
"frontend_required": True
@@ -33,6 +35,7 @@ class TrusteeOrganisation(PowerOnModel):
default=True,
description="Whether the organisation is enabled",
json_schema_extra={
+ "label": "Aktiviert",
"frontend_type": "checkbox",
"frontend_readonly": False,
"frontend_required": False
@@ -42,6 +45,7 @@ class TrusteeOrganisation(PowerOnModel):
default=None,
description="Mandate ID (system-level organisation)",
json_schema_extra={
+ "label": "Mandat",
"frontend_type": "text",
"frontend_readonly": True,
"frontend_required": False
@@ -51,6 +55,7 @@ class TrusteeOrganisation(PowerOnModel):
default=None,
description="Feature Instance ID for instance-level isolation",
json_schema_extra={
+ "label": "Feature-Instanz",
"frontend_type": "text",
"frontend_readonly": True,
"frontend_required": False
@@ -59,25 +64,13 @@ class TrusteeOrganisation(PowerOnModel):
# System attributes are automatically set by DatabaseConnector:
# sysCreatedAt, sysModifiedAt, sysCreatedBy, sysModifiedBy (PowerOnModel)
-
-registerModelLabels(
- "TrusteeOrganisation",
- {"en": "Organisation", "fr": "Organisation", "de": "Organisation"},
- {
- "id": {"en": "ID", "fr": "ID", "de": "ID"},
- "label": {"en": "Label", "fr": "Libellé", "de": "Bezeichnung"},
- "enabled": {"en": "Enabled", "fr": "Activé", "de": "Aktiviert"},
- "mandateId": {"en": "Mandate", "fr": "Mandat", "de": "Mandat"},
- "featureInstanceId": {"en": "Feature Instance", "fr": "Instance de fonctionnalité", "de": "Feature-Instanz"},
- },
-)
-
-
+@i18nModel("Rolle")
class TrusteeRole(PowerOnModel):
"""Defines roles within the Trustee feature."""
id: str = Field( # Unique string label (PK), not UUID
description="Unique role identifier (label)",
json_schema_extra={
+ "label": "ID",
"frontend_type": "text",
"frontend_readonly": False,
"frontend_required": True
@@ -86,6 +79,7 @@ class TrusteeRole(PowerOnModel):
desc: str = Field(
description="Role description",
json_schema_extra={
+ "label": "Beschreibung",
"frontend_type": "textarea",
"frontend_readonly": False,
"frontend_required": True
@@ -95,6 +89,7 @@ class TrusteeRole(PowerOnModel):
default=None,
description="Mandate ID",
json_schema_extra={
+ "label": "Mandat",
"frontend_type": "text",
"frontend_readonly": True,
"frontend_required": False
@@ -104,6 +99,7 @@ class TrusteeRole(PowerOnModel):
default=None,
description="Feature Instance ID for instance-level isolation",
json_schema_extra={
+ "label": "Feature-Instanz",
"frontend_type": "text",
"frontend_readonly": True,
"frontend_required": False
@@ -111,25 +107,14 @@ class TrusteeRole(PowerOnModel):
)
# System attributes are automatically set by DatabaseConnector
-
-registerModelLabels(
- "TrusteeRole",
- {"en": "Role", "fr": "Rôle", "de": "Rolle"},
- {
- "id": {"en": "ID", "fr": "ID", "de": "ID"},
- "desc": {"en": "Description", "fr": "Description", "de": "Beschreibung"},
- "mandateId": {"en": "Mandate", "fr": "Mandat", "de": "Mandat"},
- "featureInstanceId": {"en": "Feature Instance", "fr": "Instance de fonctionnalité", "de": "Feature-Instanz"},
- },
-)
-
-
+@i18nModel("Zugriff")
class TrusteeAccess(PowerOnModel):
"""Defines user access to organisations with specific roles."""
id: str = Field(
default_factory=lambda: str(uuid.uuid4()),
description="Unique access ID",
json_schema_extra={
+ "label": "ID",
"frontend_type": "text",
"frontend_readonly": True,
"frontend_required": False
@@ -138,6 +123,7 @@ class TrusteeAccess(PowerOnModel):
organisationId: str = Field(
description="Reference to TrusteeOrganisation.id",
json_schema_extra={
+ "label": "Organisation",
"frontend_type": "select",
"frontend_readonly": False,
"frontend_required": True,
@@ -147,6 +133,7 @@ class TrusteeAccess(PowerOnModel):
roleId: str = Field(
description="Reference to TrusteeRole.id",
json_schema_extra={
+ "label": "Rolle",
"frontend_type": "select",
"frontend_readonly": False,
"frontend_required": True,
@@ -156,6 +143,7 @@ class TrusteeAccess(PowerOnModel):
userId: str = Field(
description="User ID assigned to this role",
json_schema_extra={
+ "label": "Benutzer",
"frontend_type": "select",
"frontend_readonly": False,
"frontend_required": True,
@@ -166,6 +154,7 @@ class TrusteeAccess(PowerOnModel):
default=None,
description="Optional reference to TrusteeContract.id. If None, access is for full organisation. If set, access is limited to this specific contract.",
json_schema_extra={
+ "label": "Vertrag (optional)",
"frontend_type": "select",
"frontend_readonly": False,
"frontend_required": False,
@@ -177,6 +166,7 @@ class TrusteeAccess(PowerOnModel):
default=None,
description="Mandate ID",
json_schema_extra={
+ "label": "Mandat",
"frontend_type": "text",
"frontend_readonly": True,
"frontend_required": False
@@ -186,6 +176,7 @@ class TrusteeAccess(PowerOnModel):
default=None,
description="Feature Instance ID for instance-level isolation",
json_schema_extra={
+ "label": "Feature-Instanz",
"frontend_type": "text",
"frontend_readonly": True,
"frontend_required": False
@@ -193,28 +184,14 @@ class TrusteeAccess(PowerOnModel):
)
# System attributes are automatically set by DatabaseConnector
-
-registerModelLabels(
- "TrusteeAccess",
- {"en": "Access", "fr": "Accès", "de": "Zugriff"},
- {
- "id": {"en": "ID", "fr": "ID", "de": "ID"},
- "organisationId": {"en": "Organisation", "fr": "Organisation", "de": "Organisation"},
- "roleId": {"en": "Role", "fr": "Rôle", "de": "Rolle"},
- "userId": {"en": "User", "fr": "Utilisateur", "de": "Benutzer"},
- "contractId": {"en": "Contract (optional)", "fr": "Contrat (optionnel)", "de": "Vertrag (optional)"},
- "mandateId": {"en": "Mandate", "fr": "Mandat", "de": "Mandat"},
- "featureInstanceId": {"en": "Feature Instance", "fr": "Instance de fonctionnalité", "de": "Feature-Instanz"},
- },
-)
-
-
+@i18nModel("Vertrag")
class TrusteeContract(PowerOnModel):
"""Defines customer contracts within organisations."""
id: str = Field(
default_factory=lambda: str(uuid.uuid4()),
description="Unique contract ID",
json_schema_extra={
+ "label": "ID",
"frontend_type": "text",
"frontend_readonly": True,
"frontend_required": False
@@ -223,6 +200,7 @@ class TrusteeContract(PowerOnModel):
organisationId: str = Field(
description="Reference to TrusteeOrganisation.id (immutable after creation)",
json_schema_extra={
+ "label": "Organisation",
"frontend_type": "select",
"frontend_readonly": False, # Editable at creation, then readonly
"frontend_required": True,
@@ -232,6 +210,7 @@ class TrusteeContract(PowerOnModel):
label: str = Field(
description="Label for the customer contract (e.g., 'Muster AG 2026')",
json_schema_extra={
+ "label": "Bezeichnung",
"frontend_type": "text",
"frontend_readonly": False,
"frontend_required": True
@@ -241,6 +220,7 @@ class TrusteeContract(PowerOnModel):
default=True,
description="Whether the contract is enabled",
json_schema_extra={
+ "label": "Aktiviert",
"frontend_type": "checkbox",
"frontend_readonly": False,
"frontend_required": False
@@ -250,6 +230,7 @@ class TrusteeContract(PowerOnModel):
default=None,
description="Mandate ID",
json_schema_extra={
+ "label": "Mandat",
"frontend_type": "text",
"frontend_readonly": True,
"frontend_required": False
@@ -259,6 +240,7 @@ class TrusteeContract(PowerOnModel):
default=None,
description="Feature Instance ID for instance-level isolation",
json_schema_extra={
+ "label": "Feature-Instanz",
"frontend_type": "text",
"frontend_readonly": True,
"frontend_required": False
@@ -266,21 +248,6 @@ class TrusteeContract(PowerOnModel):
)
# System attributes are automatically set by DatabaseConnector
-
-registerModelLabels(
- "TrusteeContract",
- {"en": "Contract", "fr": "Contrat", "de": "Vertrag"},
- {
- "id": {"en": "ID", "fr": "ID", "de": "ID"},
- "organisationId": {"en": "Organisation", "fr": "Organisation", "de": "Organisation"},
- "label": {"en": "Label", "fr": "Libellé", "de": "Bezeichnung"},
- "enabled": {"en": "Enabled", "fr": "Activé", "de": "Aktiviert"},
- "mandateId": {"en": "Mandate", "fr": "Mandat", "de": "Mandat"},
- "featureInstanceId": {"en": "Feature Instance", "fr": "Instance de fonctionnalité", "de": "Feature-Instanz"},
- },
-)
-
-
class TrusteeDocumentTypeEnum(str, Enum):
"""Document type for trustee documents (expense extraction, ingest, sync)."""
INVOICE = "invoice"
@@ -290,7 +257,7 @@ class TrusteeDocumentTypeEnum(str, Enum):
UNKNOWN = "unknown"
AUTO = "auto"
-
+@i18nModel("Dokument")
class TrusteeDocument(PowerOnModel):
"""Contains document references for bookings.
@@ -305,6 +272,7 @@ class TrusteeDocument(PowerOnModel):
default_factory=lambda: str(uuid.uuid4()),
description="Unique document ID",
json_schema_extra={
+ "label": "ID",
"frontend_type": "text",
"frontend_readonly": True,
"frontend_required": False
@@ -314,6 +282,7 @@ class TrusteeDocument(PowerOnModel):
default=None,
description="Reference to central Files table (Files.id)",
json_schema_extra={
+ "label": "Datei-Referenz",
"frontend_type": "file_reference",
"frontend_readonly": False,
"frontend_required": False
@@ -322,6 +291,7 @@ class TrusteeDocument(PowerOnModel):
documentName: str = Field(
description="File name (e.g., 'Beleg.pdf')",
json_schema_extra={
+ "label": "Dokumentname",
"frontend_type": "text",
"frontend_readonly": False,
"frontend_required": True
@@ -331,6 +301,7 @@ class TrusteeDocument(PowerOnModel):
default="application/octet-stream",
description="MIME type of the document",
json_schema_extra={
+ "label": "MIME-Typ",
"frontend_type": "select",
"frontend_readonly": False,
"frontend_required": True,
@@ -341,6 +312,7 @@ class TrusteeDocument(PowerOnModel):
default=None,
description="Source type (e.g., 'sharepoint', 'upload', 'email')",
json_schema_extra={
+ "label": "Quelltyp",
"frontend_type": "text",
"frontend_readonly": True,
"frontend_required": False
@@ -350,6 +322,7 @@ class TrusteeDocument(PowerOnModel):
default=None,
description="Original source location (e.g., SharePoint path)",
json_schema_extra={
+ "label": "Quellort",
"frontend_type": "text",
"frontend_readonly": True,
"frontend_required": False
@@ -359,6 +332,7 @@ class TrusteeDocument(PowerOnModel):
default=None,
description="Mandate ID (auto-set from context)",
json_schema_extra={
+ "label": "Mandat",
"frontend_type": "text",
"frontend_readonly": True,
"frontend_required": False,
@@ -369,6 +343,7 @@ class TrusteeDocument(PowerOnModel):
default=None,
description="Feature Instance ID for instance-level isolation (auto-set from context)",
json_schema_extra={
+ "label": "Feature-Instanz",
"frontend_type": "text",
"frontend_readonly": True,
"frontend_required": False,
@@ -379,6 +354,7 @@ class TrusteeDocument(PowerOnModel):
default=None,
description="Document type (e.g. invoice, expense_receipt, bank_document, contract); use TrusteeDocumentTypeEnum values",
json_schema_extra={
+ "label": "Dokumenttyp",
"frontend_type": "text",
"frontend_readonly": False,
"frontend_required": False
@@ -388,6 +364,7 @@ class TrusteeDocument(PowerOnModel):
default=None,
description="External Beleg-ID in accounting system (e.g. RMA); set on first successful upload, reused on re-sync",
json_schema_extra={
+ "label": "Beleg-ID (Buchhaltung)",
"frontend_type": "text",
"frontend_readonly": True,
"frontend_required": False,
@@ -396,25 +373,7 @@ class TrusteeDocument(PowerOnModel):
)
# System attributes are automatically set by DatabaseConnector
-
-registerModelLabels(
- "TrusteeDocument",
- {"en": "Document", "fr": "Document", "de": "Dokument"},
- {
- "id": {"en": "ID", "fr": "ID", "de": "ID"},
- "fileId": {"en": "File Reference", "fr": "Référence du fichier", "de": "Datei-Referenz"},
- "documentName": {"en": "Document Name", "fr": "Nom du document", "de": "Dokumentname"},
- "documentMimeType": {"en": "MIME Type", "fr": "Type MIME", "de": "MIME-Typ"},
- "sourceType": {"en": "Source Type", "fr": "Type de source", "de": "Quelltyp"},
- "sourceLocation": {"en": "Source Location", "fr": "Emplacement source", "de": "Quellort"},
- "mandateId": {"en": "Mandate", "fr": "Mandat", "de": "Mandat"},
- "featureInstanceId": {"en": "Feature Instance", "fr": "Instance de fonctionnalité", "de": "Feature-Instanz"},
- "documentType": {"en": "Document Type", "fr": "Type de document", "de": "Dokumenttyp"},
- "externalBelegId": {"en": "Beleg ID (Accounting)", "fr": "ID Beleg (Comptabilité)", "de": "Beleg-ID (Buchhaltung)"},
- },
-)
-
-
+@i18nModel("Position")
class TrusteePosition(PowerOnModel):
"""Contains booking positions (expense entries).
@@ -425,6 +384,7 @@ class TrusteePosition(PowerOnModel):
default_factory=lambda: str(uuid.uuid4()),
description="Unique position ID",
json_schema_extra={
+ "label": "ID",
"frontend_type": "text",
"frontend_readonly": True,
"frontend_required": False
@@ -434,6 +394,7 @@ class TrusteePosition(PowerOnModel):
default=None,
description="Reference to TrusteeDocument.id (Beleg / primary document)",
json_schema_extra={
+ "label": "Dokument",
"frontend_type": "select",
"frontend_readonly": False,
"frontend_required": False,
@@ -444,6 +405,7 @@ class TrusteePosition(PowerOnModel):
default=None,
description="Reference to TrusteeDocument.id (Bank-Referenz / second document)",
json_schema_extra={
+ "label": "Bank-Referenz",
"frontend_type": "select",
"frontend_readonly": False,
"frontend_required": False,
@@ -454,6 +416,7 @@ class TrusteePosition(PowerOnModel):
default=None,
description="Value date (ISO format: YYYY-MM-DD)",
json_schema_extra={
+ "label": "Valutadatum",
"frontend_type": "date",
"frontend_readonly": False,
"frontend_required": True
@@ -463,6 +426,7 @@ class TrusteePosition(PowerOnModel):
default=None,
description="Transaction timestamp (UTC timestamp in seconds)",
json_schema_extra={
+ "label": "Transaktionszeitpunkt",
"frontend_type": "timestamp",
"frontend_readonly": False,
"frontend_required": True
@@ -472,6 +436,7 @@ class TrusteePosition(PowerOnModel):
default="",
description="Company name",
json_schema_extra={
+ "label": "Firma",
"frontend_type": "text",
"frontend_readonly": False,
"frontend_required": False
@@ -481,6 +446,7 @@ class TrusteePosition(PowerOnModel):
default="",
description="Description",
json_schema_extra={
+ "label": "Beschreibung",
"frontend_type": "textarea",
"frontend_readonly": False,
"frontend_required": False
@@ -490,6 +456,7 @@ class TrusteePosition(PowerOnModel):
default="",
description="Tags (comma-separated)",
json_schema_extra={
+ "label": "Tags",
"frontend_type": "text",
"frontend_readonly": False,
"frontend_required": False
@@ -503,10 +470,11 @@
+            "label": "Buchungswährung",
             "frontend_readonly": False,
             "frontend_required": True,
             "frontend_options": [
-                {"value": "CHF", "label": {"en": "CHF", "fr": "CHF", "de": "CHF"}},
-                {"value": "EUR", "label": {"en": "EUR", "fr": "EUR", "de": "EUR"}},
-                {"value": "USD", "label": {"en": "USD", "fr": "USD", "de": "USD"}},
-                {"value": "GBP", "label": {"en": "GBP", "fr": "GBP", "de": "GBP"}},
+                {"value": "CHF", "label": "CHF"},
+                {"value": "EUR", "label": "EUR"},
+                {"value": "USD", "label": "USD"},
+                {"value": "GBP", "label": "GBP"},
             ]
         }
     )
@@ -514,6 +481,7 @@ class TrusteePosition(PowerOnModel):
default=0.0,
description="Booking amount",
json_schema_extra={
+ "label": "Buchungsbetrag",
"frontend_type": "number",
"frontend_readonly": False,
"frontend_required": True
@@ -527,10 +495,11 @@
+            "label": "Originalwährung",
             "frontend_readonly": False,
             "frontend_required": True,
             "frontend_options": [
-                {"value": "CHF", "label": {"en": "CHF", "fr": "CHF", "de": "CHF"}},
-                {"value": "EUR", "label": {"en": "EUR", "fr": "EUR", "de": "EUR"}},
-                {"value": "USD", "label": {"en": "USD", "fr": "USD", "de": "USD"}},
-                {"value": "GBP", "label": {"en": "GBP", "fr": "GBP", "de": "GBP"}},
+                {"value": "CHF", "label": "CHF"},
+                {"value": "EUR", "label": "EUR"},
+                {"value": "USD", "label": "USD"},
+                {"value": "GBP", "label": "GBP"},
             ]
         }
     )
@@ -538,6 +506,7 @@ class TrusteePosition(PowerOnModel):
default=0.0,
description="Original amount (manual input, no automatic currency conversion)",
json_schema_extra={
+ "label": "Originalbetrag",
"frontend_type": "number",
"frontend_readonly": False,
"frontend_required": True
@@ -547,6 +516,7 @@ class TrusteePosition(PowerOnModel):
default=0.0,
description="VAT percentage",
json_schema_extra={
+ "label": "MwSt-Prozentsatz",
"frontend_type": "number",
"frontend_readonly": False,
"frontend_required": False
@@ -556,6 +526,7 @@ class TrusteePosition(PowerOnModel):
default=0.0,
description="VAT amount (calculated: bookingAmount * vatPercentage / 100, can be manually overridden)",
json_schema_extra={
+ "label": "MwSt-Betrag",
"frontend_type": "number",
"frontend_readonly": False,
"frontend_required": False
@@ -565,6 +536,7 @@ class TrusteePosition(PowerOnModel):
default=None,
description="Debit account number (e.g. '4200' for expenses)",
json_schema_extra={
+ "label": "Soll-Konto",
"frontend_type": "text",
"frontend_readonly": False,
"frontend_required": False
@@ -574,6 +546,7 @@ class TrusteePosition(PowerOnModel):
default=None,
description="Credit account number (e.g. '1020' for bank)",
json_schema_extra={
+ "label": "Haben-Konto",
"frontend_type": "text",
"frontend_readonly": False,
"frontend_required": False
@@ -583,6 +556,7 @@ class TrusteePosition(PowerOnModel):
default=None,
description="Tax code for the accounting system",
json_schema_extra={
+ "label": "Steuercode",
"frontend_type": "text",
"frontend_readonly": False,
"frontend_required": False
@@ -592,6 +566,7 @@ class TrusteePosition(PowerOnModel):
default=None,
description="Cost center identifier",
json_schema_extra={
+ "label": "Kostenstelle",
"frontend_type": "text",
"frontend_readonly": False,
"frontend_required": False
@@ -601,6 +576,7 @@ class TrusteePosition(PowerOnModel):
default=None,
description="Booking reference (e.g. voucher number)",
json_schema_extra={
+ "label": "Buchungsreferenz",
"frontend_type": "text",
"frontend_readonly": False,
"frontend_required": False
@@ -614,11 +590,12 @@
+            "label": "Dokumenttyp",
             "frontend_readonly": True,
             "frontend_required": False,
             "frontend_options": [
-                {"value": "invoice", "label": {"en": "Invoice", "fr": "Facture", "de": "Rechnung"}},
-                {"value": "expense_receipt", "label": {"en": "Expense Receipt", "fr": "Reçu", "de": "Beleg"}},
-                {"value": "bank_document", "label": {"en": "Bank Statement", "fr": "Relevé bancaire", "de": "Bankauszug"}},
-                {"value": "contract", "label": {"en": "Contract", "fr": "Contrat", "de": "Vertrag"}},
-                {"value": "unknown", "label": {"en": "Other", "fr": "Autre", "de": "Sonstige"}},
+                {"value": "invoice", "label": "Rechnung"},
+                {"value": "expense_receipt", "label": "Beleg"},
+                {"value": "bank_document", "label": "Bankauszug"},
+                {"value": "contract", "label": "Vertrag"},
+                {"value": "unknown", "label": "Sonstige"},
             ]
         }
     )
@@ -626,6 +602,7 @@ class TrusteePosition(PowerOnModel):
default=None,
description="IBAN of the payment recipient (from invoice / QR code)",
json_schema_extra={
+ "label": "Empfänger-IBAN",
"frontend_type": "text",
"frontend_readonly": False,
"frontend_required": False
@@ -635,6 +612,7 @@ class TrusteePosition(PowerOnModel):
default=None,
description="Bank or account holder name of the payment recipient",
json_schema_extra={
+ "label": "Empfänger-Name",
"frontend_type": "text",
"frontend_readonly": False,
"frontend_required": False
@@ -644,6 +622,7 @@ class TrusteePosition(PowerOnModel):
default=None,
description="BIC / SWIFT code of the recipient bank",
json_schema_extra={
+ "label": "Empfänger-BIC",
"frontend_type": "text",
"frontend_readonly": False,
"frontend_required": False
@@ -653,6 +632,7 @@ class TrusteePosition(PowerOnModel):
default=None,
description="Structured payment reference (QR-Referenz, ESR, SCOR, Mitteilung)",
json_schema_extra={
+ "label": "Zahlungsreferenz",
"frontend_type": "text",
"frontend_readonly": False,
"frontend_required": False
@@ -662,6 +642,7 @@ class TrusteePosition(PowerOnModel):
default=None,
description="Payment due date (ISO format: YYYY-MM-DD)",
json_schema_extra={
+ "label": "Fälligkeitsdatum",
"frontend_type": "date",
"frontend_readonly": False,
"frontend_required": False
@@ -671,6 +652,7 @@ class TrusteePosition(PowerOnModel):
default=None,
description="Mandate ID (auto-set from context)",
json_schema_extra={
+ "label": "Mandat",
"frontend_type": "text",
"frontend_readonly": True,
"frontend_required": False,
@@ -681,6 +663,7 @@ class TrusteePosition(PowerOnModel):
default=None,
description="Feature Instance ID for instance-level isolation (auto-set from context)",
json_schema_extra={
+ "label": "Feature-Instanz",
"frontend_type": "text",
"frontend_readonly": True,
"frontend_required": False,
@@ -691,6 +674,7 @@ class TrusteePosition(PowerOnModel):
default=None,
description="External ID (UUID) of the synced record in the accounting system; set by sync, used for duplicate check",
json_schema_extra={
+ "label": "Buha-Sync-ID",
"frontend_type": "text",
"frontend_readonly": True,
"frontend_required": False,
@@ -698,283 +682,118 @@ class TrusteePosition(PowerOnModel):
}
)
-registerModelLabels(
- "TrusteePosition",
- {"en": "Position", "fr": "Position", "de": "Position"},
- {
- "id": {"en": "ID", "fr": "ID", "de": "ID"},
- "documentId": {"en": "Document", "fr": "Document", "de": "Dokument"},
- "bankDocumentId": {"en": "Bank Reference", "fr": "Référence bancaire", "de": "Bank-Referenz"},
- "valuta": {"en": "Value Date", "fr": "Date de valeur", "de": "Valutadatum"},
- "transactionDateTime": {"en": "Transaction Date/Time", "fr": "Date/Heure de transaction", "de": "Transaktionszeitpunkt"},
- "company": {"en": "Company", "fr": "Entreprise", "de": "Firma"},
- "desc": {"en": "Description", "fr": "Description", "de": "Beschreibung"},
- "tags": {"en": "Tags", "fr": "Tags", "de": "Tags"},
- "bookingCurrency": {"en": "Booking Currency", "fr": "Devise de comptabilisation", "de": "Buchungswährung"},
- "bookingAmount": {"en": "Booking Amount", "fr": "Montant de comptabilisation", "de": "Buchungsbetrag"},
- "originalCurrency": {"en": "Original Currency", "fr": "Devise d'origine", "de": "Originalwährung"},
- "originalAmount": {"en": "Original Amount", "fr": "Montant d'origine", "de": "Originalbetrag"},
- "vatPercentage": {"en": "VAT Percentage", "fr": "Pourcentage TVA", "de": "MwSt-Prozentsatz"},
- "vatAmount": {"en": "VAT Amount", "fr": "Montant TVA", "de": "MwSt-Betrag"},
- "debitAccountNumber": {"en": "Debit Account", "fr": "Compte débit", "de": "Soll-Konto"},
- "creditAccountNumber": {"en": "Credit Account", "fr": "Compte crédit", "de": "Haben-Konto"},
- "taxCode": {"en": "Tax Code", "fr": "Code TVA", "de": "Steuercode"},
- "costCenter": {"en": "Cost Center", "fr": "Centre de coûts", "de": "Kostenstelle"},
- "bookingReference": {"en": "Booking Reference", "fr": "Référence de réservation", "de": "Buchungsreferenz"},
- "documentType": {"en": "Document Type", "fr": "Type de document", "de": "Dokumenttyp"},
- "payeeIban": {"en": "Payee IBAN", "fr": "IBAN bénéficiaire", "de": "Empfänger-IBAN"},
- "payeeName": {"en": "Payee Name", "fr": "Nom du bénéficiaire", "de": "Empfänger-Name"},
- "payeeBic": {"en": "Payee BIC/SWIFT", "fr": "BIC/SWIFT bénéficiaire", "de": "Empfänger-BIC"},
- "paymentReference": {"en": "Payment Reference", "fr": "Référence de paiement", "de": "Zahlungsreferenz"},
- "dueDate": {"en": "Due Date", "fr": "Date d'échéance", "de": "Fälligkeitsdatum"},
- "mandateId": {"en": "Mandate", "fr": "Mandat", "de": "Mandat"},
- "featureInstanceId": {"en": "Feature Instance", "fr": "Instance de fonctionnalité", "de": "Feature-Instanz"},
- "accountingSyncId": {"en": "Accounting Sync ID", "fr": "ID sync comptabilité", "de": "Buha-Sync-ID"},
- },
-)
-
-
# ── TrusteeData* tables (synced from external accounting apps for analysis) ──
-
+@i18nModel("Konto (Sync)")
class TrusteeDataAccount(PowerOnModel):
"""Chart of accounts synced from external accounting system."""
- id: str = Field(default_factory=lambda: str(uuid.uuid4()))
- accountNumber: str = Field(description="Account number (e.g. '1020')")
- label: str = Field(default="", description="Account name")
- accountType: Optional[str] = Field(default=None, description="asset / liability / equity / revenue / expense")
- accountGroup: Optional[str] = Field(default=None, description="Account group/category")
- currency: str = Field(default="CHF", description="Account currency")
- isActive: bool = Field(default=True)
- mandateId: Optional[str] = Field(default=None)
- featureInstanceId: Optional[str] = Field(default=None)
-
-
-registerModelLabels(
- "TrusteeDataAccount",
- {"en": "Account (Synced)", "de": "Konto (Sync)", "fr": "Compte (Sync)"},
- {
- "id": {"en": "ID", "de": "ID", "fr": "ID"},
- "accountNumber": {"en": "Account Number", "de": "Kontonummer", "fr": "Numéro de compte"},
- "label": {"en": "Name", "de": "Bezeichnung", "fr": "Libellé"},
- "accountType": {"en": "Type", "de": "Typ", "fr": "Type"},
- "accountGroup": {"en": "Group", "de": "Gruppe", "fr": "Groupe"},
- "currency": {"en": "Currency", "de": "Währung", "fr": "Devise"},
- "isActive": {"en": "Active", "de": "Aktiv", "fr": "Actif"},
- "mandateId": {"en": "Mandate", "de": "Mandat", "fr": "Mandat"},
- "featureInstanceId": {"en": "Feature Instance", "de": "Feature-Instanz", "fr": "Instance"},
- },
-)
-
+ id: str = Field(default_factory=lambda: str(uuid.uuid4()), json_schema_extra={"label": "ID"})
+ accountNumber: str = Field(description="Account number (e.g. '1020')", json_schema_extra={"label": "Kontonummer"})
+ label: str = Field(default="", description="Account name", json_schema_extra={"label": "Bezeichnung"})
+ accountType: Optional[str] = Field(default=None, description="asset / liability / equity / revenue / expense", json_schema_extra={"label": "Typ"})
+ accountGroup: Optional[str] = Field(default=None, description="Account group/category", json_schema_extra={"label": "Gruppe"})
+ currency: str = Field(default="CHF", description="Account currency", json_schema_extra={"label": "Währung"})
+ isActive: bool = Field(default=True, json_schema_extra={"label": "Aktiv"})
+ mandateId: Optional[str] = Field(default=None, json_schema_extra={"label": "Mandat"})
+ featureInstanceId: Optional[str] = Field(default=None, json_schema_extra={"label": "Feature-Instanz"})
+@i18nModel("Buchung (Sync)")
class TrusteeDataJournalEntry(PowerOnModel):
"""Journal entry header synced from external accounting system."""
- id: str = Field(default_factory=lambda: str(uuid.uuid4()))
- externalId: Optional[str] = Field(default=None, description="ID in the source system")
- bookingDate: Optional[str] = Field(default=None, description="Booking date (YYYY-MM-DD)")
- reference: Optional[str] = Field(default=None, description="Booking reference / voucher number")
- description: str = Field(default="", description="Booking text")
- currency: str = Field(default="CHF")
- totalAmount: float = Field(default=0.0, description="Total amount of entry")
- mandateId: Optional[str] = Field(default=None)
- featureInstanceId: Optional[str] = Field(default=None)
-
-
-registerModelLabels(
- "TrusteeDataJournalEntry",
- {"en": "Journal Entry (Synced)", "de": "Buchung (Sync)", "fr": "Écriture (Sync)"},
- {
- "id": {"en": "ID", "de": "ID", "fr": "ID"},
- "externalId": {"en": "External ID", "de": "Externe ID", "fr": "ID externe"},
- "bookingDate": {"en": "Date", "de": "Datum", "fr": "Date"},
- "reference": {"en": "Reference", "de": "Referenz", "fr": "Référence"},
- "description": {"en": "Description", "de": "Beschreibung", "fr": "Description"},
- "currency": {"en": "Currency", "de": "Währung", "fr": "Devise"},
- "totalAmount": {"en": "Amount", "de": "Betrag", "fr": "Montant"},
- "mandateId": {"en": "Mandate", "de": "Mandat", "fr": "Mandat"},
- "featureInstanceId": {"en": "Feature Instance", "de": "Feature-Instanz", "fr": "Instance"},
- },
-)
-
+ id: str = Field(default_factory=lambda: str(uuid.uuid4()), json_schema_extra={"label": "ID"})
+ externalId: Optional[str] = Field(default=None, description="ID in the source system", json_schema_extra={"label": "Externe ID"})
+ bookingDate: Optional[str] = Field(default=None, description="Booking date (YYYY-MM-DD)", json_schema_extra={"label": "Datum"})
+ reference: Optional[str] = Field(default=None, description="Booking reference / voucher number", json_schema_extra={"label": "Referenz"})
+ description: str = Field(default="", description="Booking text", json_schema_extra={"label": "Beschreibung"})
+ currency: str = Field(default="CHF", json_schema_extra={"label": "Währung"})
+ totalAmount: float = Field(default=0.0, description="Total amount of entry", json_schema_extra={"label": "Betrag"})
+ mandateId: Optional[str] = Field(default=None, json_schema_extra={"label": "Mandat"})
+ featureInstanceId: Optional[str] = Field(default=None, json_schema_extra={"label": "Feature-Instanz"})
+@i18nModel("Buchungszeile (Sync)")
class TrusteeDataJournalLine(PowerOnModel):
"""Journal entry line (debit/credit) synced from external accounting system."""
- id: str = Field(default_factory=lambda: str(uuid.uuid4()))
- journalEntryId: str = Field(description="FK → TrusteeDataJournalEntry.id")
- accountNumber: str = Field(description="Account number")
- debitAmount: float = Field(default=0.0)
- creditAmount: float = Field(default=0.0)
- currency: str = Field(default="CHF")
- taxCode: Optional[str] = Field(default=None)
- costCenter: Optional[str] = Field(default=None)
- description: str = Field(default="")
- mandateId: Optional[str] = Field(default=None)
- featureInstanceId: Optional[str] = Field(default=None)
-
-
-registerModelLabels(
- "TrusteeDataJournalLine",
- {"en": "Journal Line (Synced)", "de": "Buchungszeile (Sync)", "fr": "Ligne écriture (Sync)"},
- {
- "id": {"en": "ID", "de": "ID", "fr": "ID"},
- "journalEntryId": {"en": "Journal Entry", "de": "Buchung", "fr": "Écriture"},
- "accountNumber": {"en": "Account", "de": "Konto", "fr": "Compte"},
- "debitAmount": {"en": "Debit", "de": "Soll", "fr": "Débit"},
- "creditAmount": {"en": "Credit", "de": "Haben", "fr": "Crédit"},
- "currency": {"en": "Currency", "de": "Währung", "fr": "Devise"},
- "taxCode": {"en": "Tax Code", "de": "Steuercode", "fr": "Code TVA"},
- "costCenter": {"en": "Cost Center", "de": "Kostenstelle", "fr": "Centre de coûts"},
- "description": {"en": "Description", "de": "Beschreibung", "fr": "Description"},
- "mandateId": {"en": "Mandate", "de": "Mandat", "fr": "Mandat"},
- "featureInstanceId": {"en": "Feature Instance", "de": "Feature-Instanz", "fr": "Instance"},
- },
-)
-
+ id: str = Field(default_factory=lambda: str(uuid.uuid4()), json_schema_extra={"label": "ID"})
+ journalEntryId: str = Field(description="FK → TrusteeDataJournalEntry.id", json_schema_extra={"label": "Buchung"})
+ accountNumber: str = Field(description="Account number", json_schema_extra={"label": "Konto"})
+ debitAmount: float = Field(default=0.0, json_schema_extra={"label": "Soll"})
+ creditAmount: float = Field(default=0.0, json_schema_extra={"label": "Haben"})
+ currency: str = Field(default="CHF", json_schema_extra={"label": "Währung"})
+ taxCode: Optional[str] = Field(default=None, json_schema_extra={"label": "Steuercode"})
+ costCenter: Optional[str] = Field(default=None, json_schema_extra={"label": "Kostenstelle"})
+ description: str = Field(default="", json_schema_extra={"label": "Beschreibung"})
+ mandateId: Optional[str] = Field(default=None, json_schema_extra={"label": "Mandat"})
+ featureInstanceId: Optional[str] = Field(default=None, json_schema_extra={"label": "Feature-Instanz"})
+@i18nModel("Kontakt (Sync)")
class TrusteeDataContact(PowerOnModel):
"""Customer or vendor synced from external accounting system."""
- id: str = Field(default_factory=lambda: str(uuid.uuid4()))
- externalId: Optional[str] = Field(default=None, description="ID in the source system")
- contactType: str = Field(default="customer", description="customer / vendor / both")
- contactNumber: Optional[str] = Field(default=None, description="Customer/vendor number")
- name: str = Field(default="", description="Name / company")
- address: Optional[str] = Field(default=None)
- zip: Optional[str] = Field(default=None)
- city: Optional[str] = Field(default=None)
- country: Optional[str] = Field(default=None)
- email: Optional[str] = Field(default=None)
- phone: Optional[str] = Field(default=None)
- vatNumber: Optional[str] = Field(default=None)
- mandateId: Optional[str] = Field(default=None)
- featureInstanceId: Optional[str] = Field(default=None)
-
-
-registerModelLabels(
- "TrusteeDataContact",
- {"en": "Contact (Synced)", "de": "Kontakt (Sync)", "fr": "Contact (Sync)"},
- {
- "id": {"en": "ID", "de": "ID", "fr": "ID"},
- "externalId": {"en": "External ID", "de": "Externe ID", "fr": "ID externe"},
- "contactType": {"en": "Type", "de": "Typ", "fr": "Type"},
- "contactNumber": {"en": "Number", "de": "Nummer", "fr": "Numéro"},
- "name": {"en": "Name", "de": "Name", "fr": "Nom"},
- "address": {"en": "Address", "de": "Adresse", "fr": "Adresse"},
- "zip": {"en": "ZIP", "de": "PLZ", "fr": "NPA"},
- "city": {"en": "City", "de": "Ort", "fr": "Ville"},
- "country": {"en": "Country", "de": "Land", "fr": "Pays"},
- "email": {"en": "Email", "de": "E-Mail", "fr": "E-mail"},
- "phone": {"en": "Phone", "de": "Telefon", "fr": "Téléphone"},
- "vatNumber": {"en": "VAT Number", "de": "MWST-Nr.", "fr": "N° TVA"},
- "mandateId": {"en": "Mandate", "de": "Mandat", "fr": "Mandat"},
- "featureInstanceId": {"en": "Feature Instance", "de": "Feature-Instanz", "fr": "Instance"},
- },
-)
-
+ id: str = Field(default_factory=lambda: str(uuid.uuid4()), json_schema_extra={"label": "ID"})
+ externalId: Optional[str] = Field(default=None, description="ID in the source system", json_schema_extra={"label": "Externe ID"})
+ contactType: str = Field(default="customer", description="customer / vendor / both", json_schema_extra={"label": "Typ"})
+ contactNumber: Optional[str] = Field(default=None, description="Customer/vendor number", json_schema_extra={"label": "Nummer"})
+ name: str = Field(default="", description="Name / company", json_schema_extra={"label": "Name"})
+ address: Optional[str] = Field(default=None, json_schema_extra={"label": "Adresse"})
+ zip: Optional[str] = Field(default=None, json_schema_extra={"label": "PLZ"})
+ city: Optional[str] = Field(default=None, json_schema_extra={"label": "Ort"})
+ country: Optional[str] = Field(default=None, json_schema_extra={"label": "Land"})
+ email: Optional[str] = Field(default=None, json_schema_extra={"label": "E-Mail"})
+ phone: Optional[str] = Field(default=None, json_schema_extra={"label": "Telefon"})
+ vatNumber: Optional[str] = Field(default=None, json_schema_extra={"label": "MWST-Nr."})
+ mandateId: Optional[str] = Field(default=None, json_schema_extra={"label": "Mandat"})
+ featureInstanceId: Optional[str] = Field(default=None, json_schema_extra={"label": "Feature-Instanz"})
+@i18nModel("Kontosaldo (Sync)")
class TrusteeDataAccountBalance(PowerOnModel):
"""Account balance per period, derived from journal lines or directly from accounting system."""
- id: str = Field(default_factory=lambda: str(uuid.uuid4()))
- accountNumber: str = Field(description="Account number")
- periodYear: int = Field(description="Fiscal year")
- periodMonth: int = Field(default=0, description="Month (1-12); 0 = annual total")
- openingBalance: float = Field(default=0.0)
- debitTotal: float = Field(default=0.0)
- creditTotal: float = Field(default=0.0)
- closingBalance: float = Field(default=0.0)
- currency: str = Field(default="CHF")
- mandateId: Optional[str] = Field(default=None)
- featureInstanceId: Optional[str] = Field(default=None)
-
-
-registerModelLabels(
- "TrusteeDataAccountBalance",
- {"en": "Account Balance (Synced)", "de": "Kontosaldo (Sync)", "fr": "Solde compte (Sync)"},
- {
- "id": {"en": "ID", "de": "ID", "fr": "ID"},
- "accountNumber": {"en": "Account", "de": "Konto", "fr": "Compte"},
- "periodYear": {"en": "Year", "de": "Jahr", "fr": "Année"},
- "periodMonth": {"en": "Month", "de": "Monat", "fr": "Mois"},
- "openingBalance": {"en": "Opening Balance", "de": "Eröffnungssaldo", "fr": "Solde d'ouverture"},
- "debitTotal": {"en": "Debit Total", "de": "Soll-Umsatz", "fr": "Total débit"},
- "creditTotal": {"en": "Credit Total", "de": "Haben-Umsatz", "fr": "Total crédit"},
- "closingBalance": {"en": "Closing Balance", "de": "Schlusssaldo", "fr": "Solde de clôture"},
- "currency": {"en": "Currency", "de": "Währung", "fr": "Devise"},
- "mandateId": {"en": "Mandate", "de": "Mandat", "fr": "Mandat"},
- "featureInstanceId": {"en": "Feature Instance", "de": "Feature-Instanz", "fr": "Instance"},
- },
-)
-
+ id: str = Field(default_factory=lambda: str(uuid.uuid4()), json_schema_extra={"label": "ID"})
+ accountNumber: str = Field(description="Account number", json_schema_extra={"label": "Konto"})
+ periodYear: int = Field(description="Fiscal year", json_schema_extra={"label": "Jahr"})
+ periodMonth: int = Field(default=0, description="Month (1-12); 0 = annual total", json_schema_extra={"label": "Monat"})
+ openingBalance: float = Field(default=0.0, json_schema_extra={"label": "Eröffnungssaldo"})
+ debitTotal: float = Field(default=0.0, json_schema_extra={"label": "Soll-Umsatz"})
+ creditTotal: float = Field(default=0.0, json_schema_extra={"label": "Haben-Umsatz"})
+ closingBalance: float = Field(default=0.0, json_schema_extra={"label": "Schlusssaldo"})
+ currency: str = Field(default="CHF", json_schema_extra={"label": "Währung"})
+ mandateId: Optional[str] = Field(default=None, json_schema_extra={"label": "Mandat"})
+ featureInstanceId: Optional[str] = Field(default=None, json_schema_extra={"label": "Feature-Instanz"})
+@i18nModel("Buchhaltungs-Konfiguration")
class TrusteeAccountingConfig(PowerOnModel):
"""Per-instance accounting system configuration with encrypted credentials.
Each feature instance can connect to exactly one accounting system.
Credentials are stored encrypted (decrypted at runtime by the AccountingBridge).
"""
- id: str = Field(default_factory=lambda: str(uuid.uuid4()))
- featureInstanceId: str = Field(description="FK -> FeatureInstance.id (1:1)")
- connectorType: str = Field(description="Connector type key, e.g. 'rma', 'bexio', 'abacus'")
- displayLabel: str = Field(default="", description="User-visible label for this integration")
- encryptedConfig: str = Field(default="", description="Encrypted JSON blob with connector credentials")
- isActive: bool = Field(default=True)
- lastSyncAt: Optional[float] = Field(default=None, description="Timestamp of last sync attempt")
- lastSyncStatus: Optional[str] = Field(default=None, description="Last sync result: success, error, partial")
- lastSyncErrorMessage: Optional[str] = Field(default=None, description="Error message when lastSyncStatus is error")
- cachedChartOfAccounts: Optional[str] = Field(default=None, description="JSON-serialised chart of accounts cache (list of {accountNumber, label, accountType})")
- chartCachedAt: Optional[float] = Field(default=None, description="Timestamp when cachedChartOfAccounts was last refreshed")
- mandateId: Optional[str] = Field(default=None)
-
-
-registerModelLabels(
- "TrusteeAccountingConfig",
- {"en": "Accounting Configuration", "de": "Buchhaltungs-Konfiguration", "fr": "Configuration comptable"},
- {
- "id": {"en": "ID", "fr": "ID", "de": "ID"},
- "featureInstanceId": {"en": "Feature Instance", "fr": "Instance", "de": "Feature-Instanz"},
- "connectorType": {"en": "System", "fr": "Système", "de": "System"},
- "displayLabel": {"en": "Label", "fr": "Libellé", "de": "Bezeichnung"},
- "isActive": {"en": "Active", "fr": "Actif", "de": "Aktiv"},
- "lastSyncAt": {"en": "Last Sync", "fr": "Dernière sync.", "de": "Letzte Synchronisation"},
- "lastSyncStatus": {"en": "Status", "fr": "Statut", "de": "Status"},
- "lastSyncErrorMessage": {"en": "Error", "fr": "Erreur", "de": "Fehlermeldung"},
- "cachedChartOfAccounts": {"en": "Cached Chart", "de": "Cached Kontoplan", "fr": "Plan comptable en cache"},
- "chartCachedAt": {"en": "Chart Cached At", "de": "Kontoplan-Cache-Zeitpunkt", "fr": "Horodatage cache plan comptable"},
- "mandateId": {"en": "Mandate", "fr": "Mandat", "de": "Mandat"},
- },
-)
-
+ id: str = Field(default_factory=lambda: str(uuid.uuid4()), json_schema_extra={"label": "ID"})
+ featureInstanceId: str = Field(description="FK -> FeatureInstance.id (1:1)", json_schema_extra={"label": "Feature-Instanz"})
+ connectorType: str = Field(description="Connector type key, e.g. 'rma', 'bexio', 'abacus'", json_schema_extra={"label": "System"})
+ displayLabel: str = Field(default="", description="User-visible label for this integration", json_schema_extra={"label": "Bezeichnung"})
+ encryptedConfig: str = Field(default="", description="Encrypted JSON blob with connector credentials", json_schema_extra={"label": "Verschlüsselte Konfiguration"})
+ isActive: bool = Field(default=True, json_schema_extra={"label": "Aktiv"})
+ lastSyncAt: Optional[float] = Field(default=None, description="Timestamp of last sync attempt", json_schema_extra={"label": "Letzte Synchronisation"})
+ lastSyncStatus: Optional[str] = Field(default=None, description="Last sync result: success, error, partial", json_schema_extra={"label": "Status"})
+ lastSyncErrorMessage: Optional[str] = Field(default=None, description="Error message when lastSyncStatus is error", json_schema_extra={"label": "Fehlermeldung"})
+ cachedChartOfAccounts: Optional[str] = Field(default=None, description="JSON-serialised chart of accounts cache (list of {accountNumber, label, accountType})", json_schema_extra={"label": "Cached Kontoplan"})
+ chartCachedAt: Optional[float] = Field(default=None, description="Timestamp when cachedChartOfAccounts was last refreshed", json_schema_extra={"label": "Kontoplan-Cache-Zeitpunkt"})
+ mandateId: Optional[str] = Field(default=None, json_schema_extra={"label": "Mandat"})
@i18nModel("Buchhaltungs-Synchronisation")
class TrusteeAccountingSync(PowerOnModel):
    """Audit record linking a TrusteePosition to an external accounting system.

    Tracks which position was synced to which external system and when.
    Used for duplicate prevention, audit trail, and retry logic.

    NOTE(review): field labels in ``json_schema_extra`` are German-only;
    presumably the ``@i18nModel`` registration resolves other languages via
    the gateway i18n registry — verify against i18nRegistry.
    """
    # Primary key: random UUID generated per record.
    id: str = Field(default_factory=lambda: str(uuid.uuid4()), json_schema_extra={"label": "ID"})
    # Foreign keys to the synced position and the owning feature instance.
    positionId: str = Field(description="FK -> TrusteePosition.id", json_schema_extra={"label": "Position"})
    featureInstanceId: str = Field(description="FK -> FeatureInstance.id", json_schema_extra={"label": "Feature-Instanz"})
    # Connector key captured at sync time (config may change later).
    connectorType: str = Field(description="Connector type at time of sync", json_schema_extra={"label": "System"})
    # Identifiers assigned by the remote system once the booking exists there.
    externalId: Optional[str] = Field(default=None, description="ID assigned by the external system", json_schema_extra={"label": "Externe ID"})
    externalReference: Optional[str] = Field(default=None, description="Reference in the external system", json_schema_extra={"label": "Externe Referenz"})
    # Lifecycle state; see description for the allowed values.
    syncStatus: str = Field(default="pending", description="pending | synced | error | cancelled", json_schema_extra={"label": "Status"})
    syncDirection: str = Field(default="push", description="push (local->ext) or pull (ext->local)", json_schema_extra={"label": "Richtung"})
    # Set only on success; errorMessage only populated when syncStatus == "error".
    syncedAt: Optional[float] = Field(default=None, description="Timestamp of successful sync", json_schema_extra={"label": "Synchronisiert am"})
    errorMessage: Optional[str] = Field(default=None, json_schema_extra={"label": "Fehler"})
    # Raw payload sent to the external system, retained for audit purposes.
    bookingPayload: Optional[dict] = Field(default=None, description="Payload sent to the external system (audit)", json_schema_extra={"label": "Buchungs-Payload"})
    mandateId: Optional[str] = Field(default=None, json_schema_extra={"label": "Mandat"})
-
-registerModelLabels(
- "TrusteeAccountingSync",
- {"en": "Accounting Sync", "de": "Buchhaltungs-Synchronisation", "fr": "Synchronisation comptable"},
- {
- "id": {"en": "ID", "fr": "ID", "de": "ID"},
- "positionId": {"en": "Position", "fr": "Position", "de": "Position"},
- "connectorType": {"en": "System", "fr": "Système", "de": "System"},
- "externalId": {"en": "External ID", "fr": "ID Externe", "de": "Externe ID"},
- "syncStatus": {"en": "Status", "fr": "Statut", "de": "Status"},
- "syncDirection": {"en": "Direction", "fr": "Direction", "de": "Richtung"},
- "syncedAt": {"en": "Synced At", "fr": "Synchronisé à", "de": "Synchronisiert am"},
- "errorMessage": {"en": "Error", "fr": "Erreur", "de": "Fehler"},
- "mandateId": {"en": "Mandate", "fr": "Mandat", "de": "Mandat"},
- },
-)
diff --git a/modules/features/trustee/mainTrustee.py b/modules/features/trustee/mainTrustee.py
index 2fd82bc5..65f5f7e4 100644
--- a/modules/features/trustee/mainTrustee.py
+++ b/modules/features/trustee/mainTrustee.py
@@ -12,7 +12,7 @@ logger = logging.getLogger(__name__)
# Feature metadata
FEATURE_CODE = "trustee"
# German-only display label; the former {en, de, fr} dict was dropped in this
# change — presumably translations now come from the gateway i18n registry (verify).
FEATURE_LABEL = "Treuhand"
FEATURE_ICON = "mdi-briefcase"
# UI Objects for RBAC catalog
@@ -20,37 +20,47 @@ FEATURE_ICON = "mdi-briefcase"
UI_OBJECTS = [
    # NOTE(review): labels are German-only after this change; the earlier
    # {en, de, fr} dicts were removed — presumably the gateway i18n registry
    # supplies other languages (verify against i18nRegistry).
    {
        "objectKey": "ui.feature.trustee.dashboard",
        "label": "Dashboard",
        "meta": {"area": "dashboard"}
    },
    {
        "objectKey": "ui.feature.trustee.positions",
        "label": "Positionen",
        "meta": {"area": "positions"}
    },
    {
        "objectKey": "ui.feature.trustee.documents",
        "label": "Dokumente",
        "meta": {"area": "documents"}
    },
    {
        "objectKey": "ui.feature.trustee.expense-import",
        "label": "Spesen Import",
        "meta": {"area": "expense-import"}
    },
    {
        "objectKey": "ui.feature.trustee.scan-upload",
        "label": "Scannen / Hochladen",
        "meta": {"area": "scan-upload"}
    },
    # New in this change: analysis/reporting and year-end closing areas,
    # matching the "analyse" / "abschluss" quick-action categories.
    {
        "objectKey": "ui.feature.trustee.analyse",
        "label": "Analyse & Reporting",
        "meta": {"area": "analyse"}
    },
    {
        "objectKey": "ui.feature.trustee.abschluss",
        "label": "Abschluss & Prüfung",
        "meta": {"area": "abschluss"}
    },
    # admin_only entries are restricted to administrative roles.
    {
        "objectKey": "ui.feature.trustee.settings",
        "label": "Buchhaltungs-Einstellungen",
        "meta": {"area": "settings", "admin_only": True}
    },
    {
        "objectKey": "ui.feature.trustee.instance-roles",
        "label": "Instanz-Rollen & Berechtigungen",
        "meta": {"area": "admin", "admin_only": True}
    },
]
@@ -60,7 +70,7 @@ UI_OBJECTS = [
DATA_OBJECTS = [
{
"objectKey": "data.feature.trustee.TrusteeOrganisation",
- "label": {"en": "Organisation", "de": "Organisation", "fr": "Organisation"},
+ "label": "Organisation",
"meta": {
"table": "TrusteeOrganisation",
"fields": ["id", "label", "enabled"],
@@ -70,7 +80,7 @@ DATA_OBJECTS = [
},
{
"objectKey": "data.feature.trustee.TrusteePosition",
- "label": {"en": "Position", "de": "Position", "fr": "Position"},
+ "label": "Position",
"meta": {
"table": "TrusteePosition",
"fields": ["id", "label", "description", "organisationId"],
@@ -80,12 +90,12 @@ DATA_OBJECTS = [
},
{
"objectKey": "data.feature.trustee.TrusteeDocument",
- "label": {"en": "Document", "de": "Dokument", "fr": "Document"},
+ "label": "Dokument",
"meta": {"table": "TrusteeDocument", "fields": ["id", "filename", "mimeType", "fileSize", "uploadDate"]}
},
{
"objectKey": "data.feature.trustee.TrusteeAccountingConfig",
- "label": {"en": "Accounting Config", "de": "Buchhaltungs-Konfiguration", "fr": "Config. comptable"},
+ "label": "Buchhaltungs-Konfiguration",
"meta": {
"table": "TrusteeAccountingConfig",
"fields": ["id", "connectorType", "displayLabel", "encryptedConfig", "isActive"],
@@ -95,37 +105,37 @@ DATA_OBJECTS = [
},
{
"objectKey": "data.feature.trustee.TrusteeAccountingSync",
- "label": {"en": "Accounting Sync", "de": "Buchhaltungs-Synchronisation", "fr": "Sync. comptable"},
+ "label": "Buchhaltungs-Synchronisation",
"meta": {"table": "TrusteeAccountingSync", "fields": ["id", "positionId", "syncStatus", "externalId"]}
},
{
"objectKey": "data.feature.trustee.TrusteeDataAccount",
- "label": {"en": "Accounts (Synced)", "de": "Kontenplan (Sync)", "fr": "Plan comptable (Sync)"},
+ "label": "Kontenplan (Sync)",
"meta": {"table": "TrusteeDataAccount", "fields": ["id", "accountNumber", "label", "accountType", "accountGroup", "currency", "isActive"]}
},
{
"objectKey": "data.feature.trustee.TrusteeDataJournalEntry",
- "label": {"en": "Journal Entries (Synced)", "de": "Buchungen (Sync)", "fr": "Écritures (Sync)"},
+ "label": "Buchungen (Sync)",
"meta": {"table": "TrusteeDataJournalEntry", "fields": ["id", "externalId", "bookingDate", "reference", "description", "currency", "totalAmount"]}
},
{
"objectKey": "data.feature.trustee.TrusteeDataJournalLine",
- "label": {"en": "Journal Lines (Synced)", "de": "Buchungszeilen (Sync)", "fr": "Lignes écriture (Sync)"},
+ "label": "Buchungszeilen (Sync)",
"meta": {"table": "TrusteeDataJournalLine", "fields": ["id", "journalEntryId", "accountNumber", "debitAmount", "creditAmount", "currency", "taxCode", "costCenter", "description"]}
},
{
"objectKey": "data.feature.trustee.TrusteeDataContact",
- "label": {"en": "Contacts (Synced)", "de": "Kontakte (Sync)", "fr": "Contacts (Sync)"},
+ "label": "Kontakte (Sync)",
"meta": {"table": "TrusteeDataContact", "fields": ["id", "externalId", "contactType", "contactNumber", "name", "address", "zip", "city", "country", "email", "phone", "vatNumber"]}
},
{
"objectKey": "data.feature.trustee.TrusteeDataAccountBalance",
- "label": {"en": "Account Balances (Synced)", "de": "Kontosalden (Sync)", "fr": "Soldes comptes (Sync)"},
+ "label": "Kontosalden (Sync)",
"meta": {"table": "TrusteeDataAccountBalance", "fields": ["id", "accountNumber", "periodYear", "periodMonth", "openingBalance", "debitTotal", "creditTotal", "closingBalance", "currency"]}
},
{
"objectKey": "data.feature.trustee.*",
- "label": {"en": "All Trustee Data", "de": "Alle Treuhand-Daten", "fr": "Toutes les données fiduciaires"},
+ "label": "Alle Treuhand-Daten",
"meta": {"wildcard": True, "description": "Wildcard for all trustee data tables"}
},
]
@@ -135,127 +145,379 @@ DATA_OBJECTS = [
RESOURCE_OBJECTS = [
    # RESOURCE objects map RBAC permissions onto API endpoints (see "meta").
    # Labels are German-only; translations presumably resolved via the
    # gateway i18n registry — verify.
    {
        "objectKey": "resource.feature.trustee.documents.create",
        "label": "Dokument hochladen",
        "meta": {"endpoint": "/api/trustee/{instanceId}/documents", "method": "POST"}
    },
    {
        "objectKey": "resource.feature.trustee.documents.update",
        "label": "Dokument aktualisieren",
        "meta": {"endpoint": "/api/trustee/{instanceId}/documents/{documentId}", "method": "PUT"}
    },
    {
        "objectKey": "resource.feature.trustee.documents.delete",
        "label": "Dokument löschen",
        "meta": {"endpoint": "/api/trustee/{instanceId}/documents/{documentId}", "method": "DELETE"}
    },
    {
        "objectKey": "resource.feature.trustee.positions.create",
        "label": "Position erstellen",
        "meta": {"endpoint": "/api/trustee/{instanceId}/positions", "method": "POST"}
    },
    {
        "objectKey": "resource.feature.trustee.positions.update",
        "label": "Position aktualisieren",
        "meta": {"endpoint": "/api/trustee/{instanceId}/positions/{positionId}", "method": "PUT"}
    },
    {
        "objectKey": "resource.feature.trustee.positions.delete",
        "label": "Position löschen",
        "meta": {"endpoint": "/api/trustee/{instanceId}/positions/{positionId}", "method": "DELETE"}
    },
    # "method": "ALL" covers every HTTP verb on the endpoint; admin_only marks
    # administrative operations.
    {
        "objectKey": "resource.feature.trustee.instance-roles.manage",
        "label": "Instanz-Rollen verwalten",
        "meta": {"endpoint": "/api/trustee/{instanceId}/instance-roles", "method": "ALL", "admin_only": True}
    },
    {
        "objectKey": "resource.feature.trustee.accounting.manage",
        "label": "Buchhaltungs-Integration verwalten",
        "meta": {"endpoint": "/api/trustee/{instanceId}/accounting/config", "method": "ALL", "admin_only": True}
    },
    {
        "objectKey": "resource.feature.trustee.accounting.sync",
        "label": "Buchhaltung synchronisieren",
        "meta": {"endpoint": "/api/trustee/{instanceId}/accounting/sync", "method": "POST"}
    },
    {
        "objectKey": "resource.feature.trustee.accounting.view",
        "label": "Sync-Status einsehen",
        "meta": {"endpoint": "/api/trustee/{instanceId}/accounting/sync-status", "method": "GET"}
    },
    # New in this change: workflow permissions; note these point at the
    # generic /api/workflows endpoints, not /api/trustee.
    {
        "objectKey": "resource.feature.trustee.workflows.view",
        "label": "Workflows einsehen",
        "meta": {"endpoint": "/api/workflows/{instanceId}/workflows", "method": "GET"}
    },
    {
        "objectKey": "resource.feature.trustee.workflows.execute",
        "label": "Workflows ausführen",
        "meta": {"endpoint": "/api/workflows/{instanceId}/execute", "method": "POST"}
    },
    {
        "objectKey": "resource.feature.trustee.workflows.manage",
        "label": "Workflows verwalten",
        "meta": {"endpoint": "/api/workflows/{instanceId}/workflows", "method": "ALL", "admin_only": True}
    },
]
# Template roles for this feature with AccessRules
# Each role defines default UI and DATA permissions
# Note: UI item=None means ALL views, specific items restrict to named views
# IMPORTANT: item uses vollqualifizierte ObjectKeys (gemäss Navigation-API-Konzept)
# Grouping buckets for the dashboard quick actions; "id" is referenced by
# QUICK_ACTIONS[*]["category"] and sortOrder fixes display order.
# NOTE(review): the comment block above describes TEMPLATE_ROLES, which now
# sits several hundred lines further down — consider moving that comment.
QUICK_ACTION_CATEGORIES = [
    {"id": "import", "label": "Import & Verarbeitung", "sortOrder": 1},
    {"id": "analyse", "label": "Analyse & Reporting", "sortOrder": 2},
    {"id": "abschluss", "label": "Abschluss & Prüfung", "sortOrder": 3},
]
+
# Dashboard quick actions. The /quick-actions endpoint filters these against
# the caller's instance roles ("requiredRoles"); "category" references
# QUICK_ACTION_CATEGORIES ids; actionType "link" navigates to
# config["targetView"] (optionally with a "tab").
QUICK_ACTIONS = [
    {
        "id": "trustee-process-receipts",
        "label": "Belege verarbeiten",
        "description": "Belege aus SharePoint importieren, klassifizieren und verbuchen",
        "icon": "mdi-file-document-check-outline",
        "color": "#4CAF50",
        "category": "import",
        "actionType": "link",
        "config": {"targetView": "expense-import"},
        "requiredRoles": ["trustee-user", "trustee-accountant", "trustee-admin"],
        "sortOrder": 1,
    },
    {
        "id": "trustee-sync-accounting",
        "label": "Daten synchronisieren",
        "description": "Buchhaltungsdaten aus dem externen System aktualisieren",
        "icon": "mdi-sync",
        "color": "#FF9800",
        "category": "import",
        "actionType": "link",
        "config": {"targetView": "settings"},
        "requiredRoles": ["trustee-accountant", "trustee-admin"],
        "sortOrder": 2,
    },
    # Broadest audience: clients may also upload their own receipts.
    {
        "id": "trustee-upload-receipt",
        "label": "Beleg hochladen",
        "description": "Beleg scannen oder als Datei hochladen",
        "icon": "mdi-camera-document-outline",
        "color": "#607D8B",
        "category": "import",
        "actionType": "link",
        "config": {"targetView": "scan-upload"},
        "requiredRoles": ["trustee-user", "trustee-client", "trustee-accountant", "trustee-admin"],
        "sortOrder": 3,
    },
    {
        "id": "trustee-budget-comparison",
        "label": "Budget-Vergleich",
        "description": "Soll/Ist-Vergleich der Buchhaltung mit Budget-Excel",
        "icon": "mdi-chart-bar",
        "color": "#2196F3",
        "category": "analyse",
        "actionType": "link",
        "config": {"targetView": "analyse", "tab": "budget"},
        "requiredRoles": ["trustee-accountant", "trustee-admin"],
        "sortOrder": 4,
    },
    {
        "id": "trustee-kpi-dashboard",
        "label": "KPI-Dashboard",
        "description": "Kennzahlen berechnen und visualisieren",
        "icon": "mdi-view-dashboard-outline",
        "color": "#9C27B0",
        "category": "analyse",
        "actionType": "link",
        "config": {"targetView": "analyse", "tab": "kpi"},
        "requiredRoles": ["trustee-accountant", "trustee-admin"],
        "sortOrder": 5,
    },
    {
        "id": "trustee-cashflow",
        "label": "Cashflow-Rechnung",
        "description": "Cashflow berechnen und analysieren",
        "icon": "mdi-cash-multiple",
        "color": "#009688",
        "category": "analyse",
        "actionType": "link",
        "config": {"targetView": "analyse", "tab": "cashflow"},
        "requiredRoles": ["trustee-accountant", "trustee-admin"],
        "sortOrder": 6,
    },
    {
        "id": "trustee-forecast",
        "label": "Prognose erstellen",
        "description": "Trend-Analyse und Prognose der nächsten Monate",
        "icon": "mdi-chart-timeline-variant",
        "color": "#E91E63",
        "category": "analyse",
        "actionType": "link",
        "config": {"targetView": "analyse", "tab": "forecast"},
        "requiredRoles": ["trustee-accountant", "trustee-admin"],
        "sortOrder": 7,
    },
    {
        "id": "trustee-year-end-check",
        "label": "Jahresabschluss prüfen",
        "description": "Automatische Prüfungen für den Jahresabschluss",
        "icon": "mdi-clipboard-check-outline",
        "color": "#795548",
        "category": "abschluss",
        "actionType": "link",
        "config": {"targetView": "abschluss", "tab": "year-end"},
        "requiredRoles": ["trustee-accountant", "trustee-admin"],
        "sortOrder": 8,
    },
]
+
+
+# ---------------------------------------------------------------------------
+# Template Workflows — bootstrapped into each new feature instance.
+# Graphs use existing nodes: trigger.manual, trustee.refreshAccountingData, ai.prompt.
+# The placeholder {{featureInstanceId}} is replaced by _copyTemplateWorkflows.
+# ---------------------------------------------------------------------------
+
+def _buildAnalysisWorkflowGraph(prompt: str) -> Dict[str, Any]:
+ """Build a standard analysis graph: trigger → refreshAccountingData → ai.prompt."""
+ return {
+ "nodes": [
+ {"id": "trigger", "type": "trigger.manual", "label": "Start", "_method": "", "_action": "", "parameters": {}, "position": {"x": 0, "y": 0}},
+ {"id": "refresh", "type": "trustee.refreshAccountingData", "label": "Daten laden", "_method": "trustee", "_action": "refreshAccountingData",
+ "parameters": {"featureInstanceId": "{{featureInstanceId}}", "forceRefresh": False}, "position": {"x": 250, "y": 0}},
+ {"id": "analyse", "type": "ai.prompt", "label": "Analyse", "_method": "ai", "_action": "process",
+ "parameters": {"prompt": prompt, "simpleMode": False}, "position": {"x": 500, "y": 0}},
+ ],
+ "connections": [
+ {"source": "trigger", "sourcePort": 0, "target": "refresh", "targetPort": 0},
+ {"source": "refresh", "sourcePort": 0, "target": "analyse", "targetPort": 0},
+ ],
+ }
+
+
TEMPLATE_WORKFLOWS = [
    # Full receipt pipeline: extract -> process -> sync to accounting.
    {
        "id": "trustee-receipt-import",
        "label": "Beleg-Import Pipeline",
        "description": "Belege extrahieren, verarbeiten und in Buchhaltung synchronisieren",
        "tags": ["feature:trustee", "template:trustee-receipt-import"],
        "graph": {
            "nodes": [
                {"id": "trigger", "type": "trigger.manual", "label": "Start", "_method": "", "_action": "", "parameters": {}, "position": {"x": 0, "y": 0}},
                {"id": "extract", "type": "trustee.extractFromFiles", "label": "Dokumente extrahieren", "_method": "trustee", "_action": "extractFromFiles",
                 "parameters": {"featureInstanceId": "{{featureInstanceId}}", "prompt": ""}, "position": {"x": 250, "y": 0}},
                {"id": "process", "type": "trustee.processDocuments", "label": "Verarbeiten", "_method": "trustee", "_action": "processDocuments",
                 "parameters": {"documentList": [], "featureInstanceId": "{{featureInstanceId}}"}, "position": {"x": 500, "y": 0}},
                {"id": "sync", "type": "trustee.syncToAccounting", "label": "Synchronisieren", "_method": "trustee", "_action": "syncToAccounting",
                 "parameters": {"documentList": [], "featureInstanceId": "{{featureInstanceId}}"}, "position": {"x": 750, "y": 0}},
            ],
            "connections": [
                {"source": "trigger", "sourcePort": 0, "target": "extract", "targetPort": 0},
                {"source": "extract", "sourcePort": 0, "target": "process", "targetPort": 0},
                {"source": "process", "sourcePort": 0, "target": "sync", "targetPort": 0},
            ],
        },
    },
    # Forced refresh of accounting data from the external system.
    {
        "id": "trustee-sync-accounting",
        "label": "Buchhaltung synchronisieren",
        "description": "Buchhaltungsdaten aus dem externen System aktualisieren",
        "tags": ["feature:trustee", "template:trustee-sync-accounting"],
        "graph": {
            "nodes": [
                {"id": "trigger", "type": "trigger.manual", "label": "Start", "_method": "", "_action": "", "parameters": {}, "position": {"x": 0, "y": 0}},
                {"id": "refresh", "type": "trustee.refreshAccountingData", "label": "Daten aktualisieren", "_method": "trustee", "_action": "refreshAccountingData",
                 "parameters": {"featureInstanceId": "{{featureInstanceId}}", "forceRefresh": True}, "position": {"x": 250, "y": 0}},
            ],
            "connections": [
                {"source": "trigger", "sourcePort": 0, "target": "refresh", "targetPort": 0},
            ],
        },
    },
    # The remaining templates all share the trigger -> refresh -> ai.prompt
    # shape from _buildAnalysisWorkflowGraph and differ only in the prompt.
    {
        "id": "trustee-budget-comparison",
        "label": "Budget-Vergleich",
        "description": "Soll/Ist-Vergleich der Buchhaltung mit Budget-Excel",
        "tags": ["feature:trustee", "template:trustee-budget-comparison"],
        "graph": _buildAnalysisWorkflowGraph(
            "Ich möchte einen Budget-Soll/Ist-Vergleich durchführen. Bitte:\n"
            "1. Frage mich nach der Budget-Datei (Excel) oder suche im Workspace nach einer Datei mit 'Budget' im Namen\n"
            "2. Lade die aktuellen Buchhaltungsdaten (refreshTrusteeData falls nötig)\n"
            "3. Vergleiche die Soll-Werte aus dem Budget mit den Ist-Werten aus der Buchhaltung pro Konto\n"
            "4. Berechne die Abweichung (absolut und prozentual)\n"
            "5. Erstelle ein Abweichungs-Chart (Balkendiagramm: Soll vs. Ist pro Konto)\n"
            "6. Markiere kritische Abweichungen (>10%) und gib eine kurze Einschätzung"
        ),
    },
    {
        "id": "trustee-kpi-dashboard",
        "label": "KPI-Dashboard",
        "description": "Kennzahlen berechnen und visualisieren",
        "tags": ["feature:trustee", "template:trustee-kpi-dashboard"],
        "graph": _buildAnalysisWorkflowGraph(
            "Erstelle ein KPI-Dashboard basierend auf den aktuellen Buchhaltungsdaten. Berechne und visualisiere:\n"
            "1. Bruttogewinn und Bruttogewinnmarge\n"
            "2. EBIT (Betriebsergebnis)\n"
            "3. Gewinnmarge (Reingewinn / Umsatz)\n"
            "4. Eigenkapitalquote und Check auf hälftigen Kapitalverlust (OR Art. 725)\n"
            "5. Liquiditätsgrad 1-3 (Cash Ratio, Quick Ratio, Current Ratio)\n"
            "6. Überschuldungs-Check\n\n"
            "Erstelle für jede Kennzahl einen kurzen Kommentar (gut/kritisch/Handlungsbedarf). "
            "Erstelle mindestens 2 Charts: ein Übersichts-Chart der Margen und ein Liquiditäts-Chart."
        ),
    },
    {
        "id": "trustee-cashflow",
        "label": "Cashflow-Rechnung",
        "description": "Cashflow berechnen und analysieren",
        "tags": ["feature:trustee", "template:trustee-cashflow"],
        "graph": _buildAnalysisWorkflowGraph(
            "Erstelle eine Cashflow-Rechnung basierend auf den aktuellen Buchhaltungsdaten:\n"
            "1. Operativer Cashflow: Starte vom Reingewinn, bereinige um nicht-cash-wirksame Positionen\n"
            "2. Investitions-Cashflow: Investitionen in Sachanlagen, Finanzanlagen\n"
            "3. Finanzierungs-Cashflow: Darlehensaufnahmen/-rückzahlungen, Dividenden, Kapitalerhöhungen\n"
            "4. Netto-Cashflow und Veränderung der liquiden Mittel\n\n"
            "Warne bei kritischen Werten. Erstelle ein Wasserfall-Chart oder gestapeltes Balkendiagramm."
        ),
    },
    {
        "id": "trustee-forecast",
        "label": "Prognose erstellen",
        "description": "Trend-Analyse und Prognose der nächsten Monate",
        "tags": ["feature:trustee", "template:trustee-forecast"],
        "graph": _buildAnalysisWorkflowGraph(
            "Erstelle eine Finanzprognose basierend auf den historischen Buchhaltungsdaten:\n"
            "1. Analysiere die Umsatz- und Aufwandsentwicklung der letzten 6 Monate\n"
            "2. Identifiziere Trends und Saisonalitäten\n"
            "3. Prognostiziere Umsatz, Aufwand und Gewinn für die nächsten 3 Monate\n"
            "4. Erstelle ein Chart mit Ist-Werten und Prognose-Korridor\n"
            "5. Markiere Risiken\n\n"
            "Nutze eine einfache lineare Extrapolation mit Saisonalitätskorrektur wo sinnvoll."
        ),
    },
    {
        "id": "trustee-year-end-check",
        "label": "Jahresabschluss prüfen",
        "description": "Automatische Prüfungen für den Jahresabschluss",
        "tags": ["feature:trustee", "template:trustee-year-end-check"],
        "graph": _buildAnalysisWorkflowGraph(
            "Führe eine automatische Jahresabschluss-Prüfung durch:\n"
            "1. Saldovalidierung: Prüfe alle Bilanzkonten auf Plausibilität\n"
            "2. Vorjahresvergleich: Vergleiche Bilanz- und ER-Positionen mit dem Vorjahr, markiere Abweichungen >20%\n"
            "3. Abgrenzungen: Identifiziere potenzielle transitorische Aktiven/Passiven\n"
            "4. Gesetzliche Prüfungen: Hälftiger Kapitalverlust (OR 725), Überschuldung, Mindestkapital\n"
            "5. MWST-Plausibilisierung: Vorsteuer vs. geschätzter Aufwand, Umsatzsteuer vs. Umsatz\n\n"
            "Erstelle eine Checkliste mit Status (OK / Warnung / Kritisch) pro Prüfpunkt."
        ),
    },
]
+
+
TEMPLATE_ROLES = [
{
"roleLabel": "trustee-viewer",
- "description": {
- "en": "Trustee Viewer - View trustee data (read-only)",
- "de": "Treuhand-Betrachter - Treuhand-Daten einsehen (nur lesen)",
- "fr": "Visualiseur fiduciaire - Consulter les données fiduciaires (lecture seule)",
- },
+ "description": "Treuhand-Betrachter - Treuhand-Daten einsehen (nur lesen)",
"accessRules": [
{"context": "UI", "item": "ui.feature.trustee.dashboard", "view": True},
{"context": "UI", "item": "ui.feature.trustee.positions", "view": True},
{"context": "UI", "item": "ui.feature.trustee.documents", "view": True},
+ {"context": "RESOURCE", "item": "resource.feature.trustee.workflows.view", "view": True},
{"context": "DATA", "item": None, "view": True, "read": "m", "create": "n", "update": "n", "delete": "n"},
],
},
{
"roleLabel": "trustee-user",
- "description": {
- "en": "Trustee User - Create and manage own trustee records",
- "de": "Treuhand-Benutzer - Eigene Treuhand-Daten erstellen und verwalten",
- "fr": "Utilisateur fiduciaire - Créer et gérer ses propres données fiduciaires",
- },
+ "description": "Treuhand-Benutzer - Eigene Treuhand-Daten erstellen und verwalten",
"accessRules": [
{"context": "UI", "item": "ui.feature.trustee.dashboard", "view": True},
{"context": "UI", "item": "ui.feature.trustee.positions", "view": True},
{"context": "UI", "item": "ui.feature.trustee.documents", "view": True},
{"context": "UI", "item": "ui.feature.trustee.expense-import", "view": True},
+ {"context": "RESOURCE", "item": "resource.feature.trustee.workflows.view", "view": True},
+ {"context": "RESOURCE", "item": "resource.feature.trustee.workflows.execute", "view": True},
{"context": "DATA", "item": None, "view": True, "read": "m", "create": "m", "update": "m", "delete": "n"},
],
},
{
"roleLabel": "trustee-admin",
- "description": {
- "en": "Trustee Administrator - Full access to all trustee data and settings",
- "de": "Treuhand-Administrator - Vollzugriff auf alle Treuhand-Daten und Einstellungen",
- "fr": "Administrateur fiduciaire - Accès complet aux données et paramètres fiduciaires",
- },
+ "description": "Treuhand-Administrator - Vollzugriff auf alle Treuhand-Daten und Einstellungen",
"accessRules": [
{"context": "UI", "item": None, "view": True},
{"context": "DATA", "item": None, "view": True, "read": "a", "create": "a", "update": "a", "delete": "a"},
{"context": "RESOURCE", "item": "resource.feature.trustee.instance-roles.manage", "view": True},
+ {"context": "RESOURCE", "item": "resource.feature.trustee.workflows.view", "view": True},
+ {"context": "RESOURCE", "item": "resource.feature.trustee.workflows.execute", "view": True},
+ {"context": "RESOURCE", "item": "resource.feature.trustee.workflows.manage", "view": True},
],
},
{
"roleLabel": "trustee-accountant",
- "description": {
- "en": "Trustee Accountant - Manage accounting and financial data",
- "de": "Treuhand-Buchhalter - Buchhaltungs- und Finanzdaten verwalten",
- "fr": "Comptable fiduciaire - Gérer les données comptables et financières",
- },
+ "description": "Treuhand-Buchhalter - Buchhaltungs- und Finanzdaten verwalten",
"accessRules": [
{"context": "UI", "item": "ui.feature.trustee.dashboard", "view": True},
{"context": "UI", "item": "ui.feature.trustee.positions", "view": True},
{"context": "UI", "item": "ui.feature.trustee.documents", "view": True},
+ {"context": "UI", "item": "ui.feature.trustee.analyse", "view": True},
+ {"context": "UI", "item": "ui.feature.trustee.abschluss", "view": True},
{"context": "UI", "item": "ui.feature.trustee.settings", "view": True},
{"context": "DATA", "item": None, "view": True, "read": "g", "create": "g", "update": "g", "delete": "g"},
{"context": "RESOURCE", "item": "resource.feature.trustee.accounting.sync", "view": True},
{"context": "RESOURCE", "item": "resource.feature.trustee.accounting.view", "view": True},
+ {"context": "RESOURCE", "item": "resource.feature.trustee.workflows.view", "view": True},
+ {"context": "RESOURCE", "item": "resource.feature.trustee.workflows.execute", "view": True},
],
},
{
"roleLabel": "trustee-client",
- "description": {
- "en": "Trustee Client - View own accounting data and documents",
- "de": "Treuhand-Kunde - Eigene Buchhaltungsdaten und Dokumente einsehen",
- "fr": "Client fiduciaire - Consulter ses propres données comptables et documents",
- },
+ "description": "Treuhand-Kunde - Eigene Buchhaltungsdaten und Dokumente einsehen",
"accessRules": [
{"context": "UI", "item": "ui.feature.trustee.dashboard", "view": True},
{"context": "UI", "item": "ui.feature.trustee.positions", "view": True},
@@ -293,6 +555,21 @@ def getTemplateRoles() -> List[Dict[str, Any]]:
return TEMPLATE_ROLES
def getTemplateWorkflows() -> List[Dict[str, Any]]:
    """Accessor for the module-level template workflows.

    Hands out the TEMPLATE_WORKFLOWS list itself (not a copy) so it can be
    bootstrapped into newly created feature instances; callers should treat
    the returned list as read-only.
    """
    return TEMPLATE_WORKFLOWS
+
+
def getQuickActions() -> List[Dict[str, Any]]:
    """Accessor for the dashboard quick-action definitions.

    Hands out the QUICK_ACTIONS list itself (not a copy); callers should
    treat the returned list as read-only.
    """
    return QUICK_ACTIONS
+
+
def getQuickActionCategories() -> List[Dict[str, Any]]:
    """Accessor for the quick-action grouping categories.

    Hands out the QUICK_ACTION_CATEGORIES list itself (not a copy); callers
    should treat the returned list as read-only.
    """
    return QUICK_ACTION_CATEGORIES
+
+
def getDataObjects() -> List[Dict[str, Any]]:
"""Return DATA objects for RBAC catalog registration."""
return DATA_OBJECTS
@@ -358,7 +635,8 @@ def _syncTemplateRolesToDb() -> int:
try:
from modules.interfaces.interfaceDbApp import getRootInterface
from modules.datamodels.datamodelRbac import Role, AccessRule, AccessRuleContext
-
+ from modules.datamodels.datamodelUtils import coerce_text_multilingual
+
rootInterface = getRootInterface()
# Get existing template roles for this feature (Pydantic models)
@@ -378,7 +656,7 @@ def _syncTemplateRolesToDb() -> int:
# Create new template role
newRole = Role(
roleLabel=roleLabel,
- description=roleTemplate.get("description", {}),
+ description=coerce_text_multilingual(roleTemplate.get("description", {})),
featureCode=FEATURE_CODE,
mandateId=None, # Global template
featureInstanceId=None,
diff --git a/modules/features/trustee/routeFeatureTrustee.py b/modules/features/trustee/routeFeatureTrustee.py
index 13b28b07..9695c7bb 100644
--- a/modules/features/trustee/routeFeatureTrustee.py
+++ b/modules/features/trustee/routeFeatureTrustee.py
@@ -37,6 +37,10 @@ from modules.datamodels.datamodelPagination import (
PaginationMetadata,
normalize_pagination_dict,
)
+from modules.datamodels.datamodelRbac import Role, AccessRule, AccessRuleContext
+from modules.shared.i18nRegistry import apiRouteContext
+
+routeApiMsg = apiRouteContext("routeFeatureTrustee")
logger = logging.getLogger(__name__)
@@ -116,6 +120,73 @@ def _validateInstanceAccess(instanceId: str, context: RequestContext) -> str:
return str(instance.mandateId)
+# ============================================================================
+# QUICK ACTIONS ENDPOINT
+# ============================================================================
+
@router.get("/{instanceId}/quick-actions")
@limiter.limit("60/minute")
def getQuickActions(
    request: Request,
    instanceId: str = Path(..., description="Feature Instance ID"),
    language: str = Query(default="de", description="Language code for labels"),
    context: RequestContext = Depends(getRequestContext),
) -> Dict[str, Any]:
    """Return RBAC-filtered quick actions for the Trustee dashboard.

    Actions are filtered by the caller's role labels on this feature
    instance; sys-admins see everything (they are treated as
    ``trustee-admin``). Labels are resolved through the i18n registry.

    NOTE(review): the ``language`` query parameter is accepted but never
    forwarded to ``resolveText`` — confirm whether resolveText picks the
    language up from request context; otherwise labels ignore this
    parameter. Not changed here because resolveText's signature is not
    visible in this file.
    """
    mandateId = _validateInstanceAccess(instanceId, context)

    from .mainTrustee import QUICK_ACTIONS, QUICK_ACTION_CATEGORIES
    from modules.shared.i18nRegistry import resolveText

    # Collect the caller's role labels for this instance once, up front.
    userRoleLabels: set = set()
    if context.hasSysAdminRole:
        userRoleLabels.add("trustee-admin")
    else:
        rootInterface = getRootInterface()
        featureAccesses = rootInterface.getFeatureAccessesForUser(str(context.user.id))
        for fa in featureAccesses:
            if str(fa.featureInstanceId) == instanceId and fa.enabled:
                roleIds = fa.roleIds if hasattr(fa, "roleIds") and fa.roleIds else []
                for rid in roleIds:
                    role = rootInterface.getRole(str(rid))
                    if role and role.roleLabel:
                        userRoleLabels.add(role.roleLabel)

    filteredActions = []
    # Hoisted loop-invariant check: a caller with no roles (and no
    # sys-admin flag) can never match any action, so skip the loop.
    if userRoleLabels or context.hasSysAdminRole:
        for action in QUICK_ACTIONS:
            required = set(action.get("requiredRoles", []))
            if context.hasSysAdminRole or required.intersection(userRoleLabels):
                resolved = {
                    "id": action["id"],
                    "label": resolveText(action.get("label", {})),
                    "description": resolveText(action.get("description", {})),
                    "icon": action.get("icon", ""),
                    "color": action.get("color", ""),
                    "category": action.get("category", ""),
                    "actionType": action.get("actionType", ""),
                    "config": action.get("config", {}),
                    "sortOrder": action.get("sortOrder", 99),
                }
                # agentPrompt configs may carry a translatable upload hint;
                # copy before mutating so the template dict stays untouched.
                if resolved["actionType"] == "agentPrompt" and "config" in resolved:
                    cfg = dict(resolved["config"])
                    if "uploadHint" in cfg:
                        cfg["uploadHint"] = resolveText(cfg["uploadHint"])
                    resolved["config"] = cfg
                filteredActions.append(resolved)

    filteredActions.sort(key=lambda a: a["sortOrder"])

    resolvedCategories = [
        {"id": c["id"], "label": resolveText(c.get("label", {})), "sortOrder": c.get("sortOrder", 99)}
        for c in QUICK_ACTION_CATEGORIES
    ]

    return {"actions": filteredActions, "categories": resolvedCategories}

+
+
# ============================================================================
# ATTRIBUTES ENDPOINT (for FormGeneratorTable)
# ============================================================================
@@ -385,7 +456,7 @@ def create_organisation(
interface = getInterface(context.user, mandateId=mandateId, featureInstanceId=instanceId)
result = interface.createOrganisation(data.model_dump())
if not result:
- raise HTTPException(status_code=400, detail="Failed to create organisation")
+ raise HTTPException(status_code=400, detail=routeApiMsg("Failed to create organisation"))
return result
@@ -408,7 +479,7 @@ def update_organisation(
result = interface.updateOrganisation(orgId, data.model_dump(exclude={"id"}))
if not result:
- raise HTTPException(status_code=400, detail="Failed to update organisation")
+ raise HTTPException(status_code=400, detail=routeApiMsg("Failed to update organisation"))
return result
@@ -430,7 +501,7 @@ def delete_organisation(
success = interface.deleteOrganisation(orgId)
if not success:
- raise HTTPException(status_code=400, detail="Failed to delete organisation")
+ raise HTTPException(status_code=400, detail=routeApiMsg("Failed to delete organisation"))
return {"message": f"Organisation {orgId} deleted"}
@@ -498,7 +569,7 @@ def create_role(
interface = getInterface(context.user, mandateId=mandateId, featureInstanceId=instanceId)
result = interface.createRole(data.model_dump())
if not result:
- raise HTTPException(status_code=400, detail="Failed to create role")
+ raise HTTPException(status_code=400, detail=routeApiMsg("Failed to create role"))
return result
@@ -521,7 +592,7 @@ def update_role(
result = interface.updateRole(roleId, data.model_dump(exclude={"id"}))
if not result:
- raise HTTPException(status_code=400, detail="Failed to update role")
+ raise HTTPException(status_code=400, detail=routeApiMsg("Failed to update role"))
return result
@@ -543,7 +614,7 @@ def delete_role(
success = interface.deleteRole(roleId)
if not success:
- raise HTTPException(status_code=400, detail="Failed to delete role (may be in use)")
+ raise HTTPException(status_code=400, detail=routeApiMsg("Failed to delete role (may be in use)"))
return {"message": f"Role {roleId} deleted"}
@@ -641,7 +712,7 @@ def create_access(
interface = getInterface(context.user, mandateId=mandateId, featureInstanceId=instanceId)
result = interface.createAccess(data.model_dump())
if not result:
- raise HTTPException(status_code=400, detail="Failed to create access")
+ raise HTTPException(status_code=400, detail=routeApiMsg("Failed to create access"))
return result
@@ -664,7 +735,7 @@ def update_access(
result = interface.updateAccess(accessId, data.model_dump(exclude={"id"}))
if not result:
- raise HTTPException(status_code=400, detail="Failed to update access")
+ raise HTTPException(status_code=400, detail=routeApiMsg("Failed to update access"))
return result
@@ -686,7 +757,7 @@ def delete_access(
success = interface.deleteAccess(accessId)
if not success:
- raise HTTPException(status_code=400, detail="Failed to delete access")
+ raise HTTPException(status_code=400, detail=routeApiMsg("Failed to delete access"))
return {"message": f"Access {accessId} deleted"}
@@ -769,7 +840,7 @@ def create_contract(
interface = getInterface(context.user, mandateId=mandateId, featureInstanceId=instanceId)
result = interface.createContract(data.model_dump())
if not result:
- raise HTTPException(status_code=400, detail="Failed to create contract")
+ raise HTTPException(status_code=400, detail=routeApiMsg("Failed to create contract"))
return result
@@ -792,7 +863,7 @@ def update_contract(
result = interface.updateContract(contractId, data.model_dump(exclude={"id"}))
if not result:
- raise HTTPException(status_code=400, detail="Failed to update contract (organisationId cannot be changed)")
+ raise HTTPException(status_code=400, detail=routeApiMsg("Failed to update contract (organisationId cannot be changed)"))
return result
@@ -814,7 +885,7 @@ def delete_contract(
success = interface.deleteContract(contractId)
if not success:
- raise HTTPException(status_code=400, detail="Failed to delete contract")
+ raise HTTPException(status_code=400, detail=routeApiMsg("Failed to delete contract"))
return {"message": f"Contract {contractId} deleted"}
@@ -938,7 +1009,7 @@ def get_document_data(
data = interface.getDocumentData(documentId)
if not data:
- raise HTTPException(status_code=404, detail="Document data not found")
+ raise HTTPException(status_code=404, detail=routeApiMsg("Document data not found"))
return StreamingResponse(
io.BytesIO(data),
@@ -995,7 +1066,7 @@ async def create_document(
interface = getInterface(context.user, mandateId=mandateId, featureInstanceId=instanceId)
result = interface.createDocument(body)
if not result:
- raise HTTPException(status_code=400, detail="Failed to create document")
+ raise HTTPException(status_code=400, detail=routeApiMsg("Failed to create document"))
return result
@@ -1025,7 +1096,7 @@ async def upload_document(
interface = getInterface(context.user, mandateId=mandateId, featureInstanceId=instanceId)
result = interface.createDocument(docData)
if not result:
- raise HTTPException(status_code=400, detail="Failed to create document")
+ raise HTTPException(status_code=400, detail=routeApiMsg("Failed to create document"))
return result
@@ -1048,7 +1119,7 @@ def update_document(
result = interface.updateDocument(documentId, data.model_dump(exclude={"id"}))
if not result:
- raise HTTPException(status_code=400, detail="Failed to update document")
+ raise HTTPException(status_code=400, detail=routeApiMsg("Failed to update document"))
return result
@@ -1070,7 +1141,7 @@ def delete_document(
success = interface.deleteDocument(documentId)
if not success:
- raise HTTPException(status_code=400, detail="Failed to delete document")
+ raise HTTPException(status_code=400, detail=routeApiMsg("Failed to delete document"))
return {"message": f"Document {documentId} deleted"}
@@ -1220,7 +1291,7 @@ def create_position(
interface = getInterface(context.user, mandateId=mandateId, featureInstanceId=instanceId)
result = interface.createPosition(data.model_dump())
if not result:
- raise HTTPException(status_code=400, detail="Failed to create position")
+ raise HTTPException(status_code=400, detail=routeApiMsg("Failed to create position"))
return result
@@ -1243,7 +1314,7 @@ def update_position(
result = interface.updatePosition(positionId, data.model_dump(exclude={"id"}))
if not result:
- raise HTTPException(status_code=400, detail="Failed to update position")
+ raise HTTPException(status_code=400, detail=routeApiMsg("Failed to update position"))
return result
@@ -1265,7 +1336,7 @@ def delete_position(
success = interface.deletePosition(positionId)
if not success:
- raise HTTPException(status_code=400, detail="Failed to delete position")
+ raise HTTPException(status_code=400, detail=routeApiMsg("Failed to delete position"))
return {"message": f"Position {positionId} deleted"}
@@ -1398,7 +1469,7 @@ async def save_accounting_config(
if not plainConfig:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
- detail="config is required for new integration (e.g. clientName, apiKey)."
+ detail=routeApiMsg("config is required for new integration (e.g. clientName, apiKey).")
)
encryptedConfig = encryptValue(json.dumps(plainConfig), keyName="accountingConfig")
@@ -1511,7 +1582,7 @@ async def sync_positions_to_accounting(
positionIds = data.get("positionIds", [])
if not positionIds:
- raise HTTPException(status_code=400, detail="positionIds required")
+ raise HTTPException(status_code=400, detail=routeApiMsg("positionIds required"))
results = await bridge.pushBatchToAccounting(instanceId, positionIds)
failed = [r for r in results if not r.success]
@@ -1642,6 +1713,87 @@ def get_import_status(
return counts
# ===== AI Data Cache =====

@router.post("/{instanceId}/accounting/clear-cache")
@limiter.limit("10/minute")
def clear_ai_data_cache(
    request: Request,
    instanceId: str = Path(..., description="Feature Instance ID"),
    context: RequestContext = Depends(getRequestContext),
) -> Dict[str, Any]:
    """Drop the cached AI feature-data query results for this instance.

    Ensures the next AI query against this instance reads fresh data from
    the database instead of the cache.
    """
    _validateInstanceAccess(instanceId, context)
    from modules.serviceCenter.services.serviceAgent.coreTools._featureSubAgentTools import (
        clearFeatureQueryCache,
    )
    clearedCount = clearFeatureQueryCache(instanceId)
    return {"cleared": clearedCount, "featureInstanceId": instanceId}

+
+# ===== Data Export =====
+
@router.get("/{instanceId}/accounting/export-data")
@limiter.limit("3/minute")
def export_accounting_data(
    request: Request,
    instanceId: str = Path(..., description="Feature Instance ID"),
    context: RequestContext = Depends(getRequestContext),
) -> Response:
    """Export all TrusteeData* tables for this instance as a JSON download (admin only)."""
    mandateId = _validateInstanceAccess(instanceId, context)

    from .datamodelFeatureTrustee import (
        TrusteeDataAccount,
        TrusteeDataJournalEntry,
        TrusteeDataJournalLine,
        TrusteeDataContact,
        TrusteeDataAccountBalance,
        TrusteeAccountingConfig,
    )
    import time as _time

    interface = getInterface(context.user, mandateId=mandateId, featureInstanceId=instanceId)
    instanceFilter = {"featureInstanceId": instanceId}

    # Dump every TrusteeData* table, keyed by its model name.
    exportModels = {
        "TrusteeDataAccount": TrusteeDataAccount,
        "TrusteeDataJournalEntry": TrusteeDataJournalEntry,
        "TrusteeDataJournalLine": TrusteeDataJournalLine,
        "TrusteeDataContact": TrusteeDataContact,
        "TrusteeDataAccountBalance": TrusteeDataAccountBalance,
    }
    tables: Dict[str, Any] = {
        name: (interface.db.getRecordset(model, recordFilter=instanceFilter) or [])
        for name, model in exportModels.items()
    }

    # Attach connector/sync metadata from the active accounting config, if any.
    syncInfo: Dict[str, Any] = {}
    activeConfigs = interface.db.getRecordset(
        TrusteeAccountingConfig,
        recordFilter={"featureInstanceId": instanceId, "isActive": True},
    )
    if activeConfigs:
        activeConfig = activeConfigs[0]
        syncInfo = {
            "connectorType": activeConfig.get("connectorType", ""),
            "lastSyncAt": activeConfig.get("lastSyncAt"),
            "lastSyncStatus": activeConfig.get("lastSyncStatus", ""),
        }

    payload = {
        "exportedAt": _time.time(),
        "featureInstanceId": instanceId,
        "mandateId": mandateId,
        "syncInfo": syncInfo,
        "tables": tables,
    }

    # default=str stringifies non-JSON types (datetimes, UUIDs) in the dump.
    jsonBytes = json.dumps(payload, ensure_ascii=False, default=str).encode("utf-8")
    return Response(
        content=jsonBytes,
        media_type="application/json",
        headers={"Content-Disposition": f'attachment; filename="trustee_data_{instanceId[:8]}.json"'},
    )

+
+
# ===== Position-Document Query =====
@router.get("/{instanceId}/positions/document/{documentId}", response_model=List[TrusteePosition])
@@ -1662,8 +1814,6 @@ def get_positions_by_document(
# ===== Instance Roles Management =====
# These endpoints allow feature admins to manage instance-specific roles and their AccessRules
-from modules.datamodels.datamodelRbac import Role, AccessRule, AccessRuleContext
-
def _validateInstanceAdmin(instanceId: str, context: RequestContext) -> str:
"""
@@ -1695,7 +1845,7 @@ def _validateInstanceAdmin(instanceId: str, context: RequestContext) -> str:
if not hasAdminPermission:
raise HTTPException(
status_code=403,
- detail="Keine Berechtigung zur Rollenverwaltung"
+ detail=routeApiMsg("Keine Berechtigung zur Rollenverwaltung")
)
return mandateId
diff --git a/modules/features/workspace/datamodelFeatureWorkspace.py b/modules/features/workspace/datamodelFeatureWorkspace.py
index d7c292db..b01f0427 100644
--- a/modules/features/workspace/datamodelFeatureWorkspace.py
+++ b/modules/features/workspace/datamodelFeatureWorkspace.py
@@ -5,27 +5,32 @@
from typing import Optional
from pydantic import BaseModel, Field
from modules.datamodels.datamodelBase import PowerOnModel
-from modules.shared.attributeUtils import registerModelLabels
+from modules.shared.i18nRegistry import i18nModel
import uuid
@i18nModel("Workspace Benutzereinstellungen")
class WorkspaceUserSettings(PowerOnModel):
    """Per-user workspace settings; ``None`` values mean "use the instance default"."""

    # Primary key, generated as a UUID4 string.
    id: str = Field(
        default_factory=lambda: str(uuid.uuid4()),
        description="Primary key",
        json_schema_extra={"label": "ID", "frontend_type": "text", "frontend_readonly": True, "frontend_required": False},
    )
    # Owning user (read-only in the frontend).
    userId: str = Field(
        description="User ID",
        json_schema_extra={"label": "Benutzer-ID", "frontend_type": "text", "frontend_readonly": True, "frontend_required": True},
    )
    # Owning mandate (read-only in the frontend).
    mandateId: str = Field(
        description="Mandate ID",
        json_schema_extra={"label": "Mandanten-ID", "frontend_type": "text", "frontend_readonly": True, "frontend_required": True},
    )
    # Workspace feature instance these settings belong to.
    featureInstanceId: str = Field(
        description="Feature Instance ID",
        json_schema_extra={"label": "Feature-Instanz-ID", "frontend_type": "text", "frontend_readonly": True, "frontend_required": True},
    )
    # Per-user override for the agent round limit; None falls back to the
    # instance-level default.
    maxAgentRounds: Optional[int] = Field(
        default=None,
        description="Max agent rounds override (None = instance default)",
        json_schema_extra={"label": "Max. Agenten-Runden", "frontend_type": "number", "frontend_readonly": False, "frontend_required": False},
    )
diff --git a/modules/features/workspace/mainWorkspace.py b/modules/features/workspace/mainWorkspace.py
index 5ef9b399..bb501f21 100644
--- a/modules/features/workspace/mainWorkspace.py
+++ b/modules/features/workspace/mainWorkspace.py
@@ -12,32 +12,28 @@ from typing import Dict, List, Any
logger = logging.getLogger(__name__)
FEATURE_CODE = "workspace"
-FEATURE_LABEL = {"en": "AI Workspace", "de": "AI Workspace", "fr": "AI Workspace"}
+FEATURE_LABEL = "AI Workspace"
FEATURE_ICON = "mdi-brain"
UI_OBJECTS = [
{
"objectKey": "ui.feature.workspace.dashboard",
- "label": {"en": "Dashboard", "de": "Dashboard", "fr": "Tableau de bord"},
+ "label": "Dashboard",
"meta": {"area": "dashboard"}
},
{
"objectKey": "ui.feature.workspace.editor",
- "label": {"en": "Editor", "de": "Editor", "fr": "Editeur"},
+ "label": "Editor",
"meta": {"area": "editor"}
},
{
"objectKey": "ui.feature.workspace.settings",
- "label": {"en": "Settings", "de": "Einstellungen", "fr": "Parametres"},
+ "label": "Einstellungen",
"meta": {"area": "settings"}
},
{
"objectKey": "ui.feature.workspace.rag-insights",
- "label": {
- "en": "Knowledge insights",
- "de": "Wissens-Insights",
- "fr": "Aperçu des connaissances",
- },
+ "label": "Wissens-Insights",
"meta": {"area": "rag-insights"},
},
]
@@ -45,37 +41,37 @@ UI_OBJECTS = [
RESOURCE_OBJECTS = [
{
"objectKey": "resource.feature.workspace.start",
- "label": {"en": "Start Agent", "de": "Agent starten", "fr": "Demarrer agent"},
+ "label": "Agent starten",
"meta": {"endpoint": "/api/workspace/{instanceId}/start/stream", "method": "POST"}
},
{
"objectKey": "resource.feature.workspace.stop",
- "label": {"en": "Stop Agent", "de": "Agent stoppen", "fr": "Arreter agent"},
+ "label": "Agent stoppen",
"meta": {"endpoint": "/api/workspace/{instanceId}/{workflowId}/stop", "method": "POST"}
},
{
"objectKey": "resource.feature.workspace.files",
- "label": {"en": "Manage Files", "de": "Dateien verwalten", "fr": "Gerer fichiers"},
+ "label": "Dateien verwalten",
"meta": {"endpoint": "/api/workspace/{instanceId}/files", "method": "GET"}
},
{
"objectKey": "resource.feature.workspace.folders",
- "label": {"en": "Manage Folders", "de": "Ordner verwalten", "fr": "Gerer dossiers"},
+ "label": "Ordner verwalten",
"meta": {"endpoint": "/api/workspace/{instanceId}/folders", "method": "GET"}
},
{
"objectKey": "resource.feature.workspace.datasources",
- "label": {"en": "Data Sources", "de": "Datenquellen", "fr": "Sources de donnees"},
+ "label": "Datenquellen",
"meta": {"endpoint": "/api/workspace/{instanceId}/datasources", "method": "GET"}
},
{
"objectKey": "resource.feature.workspace.voice",
- "label": {"en": "Voice Input/Output", "de": "Spracheingabe/-ausgabe", "fr": "Entree/sortie vocale"},
+ "label": "Spracheingabe/-ausgabe",
"meta": {"endpoint": "/api/workspace/{instanceId}/voice/*", "method": "POST"}
},
{
"objectKey": "resource.feature.workspace.edits",
- "label": {"en": "Review File Edits", "de": "Datei-Aenderungen pruefen", "fr": "Verifier les modifications de fichiers"},
+ "label": "Datei-Aenderungen pruefen",
"meta": {"endpoint": "/api/workspace/{instanceId}/edit/*", "method": "POST"}
},
]
@@ -83,11 +79,7 @@ RESOURCE_OBJECTS = [
TEMPLATE_ROLES = [
{
"roleLabel": "workspace-viewer",
- "description": {
- "en": "Workspace Viewer - View workspace (read-only)",
- "de": "Workspace Betrachter - Workspace ansehen (nur lesen)",
- "fr": "Visualiseur Workspace - Consulter le workspace (lecture seule)"
- },
+ "description": "Workspace Betrachter - Workspace ansehen (nur lesen)",
"accessRules": [
{"context": "UI", "item": "ui.feature.workspace.dashboard", "view": True},
{"context": "UI", "item": "ui.feature.workspace.editor", "view": True},
@@ -98,11 +90,7 @@ TEMPLATE_ROLES = [
},
{
"roleLabel": "workspace-user",
- "description": {
- "en": "Workspace User - Use AI workspace and tools",
- "de": "Workspace Benutzer - AI Workspace und Tools nutzen",
- "fr": "Utilisateur Workspace - Utiliser l'espace de travail AI et les outils"
- },
+ "description": "Workspace Benutzer - AI Workspace und Tools nutzen",
"accessRules": [
{"context": "UI", "item": "ui.feature.workspace.dashboard", "view": True},
{"context": "UI", "item": "ui.feature.workspace.editor", "view": True},
@@ -120,11 +108,7 @@ TEMPLATE_ROLES = [
},
{
"roleLabel": "workspace-admin",
- "description": {
- "en": "Workspace Admin - All UI and API actions; data is always scoped to own records (same privacy as users)",
- "de": "Workspace Admin - Alle UI- und API-Aktionen; Daten immer nur eigene Datensätze (gleiche Privatsphäre wie User)",
- "fr": "Administrateur Workspace - Toute l'UI et les API; donnees limitees a ses propres enregistrements"
- },
+ "description": "Workspace Admin - Alle UI- und API-Aktionen; Daten immer nur eigene Datensätze (gleiche Privatsphäre wie User)",
"accessRules": [
{"context": "UI", "item": None, "view": True},
{"context": "RESOURCE", "item": None, "view": True},
@@ -194,6 +178,7 @@ def _syncTemplateRolesToDb() -> int:
try:
from modules.interfaces.interfaceDbApp import getRootInterface
from modules.datamodels.datamodelRbac import Role, AccessRule, AccessRuleContext
+ from modules.datamodels.datamodelUtils import coerce_text_multilingual
rootInterface = getRootInterface()
@@ -211,7 +196,7 @@ def _syncTemplateRolesToDb() -> int:
else:
newRole = Role(
roleLabel=roleLabel,
- description=roleTemplate.get("description", {}),
+ description=coerce_text_multilingual(roleTemplate.get("description", {})),
featureCode=FEATURE_CODE,
mandateId=None,
featureInstanceId=None,
diff --git a/modules/features/workspace/routeFeatureWorkspace.py b/modules/features/workspace/routeFeatureWorkspace.py
index ae0154dc..08216e56 100644
--- a/modules/features/workspace/routeFeatureWorkspace.py
+++ b/modules/features/workspace/routeFeatureWorkspace.py
@@ -29,6 +29,8 @@ from modules.interfaces.interfaceAiObjects import AiObjects
from modules.serviceCenter.core.serviceStreaming import get_event_manager
from modules.serviceCenter.services.serviceAgent.datamodelAgent import AgentEventTypeEnum, PendingFileEdit
from modules.shared.timeUtils import parseTimestamp
+from modules.shared.i18nRegistry import apiRouteContext, resolveText
+routeApiMsg = apiRouteContext("routeFeatureWorkspace")
logger = logging.getLogger(__name__)
@@ -127,16 +129,17 @@ def _validateInstanceAccess(instanceId: str, context: RequestContext):
raise HTTPException(status_code=404, detail=f"Feature instance {instanceId} not found")
featureAccess = rootInterface.getFeatureAccess(str(context.user.id), instanceId)
if not featureAccess or not featureAccess.enabled:
- raise HTTPException(status_code=403, detail="Access denied to this feature instance")
+ raise HTTPException(status_code=403, detail=routeApiMsg("Access denied to this feature instance"))
mandateId = str(instance.mandateId) if instance.mandateId else None
instanceConfig = instance.config if hasattr(instance, "config") and instance.config else {}
return mandateId, instanceConfig
-def _getChatInterface(context: RequestContext, featureInstanceId: str = None):
+def _getChatInterface(context: RequestContext, featureInstanceId: str = None, mandateId: str = None):
+ effectiveMandateId = mandateId or (str(context.mandateId) if context.mandateId else None)
return interfaceDbChat.getInterface(
context.user,
- mandateId=str(context.mandateId) if context.mandateId else None,
+ mandateId=effectiveMandateId,
featureInstanceId=featureInstanceId,
)
@@ -235,6 +238,8 @@ def _buildFeatureDataSourceContext(featureDataSourceIds: List[str]) -> str:
parts = [
"The user has attached data from the following feature instances.",
"Use queryFeatureInstance(featureInstanceId, question) to query this data.",
+ "IMPORTANT: Formulate ONE comprehensive question per call that covers everything you need.",
+ "The sub-agent can browse, filter, and aggregate -- ask precisely and avoid repeated calls.",
"",
]
found = False
@@ -356,8 +361,6 @@ def _workspaceMessageToClientDict(msg: Any) -> Dict[str, Any]:
raw = dict(msg)
elif hasattr(msg, "model_dump"):
raw = msg.model_dump()
- elif hasattr(msg, "dict"):
- raw = msg.dict()
else:
raw = {
"id": getattr(msg, "id", None),
@@ -378,8 +381,6 @@ def _workspaceMessageToClientDict(msg: Any) -> Dict[str, Any]:
serialized_docs.append(doc)
elif hasattr(doc, "model_dump"):
serialized_docs.append(doc.model_dump())
- elif hasattr(doc, "dict"):
- serialized_docs.append(doc.dict())
else:
serialized_docs.append({
"id": getattr(doc, "id", ""),
@@ -543,7 +544,7 @@ async def streamWorkspaceStart(
):
"""Start or continue a Workspace session with SSE streaming via serviceAgent."""
mandateId, instanceConfig = _validateInstanceAccess(instanceId, context)
- chatInterface = _getChatInterface(context, featureInstanceId=instanceId)
+ chatInterface = _getChatInterface(context, featureInstanceId=instanceId, mandateId=mandateId)
aiObjects = await _getAiObjects()
eventManager = get_event_manager()
@@ -907,7 +908,7 @@ async def stopWorkspace(
workflowId: str = Path(...),
context: RequestContext = Depends(getRequestContext),
):
- _validateInstanceAccess(instanceId, context)
+ _mandateId, _ = _validateInstanceAccess(instanceId, context)
queueId = f"workspace-{workflowId}"
eventManager = get_event_manager()
cancelled = await eventManager.cancel_agent(queueId)
@@ -933,8 +934,8 @@ async def listWorkspaceWorkflows(
context: RequestContext = Depends(getRequestContext),
):
"""List workspace workflows/conversations for this instance."""
- _validateInstanceAccess(instanceId, context)
- chatInterface = _getChatInterface(context, featureInstanceId=instanceId)
+ _mandateId, _ = _validateInstanceAccess(instanceId, context)
+ chatInterface = _getChatInterface(context, featureInstanceId=instanceId, mandateId=_mandateId)
workflows = chatInterface.getWorkflows() or []
from modules.interfaces.interfaceDbApp import getRootInterface
@@ -1007,8 +1008,8 @@ async def resolveRag(
context: RequestContext = Depends(getRequestContext),
):
"""Build a RAG summary for a chat (workflow) to inject into the input area."""
- _validateInstanceAccess(instanceId, context)
- chatInterface = _getChatInterface(context, featureInstanceId=instanceId)
+ _mandateId, _ = _validateInstanceAccess(instanceId, context)
+ chatInterface = _getChatInterface(context, featureInstanceId=instanceId, mandateId=_mandateId)
messages = chatInterface.getMessages(body.chatId) or []
texts = []
@@ -1037,8 +1038,8 @@ async def patchWorkspaceWorkflow(
context: RequestContext = Depends(getRequestContext),
):
"""Update a workspace workflow (e.g. rename)."""
- _validateInstanceAccess(instanceId, context)
- chatInterface = _getChatInterface(context, featureInstanceId=instanceId)
+ _mandateId, _ = _validateInstanceAccess(instanceId, context)
+ chatInterface = _getChatInterface(context, featureInstanceId=instanceId, mandateId=_mandateId)
workflow = chatInterface.getWorkflow(workflowId)
if not workflow:
raise HTTPException(status_code=404, detail=f"Workflow {workflowId} not found")
@@ -1071,8 +1072,8 @@ async def deleteWorkspaceWorkflow(
context: RequestContext = Depends(getRequestContext),
):
"""Delete a workspace workflow and its messages."""
- _validateInstanceAccess(instanceId, context)
- chatInterface = _getChatInterface(context, featureInstanceId=instanceId)
+ _mandateId, _ = _validateInstanceAccess(instanceId, context)
+ chatInterface = _getChatInterface(context, featureInstanceId=instanceId, mandateId=_mandateId)
workflow = chatInterface.getWorkflow(workflowId)
if not workflow:
raise HTTPException(status_code=404, detail=f"Workflow {workflowId} not found")
@@ -1089,8 +1090,8 @@ async def createWorkspaceWorkflow(
context: RequestContext = Depends(getRequestContext),
):
"""Create a new empty workspace workflow."""
- _validateInstanceAccess(instanceId, context)
- chatInterface = _getChatInterface(context, featureInstanceId=instanceId)
+ _mandateId, _ = _validateInstanceAccess(instanceId, context)
+ chatInterface = _getChatInterface(context, featureInstanceId=instanceId, mandateId=_mandateId)
name = body.get("name", "Neuer Chat")
workflow = chatInterface.createWorkflow({
"featureInstanceId": instanceId,
@@ -1112,8 +1113,8 @@ async def getWorkspaceMessages(
context: RequestContext = Depends(getRequestContext),
):
"""Get all messages for a workspace workflow/conversation."""
- _validateInstanceAccess(instanceId, context)
- chatInterface = _getChatInterface(context, featureInstanceId=instanceId)
+ _mandateId, _ = _validateInstanceAccess(instanceId, context)
+ chatInterface = _getChatInterface(context, featureInstanceId=instanceId, mandateId=_mandateId)
messages = chatInterface.getMessages(workflowId) or []
items = [_workspaceMessageToClientDict(m) for m in messages]
items.sort(
@@ -1140,7 +1141,7 @@ async def listWorkspaceFiles(
search: Optional[str] = Query(None),
context: RequestContext = Depends(getRequestContext),
):
- _validateInstanceAccess(instanceId, context)
+ _mandateId, _ = _validateInstanceAccess(instanceId, context)
dbMgmt = _getDbManagement(context, featureInstanceId=instanceId)
files = dbMgmt.getAllFiles()
@@ -1172,7 +1173,7 @@ async def getFileContent(
):
"""Return the raw content of a file for preview."""
from fastapi.responses import Response
- _validateInstanceAccess(instanceId, context)
+ _mandateId, _ = _validateInstanceAccess(instanceId, context)
dbMgmt = _getDbManagement(context, featureInstanceId=instanceId)
fileRecord = dbMgmt.getFile(fileId)
if not fileRecord:
@@ -1180,10 +1181,10 @@ async def getFileContent(
fileData = fileRecord if isinstance(fileRecord, dict) else fileRecord.model_dump()
filePath = fileData.get("filePath")
if not filePath:
- raise HTTPException(status_code=404, detail="File has no stored path")
+ raise HTTPException(status_code=404, detail=routeApiMsg("File has no stored path"))
import os
if not os.path.isfile(filePath):
- raise HTTPException(status_code=404, detail="File not found on disk")
+ raise HTTPException(status_code=404, detail=routeApiMsg("File not found on disk"))
mimeType = fileData.get("mimeType", "application/octet-stream")
with open(filePath, "rb") as fh:
content = fh.read()
@@ -1198,13 +1199,13 @@ async def listWorkspaceFolders(
parentId: Optional[str] = Query(None),
context: RequestContext = Depends(getRequestContext),
):
- _validateInstanceAccess(instanceId, context)
+ _mandateId, _ = _validateInstanceAccess(instanceId, context)
try:
from modules.serviceCenter import getService
from modules.serviceCenter.context import ServiceCenterContext
ctx = ServiceCenterContext(
user=context.user,
- mandate_id=str(context.mandateId) if context.mandateId else None,
+ mandate_id=_mandateId or "",
feature_instance_id=instanceId,
)
chatService = getService("chat", ctx)
@@ -1221,17 +1222,15 @@ async def listWorkspaceDataSources(
instanceId: str = Path(...),
context: RequestContext = Depends(getRequestContext),
):
- _validateInstanceAccess(instanceId, context)
+ wsMandateId, _ = _validateInstanceAccess(instanceId, context)
try:
- from modules.serviceCenter import getService
- from modules.serviceCenter.context import ServiceCenterContext
- ctx = ServiceCenterContext(
- user=context.user,
- mandate_id=str(context.mandateId) if context.mandateId else None,
- feature_instance_id=instanceId,
- )
- chatService = getService("chat", ctx)
- dataSources = chatService.listDataSources(featureInstanceId=instanceId)
+ from modules.datamodels.datamodelDataSource import DataSource
+ from modules.interfaces.interfaceDbApp import getRootInterface
+ rootIf = getRootInterface()
+ recordFilter: dict = {"featureInstanceId": instanceId}
+ if wsMandateId:
+ recordFilter["mandateId"] = wsMandateId
+ dataSources = rootIf.db.getRecordset(DataSource, recordFilter=recordFilter)
return JSONResponse({"dataSources": dataSources or []})
except Exception:
return JSONResponse({"dataSources": []})
@@ -1245,12 +1244,12 @@ async def listWorkspaceConnections(
context: RequestContext = Depends(getRequestContext),
):
"""Return the user's active connections (UserConnections)."""
- _validateInstanceAccess(instanceId, context)
+ _mandateId, _ = _validateInstanceAccess(instanceId, context)
from modules.serviceCenter import getService
from modules.serviceCenter.context import ServiceCenterContext
ctx = ServiceCenterContext(
user=context.user,
- mandate_id=str(context.mandateId) if context.mandateId else None,
+ mandate_id=_mandateId or "",
feature_instance_id=instanceId,
)
chatService = getService("chat", ctx)
@@ -1292,12 +1291,12 @@ async def createWorkspaceDataSource(
context: RequestContext = Depends(getRequestContext),
):
"""Create a new DataSource for this workspace instance."""
- _validateInstanceAccess(instanceId, context)
+ _mandateId, _ = _validateInstanceAccess(instanceId, context)
from modules.serviceCenter import getService
from modules.serviceCenter.context import ServiceCenterContext
ctx = ServiceCenterContext(
user=context.user,
- mandate_id=str(context.mandateId) if context.mandateId else None,
+ mandate_id=_mandateId or "",
feature_instance_id=instanceId,
)
chatService = getService("chat", ctx)
@@ -1321,12 +1320,12 @@ async def deleteWorkspaceDataSource(
context: RequestContext = Depends(getRequestContext),
):
"""Delete a DataSource."""
- _validateInstanceAccess(instanceId, context)
+ _mandateId, _ = _validateInstanceAccess(instanceId, context)
from modules.serviceCenter import getService
from modules.serviceCenter.context import ServiceCenterContext
ctx = ServiceCenterContext(
user=context.user,
- mandate_id=str(context.mandateId) if context.mandateId else None,
+ mandate_id=_mandateId or "",
feature_instance_id=instanceId,
)
chatService = getService("chat", ctx)
@@ -1440,11 +1439,11 @@ async def listFeatureConnectionTables(
rootIf = getRootInterface()
inst = rootIf.getFeatureInstance(fiId)
if not inst:
- raise HTTPException(status_code=404, detail="Feature instance not found")
+ raise HTTPException(status_code=404, detail=routeApiMsg("Feature instance not found"))
mandateId = str(inst.mandateId) if inst.mandateId else None
if wsMandateId and mandateId and mandateId != wsMandateId:
- raise HTTPException(status_code=403, detail="Feature instance does not belong to workspace mandate")
+ raise HTTPException(status_code=403, detail=routeApiMsg("Feature instance does not belong to workspace mandate"))
catalog = getCatalogService()
try:
@@ -1468,7 +1467,7 @@ async def listFeatureConnectionTables(
node = {
"objectKey": obj.get("objectKey", ""),
"tableName": meta.get("table", ""),
- "label": obj.get("label", {}),
+ "label": resolveText(obj.get("label", "")),
"fields": meta.get("fields", []),
}
if meta.get("isParent"):
@@ -1499,12 +1498,12 @@ async def listParentObjects(
rootIf = getRootInterface()
inst = rootIf.getFeatureInstance(fiId)
if not inst:
- raise HTTPException(status_code=404, detail="Feature instance not found")
+ raise HTTPException(status_code=404, detail=routeApiMsg("Feature instance not found"))
featureCode = inst.featureCode
mandateId = str(inst.mandateId) if inst.mandateId else ""
if wsMandateId and mandateId and mandateId != wsMandateId:
- raise HTTPException(status_code=403, detail="Feature instance does not belong to workspace mandate")
+ raise HTTPException(status_code=403, detail=routeApiMsg("Feature instance does not belong to workspace mandate"))
catalog = getCatalogService()
parentObj = None
@@ -1618,7 +1617,7 @@ async def createFeatureDataSource(
inst = rootIf.getFeatureInstance(body.featureInstanceId)
mandateId = str(inst.mandateId) if inst else (str(context.mandateId) if context.mandateId else "")
if wsMandateId and mandateId and mandateId != wsMandateId:
- raise HTTPException(status_code=403, detail="Feature instance does not belong to workspace mandate")
+ raise HTTPException(status_code=403, detail=routeApiMsg("Feature instance does not belong to workspace mandate"))
fds = FeatureDataSource(
featureInstanceId=body.featureInstanceId,
@@ -1642,16 +1641,16 @@ async def listFeatureDataSources(
instanceId: str = Path(...),
context: RequestContext = Depends(getRequestContext),
):
- """List active FeatureDataSources for this workspace instance."""
- _validateInstanceAccess(instanceId, context)
+ """List active FeatureDataSources for this workspace instance, scoped to mandate."""
+ wsMandateId, _ = _validateInstanceAccess(instanceId, context)
from modules.interfaces.interfaceDbApp import getRootInterface
from modules.datamodels.datamodelFeatureDataSource import FeatureDataSource
rootIf = getRootInterface()
- records = rootIf.db.getRecordset(
- FeatureDataSource,
- recordFilter={"workspaceInstanceId": instanceId},
- )
+ recordFilter: dict = {"workspaceInstanceId": instanceId}
+ if wsMandateId:
+ recordFilter["mandateId"] = wsMandateId
+ records = rootIf.db.getRecordset(FeatureDataSource, recordFilter=recordFilter)
return JSONResponse({"featureDataSources": records or []})
@@ -1664,7 +1663,7 @@ async def deleteFeatureDataSource(
context: RequestContext = Depends(getRequestContext),
):
"""Delete a FeatureDataSource."""
- _validateInstanceAccess(instanceId, context)
+ _mandateId, _ = _validateInstanceAccess(instanceId, context)
from modules.interfaces.interfaceDbApp import getRootInterface
from modules.datamodels.datamodelFeatureDataSource import FeatureDataSource
@@ -1682,14 +1681,14 @@ async def listConnectionServices(
context: RequestContext = Depends(getRequestContext),
):
"""Return the available services for a specific UserConnection."""
- _validateInstanceAccess(instanceId, context)
+ _mandateId, _ = _validateInstanceAccess(instanceId, context)
try:
from modules.connectors.connectorResolver import ConnectorResolver
from modules.serviceCenter import getService as getSvc
from modules.serviceCenter.context import ServiceCenterContext
ctx = ServiceCenterContext(
user=context.user,
- mandate_id=str(context.mandateId) if context.mandateId else None,
+ mandate_id=_mandateId or "",
feature_instance_id=instanceId,
)
chatService = getSvc("chat", ctx)
@@ -1741,14 +1740,14 @@ async def browseConnectionService(
context: RequestContext = Depends(getRequestContext),
):
"""Browse folders/items within a connection's service at a given path."""
- _validateInstanceAccess(instanceId, context)
+ _mandateId, _ = _validateInstanceAccess(instanceId, context)
try:
from modules.connectors.connectorResolver import ConnectorResolver
from modules.serviceCenter import getService as getSvc
from modules.serviceCenter.context import ServiceCenterContext
ctx = ServiceCenterContext(
user=context.user,
- mandate_id=str(context.mandateId) if context.mandateId else None,
+ mandate_id=_mandateId or "",
feature_instance_id=instanceId,
)
chatService = getSvc("chat", ctx)
@@ -1786,7 +1785,7 @@ async def transcribeVoice(
context: RequestContext = Depends(getRequestContext),
):
"""Transcribe audio to text using speech-to-text."""
- _validateInstanceAccess(instanceId, context)
+ _mandateId, _ = _validateInstanceAccess(instanceId, context)
audioBytes = await audio.read()
try:
import aiohttp
@@ -1815,10 +1814,10 @@ async def synthesizeVoice(
context: RequestContext = Depends(getRequestContext),
):
"""Synthesize text to speech audio."""
- _validateInstanceAccess(instanceId, context)
+ _mandateId, _ = _validateInstanceAccess(instanceId, context)
text = body.get("text", "")
if not text:
- raise HTTPException(status_code=400, detail="text is required")
+ raise HTTPException(status_code=400, detail=routeApiMsg("text is required"))
return JSONResponse({"audio": None, "note": "TTS via browser Speech Synthesis API recommended"})
@@ -1837,7 +1836,7 @@ async def getPendingEdits(
context: RequestContext = Depends(getRequestContext),
):
"""Return all pending file edit proposals for this workspace instance."""
- _validateInstanceAccess(instanceId, context)
+ _mandateId, _ = _validateInstanceAccess(instanceId, context)
editList = [e.model_dump() for e in _pendingEditsStore.forInstance(instanceId).getPending()]
return JSONResponse({"edits": editList})
@@ -1851,7 +1850,7 @@ async def acceptEdit(
context: RequestContext = Depends(getRequestContext),
):
"""Accept a proposed file edit -- applies the new content to the file."""
- _validateInstanceAccess(instanceId, context)
+ _mandateId, _ = _validateInstanceAccess(instanceId, context)
edit = _pendingEditsStore.forInstance(instanceId).get(editId)
if not edit:
raise HTTPException(status_code=404, detail=f"Edit proposal {editId} not found")
@@ -1862,7 +1861,7 @@ async def acceptEdit(
try:
success = dbMgmt.updateFileData(edit.fileId, edit.newContent.encode("utf-8"))
if not success:
- raise HTTPException(status_code=500, detail="Failed to update file data")
+ raise HTTPException(status_code=500, detail=routeApiMsg("Failed to update file data"))
except HTTPException:
raise
except Exception as e:
@@ -1888,7 +1887,7 @@ async def rejectEdit(
context: RequestContext = Depends(getRequestContext),
):
"""Reject a proposed file edit -- discards the change."""
- _validateInstanceAccess(instanceId, context)
+ _mandateId, _ = _validateInstanceAccess(instanceId, context)
edit = _pendingEditsStore.forInstance(instanceId).get(editId)
if not edit:
raise HTTPException(status_code=404, detail=f"Edit proposal {editId} not found")
@@ -1913,7 +1912,7 @@ async def acceptAllEdits(
context: RequestContext = Depends(getRequestContext),
):
"""Accept all pending file edit proposals for this instance."""
- _validateInstanceAccess(instanceId, context)
+ _mandateId, _ = _validateInstanceAccess(instanceId, context)
instanceEdits = _pendingEditsStore.forInstance(instanceId)
dbMgmt = _getDbManagement(context, instanceId)
accepted = []
@@ -1944,7 +1943,7 @@ async def rejectAllEdits(
context: RequestContext = Depends(getRequestContext),
):
"""Reject all pending file edit proposals for this instance."""
- _validateInstanceAccess(instanceId, context)
+ _mandateId, _ = _validateInstanceAccess(instanceId, context)
instanceEdits = _pendingEditsStore.forInstance(instanceId)
rejected = []
@@ -2000,7 +1999,7 @@ async def updateGeneralSettings(
context: RequestContext = Depends(getRequestContext),
):
"""Update general workspace settings for the current user."""
- _validateInstanceAccess(instanceId, context)
+ _mandateId, _ = _validateInstanceAccess(instanceId, context)
wsInterface = _getWorkspaceInterface(context, instanceId)
userId = str(context.user.id)
diff --git a/modules/interfaces/interfaceBootstrap.py b/modules/interfaces/interfaceBootstrap.py
index b3b8bcd0..ee03ae01 100644
--- a/modules/interfaces/interfaceBootstrap.py
+++ b/modules/interfaces/interfaceBootstrap.py
@@ -25,6 +25,7 @@ from modules.datamodels.datamodelRbac import (
AccessRuleContext,
Role,
)
+from modules.datamodels.datamodelUtils import coerce_text_multilingual
from modules.datamodels.datamodelUam import AccessLevel
from modules.datamodels.datamodelMembership import (
UserMandate,
@@ -38,13 +39,21 @@ pwdContext = CryptContext(schemes=["argon2"], deprecated="auto")
# Cache für Role-IDs (roleLabel -> roleId)
_roleIdCache: Dict[str, str] = {}
+_bootstrapDone: bool = False
+
def initBootstrap(db: DatabaseConnector) -> None:
"""
Main bootstrap entry point - initializes all system components.
+ Idempotent: runs only once per process regardless of how many callers invoke it.
Args:
db: Database connector instance
"""
+ global _bootstrapDone
+ if _bootstrapDone:
+ return
+ _bootstrapDone = True
+
logger.info("Starting system bootstrap")
# Initialize root mandate
@@ -88,9 +97,6 @@ def initBootstrap(db: DatabaseConnector) -> None:
# Apply multi-tenant database optimizations (indexes, triggers, FKs)
_applyDatabaseOptimizations(db)
- # Seed automation templates (after admin user exists)
- initAutomationTemplates(db, adminUserId)
-
# Run root-user migration (one-time, sets completion flag)
migrationDone = False
try:
@@ -147,85 +153,136 @@ def initBootstrap(db: DatabaseConnector) -> None:
except Exception as e:
logger.warning(f"Mandate retention purge failed: {e}")
+ # Bootstrap system workflow templates for graphical editor
+ _bootstrapSystemTemplates(db)
-def initAutomationTemplates(dbApp: DatabaseConnector, adminUserId: Optional[str] = None) -> None:
+ # Ensure billing settings and accounts exist for all mandates
+ _bootstrapBilling()
+
+
+def _bootstrapBilling() -> None:
"""
- Seed initial automation templates from subAutomationTemplates.py.
- Only runs if no templates exist yet (bootstrap).
- Creates templates with sysCreatedBy = admin user (SysAdmin privilege).
-
- NOTE: AutomationTemplate lives in poweron_automation database, not poweron_app!
-
- Args:
- dbApp: Database connector for poweron_app (used to get admin user if needed)
- adminUserId: Admin user ID for sysCreatedBy field
+ Ensure billing settings and accounts exist for all mandates.
+ Idempotent: only creates missing settings/accounts.
"""
- import json
- from modules.features.automation.subAutomationTemplates import AUTOMATION_TEMPLATES
- from modules.features.automation.datamodelFeatureAutomation import AutomationTemplate
- from modules.shared.configuration import APP_CONFIG
-
- # Create connector for poweron_automation database (where templates live)
- dbHost = APP_CONFIG.get("DB_HOST", "_no_config_default_data")
- dbDatabase = "poweron_automation"
- dbUser = APP_CONFIG.get("DB_USER")
- dbPassword = APP_CONFIG.get("DB_PASSWORD_SECRET")
- dbPort = int(APP_CONFIG.get("DB_PORT", 5432))
-
- dbAutomation = DatabaseConnector(
- dbHost=dbHost,
- dbDatabase=dbDatabase,
- dbUser=dbUser,
- dbPassword=dbPassword,
- dbPort=dbPort,
- userId=adminUserId,
- )
- dbAutomation.initDbSystem()
-
- # Check if templates already exist in poweron_automation
- existing = dbAutomation.getRecordset(AutomationTemplate)
- if existing:
- logger.info(f"Automation templates already seeded ({len(existing)} templates)")
- return
-
- # Get admin user ID if not provided (from poweron_app)
- if not adminUserId:
- adminUsers = dbApp.getRecordset(UserInDB, recordFilter={"email": APP_CONFIG.ADMIN_EMAIL})
- adminUserId = adminUsers[0]["id"] if adminUsers else None
- # Update context with admin user
- if adminUserId:
- dbAutomation.updateContext(adminUserId)
-
- templates = AUTOMATION_TEMPLATES.get("sets", [])
- createdCount = 0
-
- for i, templateSet in enumerate(templates):
- templateContent = templateSet.get("template", {})
- overview = templateContent.get("overview", f"Template {i+1}")
-
- # Create multilingual label from overview (use as German since current templates are German)
- # English is required by TextMultilingual, so we use the same value
- labelDict = {"en": overview, "ge": overview}
- overviewDict = {"en": overview, "ge": overview}
-
- # Create template WITHOUT parameters (no sharp values)
- templateData = {
- "label": labelDict,
- "overview": overviewDict,
- "template": json.dumps(templateContent), # Store entire template JSON
- "isSystem": True, # Seeded templates are system-level, visible to all users
- }
-
- try:
- dbAutomation.recordCreate(AutomationTemplate, templateData)
- createdCount += 1
- logger.debug(f"Created automation template: {overview}")
- except Exception as e:
- logger.error(f"Failed to create automation template '{overview}': {e}")
-
- logger.info(f"Seeded {createdCount} automation templates in poweron_automation database")
-
- logger.info("System bootstrap completed")
+ try:
+ from modules.interfaces.interfaceDbBilling import _getRootInterface as getBillingRootInterface
+
+ billingInterface = getBillingRootInterface()
+
+ settingsCreated = billingInterface.ensureAllMandateSettingsExist()
+ if settingsCreated > 0:
+ logger.info(f"Billing bootstrap: Created {settingsCreated} missing mandate billing settings")
+
+ accountsCreated = billingInterface.ensureAllUserAccountsExist()
+ if accountsCreated > 0:
+ logger.info(f"Billing bootstrap: Created {accountsCreated} missing user accounts")
+
+ except Exception as e:
+ logger.warning(f"Billing bootstrap failed (non-critical): {e}")
+
+
+def _bootstrapSystemTemplates(db: DatabaseConnector) -> None:
+ """
+ Seed platform-wide workflow templates (templateScope='system', mandateId=None).
+ Idempotent: skips if templates with the same label already exist.
+ """
+ try:
+ from modules.features.graphicalEditor.datamodelFeatureGraphicalEditor import AutoWorkflow
+ import uuid
+
+ greenfieldDb = DatabaseConnector(
+ dbHost=APP_CONFIG.get("DB_HOST", "localhost"),
+ dbDatabase="poweron_graphicaleditor",
+ dbUser=APP_CONFIG.get("DB_USER"),
+ dbPassword=APP_CONFIG.get("DB_PASSWORD_SECRET") or APP_CONFIG.get("DB_PASSWORD"),
+ )
+ greenfieldDb._ensureTableExists(AutoWorkflow)
+
+ existing = greenfieldDb.getRecordset(AutoWorkflow, recordFilter={
+ "isTemplate": True,
+ "templateScope": "system",
+ })
+ existingLabels = {r.get("label") if isinstance(r, dict) else getattr(r, "label", "") for r in (existing or [])}
+
+ templates = _buildSystemTemplates()
+ created = 0
+ for tpl in templates:
+ if tpl["label"] in existingLabels:
+ continue
+ tpl["id"] = str(uuid.uuid4())
+ greenfieldDb.recordCreate(AutoWorkflow, tpl)
+ created += 1
+
+ if created:
+ logger.info(f"Bootstrapped {created} system workflow template(s)")
+ greenfieldDb.close()
+ except Exception as e:
+ logger.warning(f"System workflow template bootstrap failed: {e}")
+
+
+def _buildSystemTemplates():
+ """Build the graph definitions for platform system templates."""
+ return [
+ {
+ "label": "Personal Assistant: E-Mail-Antwort-Drafting",
+ "mandateId": None,
+ "featureInstanceId": None,
+ "isTemplate": True,
+ "templateScope": "system",
+ "sharedReadOnly": True,
+ "active": False,
+ "graph": {
+ "nodes": [
+ {"id": "n1", "type": "trigger.schedule", "x": 50, "y": 200, "title": "Täglicher Check", "parameters": {}},
+ {"id": "n2", "type": "email.checkEmail", "x": 300, "y": 200, "title": "Mailbox prüfen", "parameters": {}},
+ {"id": "n3", "type": "flow.loop", "x": 550, "y": 200, "title": "Pro E-Mail", "parameters": {}},
+ {"id": "n4", "type": "ai.prompt", "x": 800, "y": 200, "title": "Analyse: Antwort nötig?", "parameters": {}},
+ {"id": "n5", "type": "flow.ifElse", "x": 1050, "y": 200, "title": "Antwort nötig?", "parameters": {}},
+ {"id": "n6", "type": "ai.prompt", "x": 1300, "y": 100, "title": "Kontext abrufen & Antwort formulieren", "parameters": {}},
+ {"id": "n7", "type": "email.draftEmail", "x": 1550, "y": 100, "title": "Draft erstellen", "parameters": {}},
+ ],
+ "connections": [
+ {"source": "n1", "target": "n2", "sourceOutput": 0, "targetInput": 0},
+ {"source": "n2", "target": "n3", "sourceOutput": 0, "targetInput": 0},
+ {"source": "n3", "target": "n4", "sourceOutput": 0, "targetInput": 0},
+ {"source": "n4", "target": "n5", "sourceOutput": 0, "targetInput": 0},
+ {"source": "n5", "target": "n6", "sourceOutput": 0, "targetInput": 0},
+ {"source": "n6", "target": "n7", "sourceOutput": 0, "targetInput": 0},
+ ],
+ },
+ "invocations": [{"type": "schedule", "cronExpression": "0 8 * * 1-5"}],
+ },
+ {
+ "label": "Treuhand: PDF-Klassifizierung & Trustee-Import",
+ "mandateId": None,
+ "featureInstanceId": None,
+ "isTemplate": True,
+ "templateScope": "system",
+ "sharedReadOnly": True,
+ "active": False,
+ "graph": {
+ "nodes": [
+ {"id": "n1", "type": "trigger.schedule", "x": 50, "y": 200, "title": "Geplanter Import", "parameters": {}},
+ {"id": "n2", "type": "sharepoint.listFiles", "x": 300, "y": 200, "title": "SharePoint Ordner lesen", "parameters": {}},
+ {"id": "n3", "type": "flow.loop", "x": 550, "y": 200, "title": "Pro Dokument", "parameters": {}},
+ {"id": "n4", "type": "sharepoint.readFile", "x": 800, "y": 200, "title": "PDF-Inhalt lesen", "parameters": {}},
+ {"id": "n5", "type": "ai.prompt", "x": 1050, "y": 200, "title": "Typ klassifizieren (Rechnung, Beleg, Bankauszug, Vertrag, etc.)", "parameters": {}},
+ {"id": "n6", "type": "trustee.extractFromFiles", "x": 1300, "y": 200, "title": "Dokument extrahieren", "parameters": {}},
+ {"id": "n7", "type": "trustee.processDocuments", "x": 1550, "y": 200, "title": "In Trustee einlesen", "parameters": {}},
+ ],
+ "connections": [
+ {"source": "n1", "target": "n2", "sourceOutput": 0, "targetInput": 0},
+ {"source": "n2", "target": "n3", "sourceOutput": 0, "targetInput": 0},
+ {"source": "n3", "target": "n4", "sourceOutput": 0, "targetInput": 0},
+ {"source": "n4", "target": "n5", "sourceOutput": 0, "targetInput": 0},
+ {"source": "n5", "target": "n6", "sourceOutput": 0, "targetInput": 0},
+ {"source": "n6", "target": "n7", "sourceOutput": 0, "targetInput": 0},
+ ],
+ },
+ "invocations": [{"type": "schedule", "cronExpression": "0 7 * * 1-5"}],
+ },
+ ]
def initRootMandateFeatures(db: DatabaseConnector, mandateId: str) -> None:
@@ -491,7 +548,7 @@ def initRoles(db: DatabaseConnector) -> None:
standardRoles = [
Role(
roleLabel="admin",
- description={"en": "Administrator - Manage users and resources within mandate scope", "de": "Administrator - Benutzer und Ressourcen im Mandanten verwalten", "fr": "Administrateur - Gérer les utilisateurs et ressources dans le périmètre du mandat"},
+ description=coerce_text_multilingual("Administrator - Benutzer und Ressourcen im Mandanten verwalten"),
mandateId=None, # Global template role
featureInstanceId=None,
featureCode=None,
@@ -499,7 +556,7 @@ def initRoles(db: DatabaseConnector) -> None:
),
Role(
roleLabel="user",
- description={"en": "User - Standard user with access to own records", "de": "Benutzer - Standard-Benutzer mit Zugriff auf eigene Datensätze", "fr": "Utilisateur - Utilisateur standard avec accès à ses propres enregistrements"},
+ description=coerce_text_multilingual("Benutzer - Standard-Benutzer mit Zugriff auf eigene Datensätze"),
mandateId=None, # Global template role
featureInstanceId=None,
featureCode=None,
@@ -507,7 +564,7 @@ def initRoles(db: DatabaseConnector) -> None:
),
Role(
roleLabel="viewer",
- description={"en": "Viewer - Read-only access to group records", "de": "Betrachter - Nur-Lese-Zugriff auf Gruppen-Datensätze", "fr": "Visualiseur - Accès en lecture seule aux enregistrements du groupe"},
+ description=coerce_text_multilingual("Betrachter - Nur-Lese-Zugriff auf Gruppen-Datensätze"),
mandateId=None, # Global template role
featureInstanceId=None,
featureCode=None,
@@ -517,7 +574,7 @@ def initRoles(db: DatabaseConnector) -> None:
# Check specifically for system template roles:
# mandateId=NULL, isSystemRole=True, featureCode=NULL
- # Feature templates (e.g. automation admin) share the same labels but have featureCode set!
+ # Feature templates share the same labels but have featureCode set!
allTemplates = db.getRecordset(
Role,
recordFilter={"mandateId": None, "isSystemRole": True}
@@ -549,7 +606,7 @@ def _deduplicateRoles(db: DatabaseConnector) -> None:
# Group by (roleLabel, mandateId, featureInstanceId, featureCode)
# featureCode is essential: system template ('admin', None, None, None)
- # must NOT be grouped with feature template ('admin', None, None, 'automation')
+ # must NOT be grouped with feature template ('admin', None, None, '')
groups: dict = {}
for role in allRoles:
key = (role.get("roleLabel"), role.get("mandateId"), role.get("featureInstanceId"), role.get("featureCode"))
@@ -672,7 +729,7 @@ def copySystemRolesToMandate(db: DatabaseConnector, mandateId: str) -> int:
newRole = Role(
id=newRoleId,
roleLabel=roleLabel,
- description=templateRole.get("description", {}),
+ description=coerce_text_multilingual(templateRole.get("description", {})),
mandateId=mandateId,
featureInstanceId=None,
featureCode=None,
@@ -741,11 +798,7 @@ def _initSysAdminRole(db: DatabaseConnector, mandateId: str) -> Optional[str]:
logger.info("Creating sysadmin role in root mandate")
sysadminRole = Role(
roleLabel="sysadmin",
- description={
- "en": "System Administrator - Full administrative access across all mandates",
- "de": "System-Administrator - Vollständiger administrativer Zugriff über alle Mandanten",
- "fr": "Administrateur système - Accès administratif complet à tous les mandats"
- },
+ description=coerce_text_multilingual("System-Administrator - Vollständiger administrativer Zugriff über alle Mandanten"),
mandateId=mandateId,
featureInstanceId=None,
featureCode=None,
@@ -986,12 +1039,11 @@ def _createTableSpecificRules(db: DatabaseConnector) -> None:
# - data.uam.* → User Access Management (mandantenübergreifend)
# - data.chat.* → Chat/AI-Daten (benutzer-eigen, kein Mandantenkontext)
# - data.files.* → Dateien (benutzer-eigen)
- # - data.automation.* → Automation (benutzer-eigen)
# - data.feature.* → Mandanten-/Feature-spezifische Daten (dynamisch)
#
# GROUP-Berechtigung:
# - data.uam.*: GROUP filtert nach Mandant (via UserMandate)
- # - data.chat.*, data.files.*, data.automation.*: GROUP = MY (benutzer-eigen)
+ # - data.chat.*, data.files.*: GROUP = MY (benutzer-eigen)
# ==========================================================================
# -------------------------------------------------------------------------
@@ -1248,70 +1300,6 @@ def _createTableSpecificRules(db: DatabaseConnector) -> None:
delete=AccessLevel.NONE,
))
- # -------------------------------------------------------------------------
- # Automation Namespace - User-owned, no mandate context
- # -------------------------------------------------------------------------
-
- # AutomationDefinition: Only MY-level access (user-owned)
- for roleId in [adminId, userId]:
- if roleId:
- tableRules.append(AccessRule(
- roleId=roleId,
- context=AccessRuleContext.DATA,
- item="data.automation.AutomationDefinition",
- view=True,
- read=AccessLevel.MY,
- create=AccessLevel.MY,
- update=AccessLevel.MY,
- delete=AccessLevel.MY,
- ))
- if viewerId:
- tableRules.append(AccessRule(
- roleId=viewerId,
- context=AccessRuleContext.DATA,
- item="data.automation.AutomationDefinition",
- view=True,
- read=AccessLevel.MY,
- create=AccessLevel.NONE,
- update=AccessLevel.NONE,
- delete=AccessLevel.NONE,
- ))
-
- # AutomationTemplate: Admin sees ALL (system templates), User sees only MY
- if adminId:
- tableRules.append(AccessRule(
- roleId=adminId,
- context=AccessRuleContext.DATA,
- item="data.automation.AutomationTemplate",
- view=True,
- read=AccessLevel.ALL, # SysAdmin sees all templates
- create=AccessLevel.ALL,
- update=AccessLevel.ALL,
- delete=AccessLevel.ALL,
- ))
- if userId:
- tableRules.append(AccessRule(
- roleId=userId,
- context=AccessRuleContext.DATA,
- item="data.automation.AutomationTemplate",
- view=True,
- read=AccessLevel.MY,
- create=AccessLevel.MY,
- update=AccessLevel.MY,
- delete=AccessLevel.MY,
- ))
- if viewerId:
- tableRules.append(AccessRule(
- roleId=viewerId,
- context=AccessRuleContext.DATA,
- item="data.automation.AutomationTemplate",
- view=True,
- read=AccessLevel.ALL, # Viewer can see all templates (read-only)
- create=AccessLevel.NONE,
- update=AccessLevel.NONE,
- delete=AccessLevel.NONE,
- ))
-
# -------------------------------------------------------------------------
# Billing Namespace - Billing accounts and transactions
# -------------------------------------------------------------------------
@@ -1623,12 +1611,6 @@ def _ensureDataContextRules(db: DatabaseConnector) -> None:
# Users can only manage their own records (MY-level access)
tablesNeedingMyRules = [
"data.chat.ChatWorkflow",
- "data.automation.AutomationDefinition",
- ]
-
- # Tables where admin sees ALL (system-wide templates)
- tablesNeedingAllRulesForAdmin = [
- "data.automation.AutomationTemplate",
]
# Billing tables: read-only for all roles, scoped by role level
@@ -1681,47 +1663,6 @@ def _ensureDataContextRules(db: DatabaseConnector) -> None:
delete=AccessLevel.NONE,
))
- # Admin rules for system templates (read ALL, write GROUP-scoped)
- for objectKey in tablesNeedingAllRulesForAdmin:
- # Admin: read ALL templates, create/update/delete within GROUP (mandate-scoped)
- if adminId and (adminId, objectKey) not in existingCombinations:
- missingRules.append(AccessRule(
- roleId=adminId,
- context=AccessRuleContext.DATA,
- item=objectKey,
- view=True,
- read=AccessLevel.ALL,
- create=AccessLevel.GROUP,
- update=AccessLevel.GROUP,
- delete=AccessLevel.GROUP,
- ))
-
- # User: MY-level access
- if userId and (userId, objectKey) not in existingCombinations:
- missingRules.append(AccessRule(
- roleId=userId,
- context=AccessRuleContext.DATA,
- item=objectKey,
- view=True,
- read=AccessLevel.MY,
- create=AccessLevel.MY,
- update=AccessLevel.MY,
- delete=AccessLevel.MY,
- ))
-
- # Viewer: ALL read-only (can see all templates)
- if viewerId and (viewerId, objectKey) not in existingCombinations:
- missingRules.append(AccessRule(
- roleId=viewerId,
- context=AccessRuleContext.DATA,
- item=objectKey,
- view=True,
- read=AccessLevel.ALL,
- create=AccessLevel.NONE,
- update=AccessLevel.NONE,
- delete=AccessLevel.NONE,
- ))
-
# Billing read-only rules: Admin=GROUP, User/Viewer=MY (own accounts/transactions)
for objectKey in billingReadOnlyTables:
# Admin: GROUP-level read (sees all accounts in their mandates)
@@ -1806,59 +1747,6 @@ def _ensureDataContextRules(db: DatabaseConnector) -> None:
logger.info(f"Created {len(missingRules)} missing DATA context rules")
# All DATA context rules already exist (nothing to create)
- # Update existing AutomationTemplate rules for admin/viewer to ALL access
- _updateAutomationTemplateRulesToAll(db, adminId, viewerId)
-
-
-def _updateAutomationTemplateRulesToAll(db: DatabaseConnector, adminId: Optional[str], viewerId: Optional[str]) -> None:
- """
- Update existing AutomationTemplate RBAC rules to correct levels.
- - Admin: read=ALL, create/update/delete=GROUP (mandate-scoped writes)
- - Viewer: read=ALL (read-only)
- """
- if not adminId and not viewerId:
- return
-
- templateObjectKey = "data.automation.AutomationTemplate"
-
- # Find existing rules for AutomationTemplate
- existingRules = db.getRecordset(
- AccessRule,
- recordFilter={
- "context": AccessRuleContext.DATA.value,
- "item": templateObjectKey
- }
- )
-
- updatedCount = 0
- for rule in existingRules:
- ruleId = rule.get("id")
- roleId = rule.get("roleId")
- currentReadLevel = rule.get("read")
-
- if roleId == adminId:
- # Admin: read ALL, write GROUP
- updates = {}
- if currentReadLevel != AccessLevel.ALL.value:
- updates["read"] = AccessLevel.ALL.value
- currentCreate = rule.get("create")
- if currentCreate == AccessLevel.ALL.value:
- updates["create"] = AccessLevel.GROUP.value
- updates["update"] = AccessLevel.GROUP.value
- updates["delete"] = AccessLevel.GROUP.value
- if updates:
- db.recordModify(AccessRule, ruleId, updates)
- updatedCount += 1
- logger.debug(f"Updated AutomationTemplate rule {ruleId} for admin to read=ALL, write=GROUP")
-
- elif roleId == viewerId and currentReadLevel == AccessLevel.MY.value:
- # Viewer: read ALL (read-only)
- db.recordModify(AccessRule, ruleId, {"read": AccessLevel.ALL.value})
- updatedCount += 1
- logger.debug(f"Updated AutomationTemplate rule {ruleId} for viewer to read=ALL")
-
- if updatedCount > 0:
- logger.info(f"Updated {updatedCount} AutomationTemplate RBAC rules")
def _createResourceContextRules(db: DatabaseConnector) -> None:
@@ -2002,7 +1890,6 @@ def _createStoreResourceRules(db: DatabaseConnector) -> None:
db: Database connector instance
"""
storeResources = [
- "resource.store.automation",
"resource.store.teamsbot",
"resource.store.workspace",
"resource.store.commcoach",
diff --git a/modules/interfaces/interfaceDbApp.py b/modules/interfaces/interfaceDbApp.py
index d52c23d6..d4cb5b08 100644
--- a/modules/interfaces/interfaceDbApp.py
+++ b/modules/interfaces/interfaceDbApp.py
@@ -18,7 +18,6 @@ import uuid
from modules.connectors.connectorDbPostgre import DatabaseConnector, _get_cached_connector
from modules.shared.configuration import APP_CONFIG
from modules.shared.timeUtils import getUtcTimestamp, parseTimestamp
-from modules.interfaces.interfaceBootstrap import initBootstrap
from modules.interfaces.interfaceRbac import getRecordsetWithRBAC
from modules.security.rbac import RbacClass
from modules.datamodels.datamodelUam import (
@@ -55,8 +54,6 @@ _gatewayInterfaces = {}
# Root interface instance
_rootAppObjects = None
-# Bootstrap completion flag - ensures bootstrap runs only ONCE per application lifecycle
-_bootstrapCompleted = False
# Password-Hashing
pwdContext = CryptContext(schemes=["argon2"], deprecated="auto")
@@ -79,9 +76,6 @@ class AppObjects:
# Initialize database
self._initializeDatabase()
- # Initialize standard records if needed
- self._initRecords()
-
# Set user context if provided
if currentUser:
self.setUserContext(currentUser)
@@ -195,29 +189,6 @@ class AppObjects:
return simpleFields, objectFields
- def _initRecords(self):
- """Initialize standard records if they don't exist.
-
- Uses a global flag to ensure bootstrap only runs ONCE per application lifecycle.
- The flag is set BEFORE calling bootstrap to prevent recursive calls during bootstrap.
- """
- global _bootstrapCompleted
-
- if _bootstrapCompleted:
- return
-
- # Set flag BEFORE bootstrap to prevent recursive calls during bootstrap
- _bootstrapCompleted = True
- logger.info("Starting bootstrap (will only run once)")
-
- try:
- initBootstrap(self.db)
- logger.info("Bootstrap completed successfully")
- except Exception as e:
- # Reset flag on failure so bootstrap can be retried
- _bootstrapCompleted = False
- logger.error(f"Bootstrap failed: {e}")
- raise
def checkRbacPermission(
@@ -592,6 +563,46 @@ class AppObjects:
logger.error(f"Error getting user by ID: {str(e)}")
return None
+ def getUsersByIds(self, userIds: list[str]) -> dict[str, User]:
+ """Batch-load users by IDs in a single SQL query (id = ANY(...)).
+ Returns {userId: User} dict. Skips IDs not found or not accessible."""
+ if not userIds:
+ return {}
+ try:
+ uniqueIds = list(set(userIds))
+ records = self.db.getRecordset(UserInDB, recordFilter={"id": uniqueIds})
+ result: dict[str, User] = {}
+ for rec in (records or []):
+ cleaned = dict(rec)
+ if cleaned.get("roleLabels") is None:
+ cleaned["roleLabels"] = []
+ uid = cleaned.get("id")
+ if uid:
+ result[uid] = User(**cleaned)
+ return result
+ except Exception as e:
+ logger.error(f"Error batch-loading users: {e}")
+ return {}
+
+ def getMandatesByIds(self, mandateIds: list[str]) -> dict[str, Mandate]:
+ """Batch-load mandates by IDs in a single SQL query (id = ANY(...)).
+ Returns {mandateId: Mandate} dict."""
+ if not mandateIds:
+ return {}
+ try:
+ uniqueIds = list(set(mandateIds))
+ records = self.db.getRecordset(Mandate, recordFilter={"id": uniqueIds})
+ result: dict[str, Mandate] = {}
+ for rec in (records or []):
+ cleaned = dict(rec)
+ mid = cleaned.get("id")
+ if mid:
+ result[mid] = Mandate(**cleaned)
+ return result
+ except Exception as e:
+ logger.error(f"Error batch-loading mandates: {e}")
+ return {}
+
def _getUserForAuthentication(self, username: str) -> Optional[Dict[str, Any]]:
"""
Get user record by username for authentication purposes.
@@ -655,7 +666,7 @@ class AppObjects:
password: str = None,
email: str = None,
fullName: str = None,
- language: str = "en",
+ language: str = "de",
enabled: bool = True,
authenticationAuthority: AuthAuthority = AuthAuthority.LOCAL,
externalId: str = None,
@@ -1986,6 +1997,8 @@ class AppObjects:
self._ensureUserBillingAccount(userId, mandateId)
self._syncSubscriptionQuantity(mandateId)
+ if not skipCapacityCheck:
+ self._adjustAiBudgetForUserChange(mandateId, delta=+1)
cleanedRecord = dict(createdRecord)
return UserMandate(**cleanedRecord)
@@ -2049,6 +2062,23 @@ class AppObjects:
raise
logger.debug(f"Subscription quantity sync skipped: {e}")
+ def _adjustAiBudgetForUserChange(self, mandateId: str, delta: int) -> None:
+ """Pro-rata AI budget credit/debit when a user is added or removed mid-cycle."""
+ try:
+ from modules.interfaces.interfaceDbSubscription import getInterface as getSubInterface
+ from modules.interfaces.interfaceDbBilling import getInterface as getBillingInterface
+ from modules.security.rootAccess import getRootUser
+ rootUser = getRootUser()
+ subIf = getSubInterface(rootUser, mandateId)
+ operative = subIf.getOperativeForMandate(mandateId)
+ if not operative:
+ return
+ planKey = operative.get("planKey", "")
+ billingIf = getBillingInterface(rootUser)
+ billingIf.adjustAiBudgetForUserChange(mandateId, planKey, delta)
+ except Exception as e:
+ logger.debug(f"AI budget adjustment skipped: {e}")
+
def deleteUserMandate(self, userId: str, mandateId: str) -> bool:
"""
Delete a UserMandate record (remove user from mandate).
@@ -2086,7 +2116,10 @@ class AppObjects:
if accId:
self.db.recordDelete(FeatureAccess, accId)
- return self.db.recordDelete(UserMandate, existing.id)
+ result = self.db.recordDelete(UserMandate, existing.id)
+ self._syncSubscriptionQuantity(mandateId)
+ self._adjustAiBudgetForUserChange(mandateId, delta=-1)
+ return result
except Exception as e:
logger.error(f"Error deleting UserMandate: {e}")
raise ValueError(f"Failed to delete UserMandate: {e}")
@@ -3744,8 +3777,8 @@ class AppObjects:
if conflictingRole and conflictingRole.id != roleId:
raise ValueError(f"Role with label '{role.roleLabel}' already exists")
- # Exclude id from model_dump - the URL roleId is authoritative
- updatedRole = self.db.recordModify(Role, roleId, role.model_dump(exclude={"id"}))
+ _IMMUTABLE_ROLE_FIELDS = {"id", "mandateId", "featureInstanceId", "featureCode", "isSystemRole"}
+ updatedRole = self.db.recordModify(Role, roleId, role.model_dump(exclude=_IMMUTABLE_ROLE_FIELDS))
logger.info(f"Updated role with ID {roleId}")
return Role(**updatedRole)
except Exception as e:
diff --git a/modules/interfaces/interfaceDbBilling.py b/modules/interfaces/interfaceDbBilling.py
index 1ea1786a..342c98c0 100644
--- a/modules/interfaces/interfaceDbBilling.py
+++ b/modules/interfaces/interfaceDbBilling.py
@@ -964,33 +964,39 @@ class BillingObjects:
# =========================================================================
def creditSubscriptionBudget(self, mandateId: str, planKey: str, periodLabel: str = "") -> Optional[Dict[str, Any]]:
- """Credit the plan's budgetAiCHF to the mandate pool account.
+ """Credit AI budget to the mandate pool account.
+ Amount = budgetAiPerUserCHF * activeUsers (dynamic, not the static plan.budgetAiCHF).
Should be called once per billing period (initial activation + each invoice.paid).
Returns the created CREDIT transaction or None if budget is 0."""
from modules.datamodels.datamodelSubscription import _getPlan
plan = _getPlan(planKey)
- if not plan or not plan.budgetAiCHF or plan.budgetAiCHF <= 0:
+ if not plan or not plan.budgetAiPerUserCHF or plan.budgetAiPerUserCHF <= 0:
return None
+ from modules.interfaces.interfaceDbSubscription import _getRootInterface as _getSubRoot
+ subRoot = _getSubRoot()
+ activeUsers = max(subRoot.countActiveUsers(mandateId), 1)
+ amount = plan.budgetAiPerUserCHF * activeUsers
+
poolAccount = self.getOrCreateMandateAccount(mandateId)
- description = f"AI-Budget ({planKey})"
+ description = f"AI-Budget ({planKey}, {activeUsers} User)"
if periodLabel:
description += f" – {periodLabel}"
transaction = BillingTransaction(
accountId=poolAccount["id"],
transactionType=TransactionTypeEnum.CREDIT,
- amount=plan.budgetAiCHF,
+ amount=amount,
description=description,
referenceType=ReferenceTypeEnum.SUBSCRIPTION,
referenceId=mandateId,
)
created = self.createTransaction(transaction)
logger.info(
- "AI-Budget credited mandate=%s plan=%s amount=%.2f CHF",
- mandateId, planKey, plan.budgetAiCHF,
+ "AI-Budget credited mandate=%s plan=%s users=%d amount=%.2f CHF",
+ mandateId, planKey, activeUsers, amount,
)
return created
@@ -1013,6 +1019,71 @@ class BillingObjects:
return self.creditSubscriptionBudget(mandateId, planKey, periodLabel="Erstaktivierung")
+ def adjustAiBudgetForUserChange(self, mandateId: str, planKey: str, delta: int) -> Optional[Dict[str, Any]]:
+ """Pro-rata AI budget adjustment when users are added/removed mid-cycle.
+
+ delta > 0: user added -> CREDIT pro-rata portion
+ delta < 0: user removed -> DEBIT pro-rata portion
+ """
+ from modules.datamodels.datamodelSubscription import _getPlan
+
+ plan = _getPlan(planKey)
+ if not plan or not plan.budgetAiPerUserCHF or plan.budgetAiPerUserCHF <= 0:
+ return None
+
+ from modules.interfaces.interfaceDbSubscription import _getRootInterface as _getSubRoot
+ subRoot = _getSubRoot()
+ operative = subRoot.getOperativeForMandate(mandateId)
+ if not operative:
+ return None
+
+ periodStart = operative.get("currentPeriodStart")
+ periodEnd = operative.get("currentPeriodEnd")
+ if not periodStart or not periodEnd:
+ return None
+
+ if isinstance(periodStart, str):
+ periodStart = datetime.fromisoformat(periodStart)
+ if isinstance(periodEnd, str):
+ periodEnd = datetime.fromisoformat(periodEnd)
+ if periodStart.tzinfo is None:
+ periodStart = periodStart.replace(tzinfo=timezone.utc)
+ if periodEnd.tzinfo is None:
+ periodEnd = periodEnd.replace(tzinfo=timezone.utc)
+
+ now = datetime.now(timezone.utc)
+ totalSeconds = (periodEnd - periodStart).total_seconds()
+ remainingSeconds = max((periodEnd - now).total_seconds(), 0)
+ proRataFraction = remainingSeconds / totalSeconds if totalSeconds > 0 else 0
+
+ amount = round(abs(delta) * plan.budgetAiPerUserCHF * proRataFraction, 2)
+ if amount <= 0:
+ return None
+
+ poolAccount = self.getOrCreateMandateAccount(mandateId)
+
+ if delta > 0:
+ txType = TransactionTypeEnum.CREDIT
+ label = f"AI-Budget pro-rata +{abs(delta)} User ({planKey})"
+ else:
+ txType = TransactionTypeEnum.DEBIT
+ label = f"AI-Budget pro-rata -{abs(delta)} User ({planKey})"
+
+ transaction = BillingTransaction(
+ accountId=poolAccount["id"],
+ transactionType=txType,
+ amount=amount,
+ description=label,
+ referenceType=ReferenceTypeEnum.SUBSCRIPTION,
+ referenceId=mandateId,
+ )
+ created = self.createTransaction(transaction)
+ logger.info(
+ "AI-Budget pro-rata %s mandate=%s delta=%+d amount=%.2f CHF (fraction=%.4f)",
+ txType.value, mandateId, delta, amount, proRataFraction,
+ )
+ return created
+
# =========================================================================
# Workflow Cost Query
# =========================================================================
@@ -1416,6 +1487,355 @@ class BillingObjects:
return balances
+ @staticmethod
+ def _mapPaginationColumns(pagination: PaginationParams) -> PaginationParams:
+ """Remap frontend column names to DB column names in filters and sort."""
+ _COL_MAP = {"createdAt": "sysCreatedAt"}
+ _ENRICHED_COLS = {"mandateName", "userName", "mandateId", "userId"}
+ import copy
+ p = copy.deepcopy(pagination)
+ if p.filters:
+ mapped = {}
+ for k, v in p.filters.items():
+ if k in _ENRICHED_COLS:
+ continue
+ mapped[_COL_MAP.get(k, k)] = v
+ p.filters = mapped
+ if p.sort:
+ mapped = []
+ for s in p.sort:
+ field = s.get("field", "") if isinstance(s, dict) else getattr(s, "field", "")
+ if field in _ENRICHED_COLS:
+ continue
+ newField = _COL_MAP.get(field, field)
+ if isinstance(s, dict):
+ mapped.append({**s, "field": newField})
+ else:
+ mapped.append({"field": newField, "direction": getattr(s, "direction", "asc")})
+ p.sort = mapped if mapped else [{"field": "sysCreatedAt", "direction": "desc"}]
+ return p
+
+ def getTransactionsForMandatesPaginated(
+ self,
+ mandateIds: Optional[List[str]],
+ pagination: PaginationParams,
+ scope: str = "all",
+ userId: Optional[str] = None,
+ ) -> PaginatedResult:
+ """
+ SQL-level paginated transactions across multiple mandates.
+ Single SQL query with WHERE accountId = ANY(...), ORDER BY, LIMIT/OFFSET.
+ Enrichment (userName, mandateName) only for the returned page.
+ """
+ from modules.interfaces.interfaceDbApp import getInterface as getAppInterface
+
+ try:
+ mappedPagination = self._mapPaginationColumns(pagination)
+
+ allAccounts = self.db.getRecordset(BillingAccount)
+ if mandateIds:
+ allAccounts = [a for a in allAccounts if a.get("mandateId") in set(mandateIds)]
+
+ accountIds = [a.get("id") for a in allAccounts if a.get("id")]
+ if not accountIds:
+ return PaginatedResult(items=[], totalItems=0, totalPages=0)
+
+ recordFilter: Dict[str, Any] = {"accountId": accountIds}
+ if userId:
+ recordFilter["createdByUserId"] = userId
+
+ result = self.db.getRecordsetPaginated(
+ BillingTransaction,
+ pagination=mappedPagination,
+ recordFilter=recordFilter,
+ )
+ pageItems = result.get("items", []) if isinstance(result, dict) else result.items
+
+ accountMap = {a.get("id"): a for a in allAccounts}
+
+ pageUserIds = set()
+ pageMandateIds = set()
+ for t in pageItems:
+ accId = t.get("accountId")
+ acc = accountMap.get(accId, {})
+ mid = acc.get("mandateId")
+ uid = t.get("createdByUserId") or acc.get("userId")
+ if uid:
+ pageUserIds.add(uid)
+ if mid:
+ pageMandateIds.add(mid)
+
+ appInterface = getAppInterface(self.currentUser)
+ userMap: Dict[str, str] = {}
+ if pageUserIds:
+ users = appInterface.getUsersByIds(list(pageUserIds))
+ for uid, u in users.items():
+ dn = getattr(u, "displayName", None) or getattr(u, "username", None) or uid
+ userMap[uid] = dn
+
+ mandateMap: Dict[str, str] = {}
+ if pageMandateIds:
+ mandates = appInterface.getMandatesByIds(list(pageMandateIds))
+ for mid, m in mandates.items():
+ mandateMap[mid] = getattr(m, "label", None) or getattr(m, "name", None) or mid
+
+ enriched = []
+ for t in pageItems:
+ row = dict(t)
+ accId = row.get("accountId")
+ acc = accountMap.get(accId, {})
+ mid = acc.get("mandateId")
+ txUserId = row.get("createdByUserId") or acc.get("userId")
+ row["mandateId"] = mid
+ row["mandateName"] = mandateMap.get(mid, "")
+ row["userId"] = txUserId
+ row["userName"] = userMap.get(txUserId, txUserId) if txUserId else None
+ enriched.append(row)
+
+ totalItems = result.get("totalItems", 0) if isinstance(result, dict) else result.totalItems
+ totalPages = result.get("totalPages", 0) if isinstance(result, dict) else result.totalPages
+
+ return PaginatedResult(items=enriched, totalItems=totalItems, totalPages=totalPages)
+
+ except Exception as e:
+ logger.error(f"Error in getTransactionsForMandatesPaginated: {e}")
+ return PaginatedResult(items=[], totalItems=0, totalPages=0)
+
+ def _buildScopeFilter(
+ self,
+ mandateIds: Optional[List[str]],
+ scope: str = "all",
+ userId: Optional[str] = None,
+ startTs: Optional[float] = None,
+ endTs: Optional[float] = None,
+ ) -> tuple:
+ """Build WHERE clause parts for scoped transaction queries. Returns (conditions, values, accountIds, allAccounts)."""
+ allAccounts = self.db.getRecordset(BillingAccount)
+ if mandateIds:
+ mandateSet = set(mandateIds)
+ allAccounts = [a for a in allAccounts if a.get("mandateId") in mandateSet]
+
+ accountIds = [a.get("id") for a in allAccounts if a.get("id")]
+ if not accountIds:
+ return [], [], [], allAccounts
+
+ conditions = ['"accountId" = ANY(%s)', '"transactionType" = %s']
+ values: list = [accountIds, "DEBIT"]
+
+ if userId:
+ conditions.append('"createdByUserId" = %s')
+ values.append(userId)
+
+ if startTs is not None:
+ conditions.append('"sysCreatedAt" >= %s')
+ values.append(startTs)
+ if endTs is not None:
+ conditions.append('"sysCreatedAt" < %s')
+ values.append(endTs)
+
+ return conditions, values, accountIds, allAccounts
+
+ def getTransactionStatisticsAggregated(
+ self,
+ mandateIds: Optional[List[str]],
+ scope: str = "all",
+ userId: Optional[str] = None,
+ startTs: Optional[float] = None,
+ endTs: Optional[float] = None,
+ period: str = "month",
+ ) -> Dict[str, Any]:
+ """
+ Pure SQL aggregation for statistics. No row-level loading.
+ Returns: totalCost, transactionCount, costByProvider, costByModel,
+ costByAccountId, costByAccountFeature, timeSeries
+ """
+ table = BillingTransaction.__name__
+
+ try:
+ if not self.db._ensureTableExists(BillingTransaction):
+ return self._emptyStats()
+
+ conditions, values, accountIds, allAccounts = self._buildScopeFilter(
+ mandateIds, scope, userId, startTs, endTs
+ )
+ if not accountIds:
+ return self._emptyStats()
+
+ whereClause = " WHERE " + " AND ".join(conditions)
+ self.db._ensure_connection()
+
+ result: Dict[str, Any] = {}
+
+ with self.db.connection.cursor() as cur:
+ # 1) Totals
+ cur.execute(
+ f'SELECT COALESCE(SUM("amount"), 0) AS total, COUNT(*) AS cnt FROM "{table}"{whereClause}',
+ values,
+ )
+ row = cur.fetchone()
+ result["totalCost"] = round(float(row["total"]), 4)
+ result["transactionCount"] = int(row["cnt"])
+
+ # 2) GROUP BY aicoreProvider
+ cur.execute(
+ f'SELECT COALESCE("aicoreProvider", \'unknown\') AS grp, SUM("amount") AS total '
+ f'FROM "{table}"{whereClause} GROUP BY grp ORDER BY total DESC',
+ values,
+ )
+ result["costByProvider"] = {r["grp"]: round(float(r["total"]), 4) for r in cur.fetchall()}
+
+ # 3) GROUP BY aicoreModel
+ cur.execute(
+ f'SELECT COALESCE("aicoreModel", \'unknown\') AS grp, SUM("amount") AS total '
+ f'FROM "{table}"{whereClause} GROUP BY grp ORDER BY total DESC',
+ values,
+ )
+ result["costByModel"] = {r["grp"]: round(float(r["total"]), 4) for r in cur.fetchall()}
+
+ # 4) GROUP BY accountId (will be enriched to mandateName by caller)
+ cur.execute(
+ f'SELECT "accountId" AS grp, SUM("amount") AS total '
+ f'FROM "{table}"{whereClause} GROUP BY grp ORDER BY total DESC',
+ values,
+ )
+ result["costByAccountId"] = {r["grp"]: round(float(r["total"]), 4) for r in cur.fetchall()}
+
+ # 5) GROUP BY accountId + featureCode (for costByFeature)
+ cur.execute(
+ f'SELECT "accountId", COALESCE("featureCode", \'unknown\') AS fc, SUM("amount") AS total '
+ f'FROM "{table}"{whereClause} GROUP BY "accountId", fc ORDER BY total DESC',
+ values,
+ )
+ result["costByAccountFeature"] = [
+ {"accountId": r["accountId"], "featureCode": r["fc"], "total": round(float(r["total"]), 4)}
+ for r in cur.fetchall()
+ ]
+
+ # 6) Time series via DATE_TRUNC on epoch timestamp
+ if period == "day":
+ truncExpr = "DATE_TRUNC('day', TO_TIMESTAMP(\"sysCreatedAt\"))"
+ else:
+ truncExpr = "DATE_TRUNC('month', TO_TIMESTAMP(\"sysCreatedAt\"))"
+
+ cur.execute(
+ f'SELECT {truncExpr} AS bucket, SUM("amount") AS total, COUNT(*) AS cnt '
+ f'FROM "{table}"{whereClause} AND "sysCreatedAt" IS NOT NULL '
+ f'GROUP BY bucket ORDER BY bucket',
+ values,
+ )
+ timeSeries = []
+ for r in cur.fetchall():
+ bucket = r["bucket"]
+ if period == "day":
+ label = bucket.strftime("%Y-%m-%d") if bucket else "unknown"
+ else:
+ label = bucket.strftime("%Y-%m") if bucket else "unknown"
+ timeSeries.append({
+ "date": label,
+ "cost": round(float(r["total"]), 4),
+ "count": int(r["cnt"]),
+ })
+ result["timeSeries"] = timeSeries
+
+ self.db.connection.commit()
+
+ result["_allAccounts"] = allAccounts
+ return result
+
+ except Exception as e:
+ logger.error(f"Error in getTransactionStatisticsAggregated: {e}", exc_info=True)
+ try:
+ self.db.connection.rollback()
+ except Exception:
+ pass
+ return self._emptyStats()
+
+ @staticmethod
+ def _emptyStats() -> Dict[str, Any]:
+ return {
+ "totalCost": 0.0,
+ "transactionCount": 0,
+ "costByProvider": {},
+ "costByModel": {},
+ "costByAccountId": {},
+ "costByAccountFeature": [],
+ "timeSeries": [],
+ "_allAccounts": [],
+ }
+
+ def getTransactionDistinctValues(
+ self,
+ mandateIds: Optional[List[str]],
+ column: str,
+ pagination: Optional[PaginationParams] = None,
+ scope: str = "all",
+ userId: Optional[str] = None,
+ ) -> List[str]:
+ """SQL DISTINCT for filter-values on BillingTransaction, scoped by mandates."""
+ _COLUMN_MAP = {
+ "createdAt": "sysCreatedAt",
+ "mandateId": "accountId",
+ "mandateName": "accountId",
+ }
+ dbColumn = _COLUMN_MAP.get(column, column)
+
+ mappedPagination = self._mapPaginationColumns(pagination) if pagination else None
+
+ try:
+ allAccounts = self.db.getRecordset(BillingAccount)
+ if mandateIds:
+ allAccounts = [a for a in allAccounts if a.get("mandateId") in set(mandateIds)]
+ accountIds = [a.get("id") for a in allAccounts if a.get("id")]
+ if not accountIds:
+ return []
+
+ recordFilter: Dict[str, Any] = {"accountId": accountIds}
+ if userId:
+ recordFilter["createdByUserId"] = userId
+
+ if column in ("mandateName", "userName"):
+ return self._getEnrichedDistinctValues(column, allAccounts, recordFilter, mappedPagination)
+
+ return self.db.getDistinctColumnValues(
+ BillingTransaction, dbColumn, mappedPagination, recordFilter
+ )
+ except Exception as e:
+ logger.error(f"Error in getTransactionDistinctValues({column}): {e}")
+ return []
+
+ def _getEnrichedDistinctValues(
+ self,
+ column: str,
+ allAccounts: List[Dict],
+ recordFilter: Dict[str, Any],
+ pagination: Optional[PaginationParams],
+ ) -> List[str]:
+ """Resolve enriched columns (mandateName, userName) via batch lookup."""
+ from modules.interfaces.interfaceDbApp import getInterface as getAppInterface
+
+ if column == "mandateName":
+ mandateIds = list({a.get("mandateId") for a in allAccounts if a.get("mandateId")})
+ appInterface = getAppInterface(self.currentUser)
+ mandates = appInterface.getMandatesByIds(mandateIds)
+ return sorted(
+ {getattr(m, "label", None) or getattr(m, "name", "") or mid for mid, m in mandates.items()},
+ key=lambda v: v.lower(),
+ )
+
+ if column == "userName":
+ dbCol = "createdByUserId"
+ values = self.db.getDistinctColumnValues(BillingTransaction, dbCol, pagination, recordFilter)
+ if not values:
+ return []
+ appInterface = getAppInterface(self.currentUser)
+ users = appInterface.getUsersByIds(values)
+ return sorted(
+ {getattr(u, "displayName", None) or getattr(u, "username", None) or uid for uid, u in users.items()},
+ key=lambda v: v.lower(),
+ )
+
+ return []
+
def getUserTransactionsForMandates(self, mandateIds: List[str] = None, limit: int = 100) -> List[Dict[str, Any]]:
"""
Get all transactions for specified mandates.
diff --git a/modules/interfaces/interfaceDbChat.py b/modules/interfaces/interfaceDbChat.py
index 60f4db44..874fa589 100644
--- a/modules/interfaces/interfaceDbChat.py
+++ b/modules/interfaces/interfaceDbChat.py
@@ -678,20 +678,22 @@ class ChatObjects:
return list(matchedIds)
def getWorkflow(self, workflowId: str) -> Optional[ChatWorkflow]:
- """Returns a workflow by ID if user has access."""
- # Use RBAC filtering with featureInstanceId for instance-level isolation
+ """Returns a workflow by ID if user has access.
+
+ Returns None when the workflow does not exist / RBAC denies access
+ or when the stored data fails model validation (logged separately).
+ """
workflows = self._getRecordset(ChatWorkflow, recordFilter={"id": workflowId})
-
+
if not workflows:
+ logger.debug(f"getWorkflow: no record for {workflowId} (RBAC filter or not found)")
return None
-
+
workflow = workflows[0]
try:
logs = self.getLogs(workflowId)
messages = self.getMessages(workflowId)
-
- # Validate workflow data against ChatWorkflow model
- # Explicit type coercion: DB may store numeric fields as TEXT on some platforms
+
def _toInt(v, default=0):
try:
return int(v) if v is not None else default
@@ -719,7 +721,7 @@ class ChatObjects:
messages=messages
)
except Exception as e:
- logger.error(f"Error validating workflow data: {str(e)}")
+ logger.error(f"getWorkflow: data validation failed for {workflowId}: {e}")
return None
def createWorkflow(self, workflowData: Dict[str, Any]) -> ChatWorkflow:
@@ -1040,6 +1042,10 @@ class ChatObjects:
workflowId = messageData["workflowId"]
workflow = self.getWorkflow(workflowId)
if not workflow:
+ logger.warning(
+ f"createMessage: workflow {workflowId} returned None "
+ f"(RBAC denied, not found, or data validation failed — see preceding logs)"
+ )
raise PermissionError(f"No access to workflow {workflowId}")
if not self.checkRbacPermission(ChatWorkflow, "update", workflowId):
@@ -1161,7 +1167,7 @@ class ChatObjects:
data={
"type": "message",
"createdAt": message_timestamp,
- "item": chat_message.dict()
+ "item": chat_message.model_dump()
},
event_category="chat"
))
@@ -1535,7 +1541,7 @@ class ChatObjects:
data={
"type": "log",
"createdAt": log_timestamp,
- "item": ChatLog(**createdLog).dict()
+ "item": ChatLog(**createdLog).model_dump()
},
event_category="chat"
))
diff --git a/modules/interfaces/interfaceDbManagement.py b/modules/interfaces/interfaceDbManagement.py
index 4fd7c15c..9589f7d6 100644
--- a/modules/interfaces/interfaceDbManagement.py
+++ b/modules/interfaces/interfaceDbManagement.py
@@ -14,7 +14,7 @@ import mimetypes
from typing import Dict, Any, List, Optional, Union
from modules.connectors.connectorDbPostgre import DatabaseConnector, _get_cached_connector
-from modules.interfaces.interfaceRbac import getRecordsetWithRBAC
+from modules.interfaces.interfaceRbac import getRecordsetWithRBAC, getRecordsetPaginatedWithRBAC
from modules.security.rbac import RbacClass
from modules.datamodels.datamodelRbac import AccessRuleContext
from modules.datamodels.datamodelUam import AccessLevel
@@ -187,6 +187,7 @@ class ComponentObjects:
try:
# Initialize standard prompts
self._initializeStandardPrompts()
+ self._seedUiLanguageSetsIfEmpty()
# Add other record initializations here
@@ -196,6 +197,48 @@ class ComponentObjects:
# Don't raise the error, just log it
# This allows the interface to be created even if initialization fails
+ def _seedUiLanguageSetsIfEmpty(self) -> None:
+ try:
+ import json
+ from pathlib import Path
+
+ from modules.datamodels.datamodelUiLanguage import UiLanguageSet
+
+ existing = self.db.getRecordset(UiLanguageSet)
+ if existing:
+ return
+ seedPath = (
+ Path(__file__).resolve().parent.parent
+ / "migration"
+ / "seedData"
+ / "ui_language_seed.json"
+ )
+ if not seedPath.is_file():
+ logger.warning("ui_language_seed.json not found, skipping UI i18n seed")
+ return
+ payload = json.loads(seedPath.read_text(encoding="utf-8"))
+ now = getUtcTimestamp()
+ for row in payload:
+ entries = row.get("entries")
+ if not isinstance(entries, list):
+ keys = row.get("keys") or {}
+ entries = [{"context": "ui", "key": k, "value": v} for k, v in keys.items()]
+ rec = {
+ "id": row["id"],
+ "label": row["label"],
+ "entries": entries,
+ "status": row.get("status") or "complete",
+ "isDefault": bool(row.get("isDefault", False)),
+ "sysCreatedAt": now,
+ "sysModifiedBy": None,
+ "sysCreatedBy": None,
+ "sysModifiedAt": now,
+ }
+ self.db.recordCreate(UiLanguageSet, rec)
+ logger.info("Seeded UiLanguageSet rows from ui_language_seed.json")
+ except Exception as e:
+ logger.error(f"UI i18n seed failed: {e}")
+
def _initializeStandardPrompts(self):
"""Initializes standard prompts if they don't exist yet."""
try:
@@ -791,32 +834,43 @@ class ComponentObjects:
# File Utilities
def checkForDuplicateFile(self, fileHash: str, fileName: str) -> Optional[FileItem]:
- """Checks if a file with the same hash AND fileName already exists for the current user.
+ """Checks if a file with the same hash AND fileName already exists for the current user
+ **within the same scope** (mandateId + featureInstanceId).
- Duplicate = same user (sysCreatedBy) + same fileHash + same fileName.
+ Duplicate = same user + same fileHash + same fileName + same scope.
Same hash with different name is allowed (intentional copy by user).
- Uses direct DB query (not RBAC) because files are isolated per user.
"""
if not self.userId:
return None
- # Direct DB query: find files with matching hash + name + user
+ recordFilter: dict = {
+ "sysCreatedBy": self.userId,
+ "fileHash": fileHash,
+ "fileName": fileName,
+ }
+ if self.featureInstanceId:
+ recordFilter["featureInstanceId"] = self.featureInstanceId
+ elif self.mandateId:
+ recordFilter["mandateId"] = self.mandateId
+
matchingFiles = self.db.getRecordset(
FileItem,
- recordFilter={
- "sysCreatedBy": self.userId,
- "fileHash": fileHash,
- "fileName": fileName
- }
+ recordFilter=recordFilter,
)
if not matchingFiles:
return None
- # Return first match
file = matchingFiles[0]
+ fileId = file["id"]
+
+ fileDataExists = self.db.getRecordset(FileData, recordFilter={"id": fileId})
+ if not fileDataExists:
+ logger.warning(f"Duplicate FileItem {fileId} found but FileData missing — treating as new file")
+ return None
+
return FileItem(
- id=file["id"],
+ id=fileId,
mandateId=file.get("mandateId", ""),
featureInstanceId=file.get("featureInstanceId", ""),
fileName=file["fileName"],
@@ -826,77 +880,66 @@ class ComponentObjects:
sysCreatedAt=file.get("sysCreatedAt"),
)
+ # Class-level cache — built once from the ExtractorRegistry
+ _extensionToMime: Optional[Dict[str, str]] = None
+ _textMimeTypes: Optional[set] = None
+
+ @classmethod
+ def _ensureMimeMaps(cls):
+ """Lazily build extension→MIME and text-MIME-set from the ExtractorRegistry."""
+ if cls._extensionToMime is not None:
+ return
+ try:
+ from modules.serviceCenter.services.serviceExtraction.subRegistry import ExtractorRegistry
+ registry = ExtractorRegistry()
+ cls._extensionToMime = registry.getExtensionToMimeMap()
+
+ # Collect all MIME types declared by the TextExtractor (and other text-ish extractors)
+ textMimes: set = set()
+ seen: set = set()
+ for ext in registry._map.values():
+ eid = id(ext)
+ if eid in seen:
+ continue
+ seen.add(eid)
+ mimes = ext.getSupportedMimeTypes()
+ if any(m.startswith("text/") for m in mimes):
+ textMimes.update(mimes)
+ # Always include common text types
+ textMimes.update({
+ "application/json", "application/xml", "application/javascript",
+ "application/sql", "application/x-yaml", "application/x-toml",
+ })
+ cls._textMimeTypes = textMimes
+ except Exception:
+ cls._extensionToMime = {}
+ cls._textMimeTypes = set()
+
def getMimeType(self, fileName: str) -> str:
- """Determines the MIME type based on the file extension."""
- ext = os.path.splitext(fileName)[1].lower()[1:]
- extensionToMime = {
- "pdf": "application/pdf",
- "docx": "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
- "doc": "application/msword",
- "xlsx": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
- "xls": "application/vnd.ms-excel",
- "pptx": "application/vnd.openxmlformats-officedocument.presentationml.presentation",
- "ppt": "application/vnd.ms-powerpoint",
- "csv": "text/csv",
- "txt": "text/plain",
- "json": "application/json",
- "xml": "application/xml",
- "html": "text/html",
- "htm": "text/html",
- "jpg": "image/jpeg",
- "jpeg": "image/jpeg",
- "png": "image/png",
- "gif": "image/gif",
- "webp": "image/webp",
- "svg": "image/svg+xml",
- "py": "text/x-python",
- "js": "application/javascript",
- "css": "text/css",
- "eml": "message/rfc822",
- "msg": "application/vnd.ms-outlook",
- }
- return extensionToMime.get(ext.lower(), "application/octet-stream")
+ """Determines the MIME type based on the file extension.
+
+ Resolution order:
+ 1. ExtractorRegistry (derived from all registered extractors)
+ 2. Python stdlib mimetypes.guess_type
+ 3. Fallback: application/octet-stream
+ """
+ self._ensureMimeMaps()
+ ext = os.path.splitext(fileName)[1].lower().lstrip('.')
+ if ext and ext in self._extensionToMime:
+ return self._extensionToMime[ext]
+ guessed, _ = mimetypes.guess_type(fileName, strict=False)
+ return guessed or "application/octet-stream"
def isTextMimeType(self, mimeType: str) -> bool:
-        """Determines if a MIME type represents a text-based format."""
-        textMimeTypes = {
-            'text/plain',
-            'text/html',
-            'text/css',
-            'text/javascript',
-            'text/x-python',
-            'text/csv',
-            'text/xml',
-            'application/json',
-            'application/xml',
-            'application/javascript',
-            'application/x-python',
-            'application/x-httpd-php',
-            'application/x-sh',
-            'application/x-shellscript',
-            'application/x-yaml',
-            'application/x-toml',
-            'application/x-markdown',
-            'application/x-latex',
-            'application/x-tex',
-            'application/x-rst',
-            'application/x-asciidoc',
-            'application/x-markdown',
-            'application/x-httpd-php',
-            'application/x-httpd-php-source',
-            'application/x-httpd-php3',
-            'application/x-httpd-php4',
-            'application/x-httpd-php5',
-            'application/x-httpd-php7',
-            'application/x-httpd-php8',
-            'application/x-httpd-php-source',
-            'application/x-httpd-php3-source',
-            'application/x-httpd-php4-source',
-            'application/x-httpd-php5-source',
-            'application/x-httpd-php7-source',
-            'application/x-httpd-php8-source'
-        }
-        return mimeType.lower() in textMimeTypes
+        """Determines if a MIME type represents a text-based format.
+
+        Derived from the MIME types declared by text-oriented extractors.
+        """
+        self._ensureMimeMaps()
+        lower = mimeType.lower()
+        # Any text/* type is textual by definition; the registry-derived set
+        # only needs to cover application/* text formats (json, xml, yaml, ...).
+        if lower.startswith("text/"):
+            return True
+        return lower in self._textMimeTypes
# File methods - metadata-based operations
@@ -907,12 +950,21 @@ class ComponentObjects:
if recordFilter:
filterDict.update(recordFilter)
return self.db.getRecordset(FileItem, recordFilter=filterDict)
+
+    def _isMandatelessFile(self, file: Dict[str, Any]) -> bool:
+        """A file has no mandate context if mandateId is empty, None, or missing."""
+        # Whitespace-only mandateId strings also count as "no mandate context".
+        # NOTE(review): this helper is not called anywhere in the visible hunks
+        # of this patch — confirm it has a caller, otherwise it is dead code.
+        mid = file.get("mandateId")
+        return not mid or (isinstance(mid, str) and not mid.strip())
- def getAllFiles(self, pagination: Optional[PaginationParams] = None) -> Union[List[FileItem], PaginatedResult]:
+ def getAllFiles(self, pagination: Optional[PaginationParams] = None, recordFilter: Dict[str, Any] = None) -> Union[List[FileItem], PaginatedResult]:
"""
- Returns files owned by the current user (user-scoped, not RBAC-based).
- Every user (including SysAdmin) only sees their own files.
- Supports optional pagination, sorting, and filtering via database-level queries.
+ Returns files visible to the current user based on RBAC scope rules.
+
+ Visibility (via RBAC GROUP / scope-based filtering):
+ - Own files (sysCreatedBy = currentUser) always visible
+ - Files with scope='global' visible to everyone
+ - Files with scope='mandate' visible to members of that mandate
+ - Files with scope='featureInstance' visible to users with access to that instance
Args:
pagination: Optional pagination parameters. If None, returns all items.
@@ -921,9 +973,6 @@ class ComponentObjects:
If pagination is None: List[FileItem]
If pagination is provided: PaginatedResult with items and metadata
"""
- # User-scoping filter: every user only sees their own files (bypasses RBAC SysAdmin override)
- recordFilter = {"sysCreatedBy": self.userId}
-
def _convertFileItems(files):
fileItems = []
for file in files:
@@ -949,30 +998,41 @@ class ComponentObjects:
logger.warning(f"Skipping invalid file record: {str(e)}")
continue
return fileItems
-
+
if pagination is None:
- allFiles = self._getFilesByCurrentUser()
+ allFiles = getRecordsetWithRBAC(
+ self.db, FileItem, self.currentUser,
+ recordFilter=recordFilter,
+ mandateId=self.mandateId,
+ featureInstanceId=self.featureInstanceId,
+ )
return _convertFileItems(allFiles)
-
- # Database-level pagination: filtering, sorting, and LIMIT/OFFSET happen in SQL
- result = self.db.getRecordsetPaginated(
- FileItem,
+
+ result = getRecordsetPaginatedWithRBAC(
+ self.db, FileItem, self.currentUser,
pagination=pagination,
- recordFilter=recordFilter
- )
-
- items = _convertFileItems(result["items"])
-
- return PaginatedResult(
- items=items,
- totalItems=result["totalItems"],
- totalPages=result["totalPages"]
+ recordFilter=recordFilter,
+ mandateId=self.mandateId,
+ featureInstanceId=self.featureInstanceId,
)
+
+ if isinstance(result, PaginatedResult):
+ return PaginatedResult(
+ items=_convertFileItems(result.items),
+ totalItems=result.totalItems,
+ totalPages=result.totalPages,
+ )
+
+ return _convertFileItems(result if isinstance(result, list) else [])
def getFile(self, fileId: str) -> Optional[FileItem]:
- """Returns a file by ID if it belongs to the current user (user-scoped)."""
- # Files are always user-scoped: filter by sysCreatedBy (bypasses RBAC SysAdmin override)
- filteredFiles = self._getFilesByCurrentUser(recordFilter={"id": fileId})
+ """Returns a file by ID if the current user has RBAC access (scope-based)."""
+ filteredFiles = getRecordsetWithRBAC(
+ self.db, FileItem, self.currentUser,
+ recordFilter={"id": fileId},
+ mandateId=self.mandateId,
+ featureInstanceId=self.featureInstanceId,
+ )
if not filteredFiles:
return None
@@ -981,18 +1041,14 @@ class ComponentObjects:
try:
sysCreatedAt = file.get("sysCreatedAt")
if sysCreatedAt is None or not isinstance(sysCreatedAt, (int, float)) or sysCreatedAt <= 0:
- sysCreatedAt = getUtcTimestamp()
+ file["sysCreatedAt"] = getUtcTimestamp()
- return FileItem(
- id=file.get("id"),
- mandateId=file.get("mandateId"),
- featureInstanceId=file.get("featureInstanceId", ""),
- fileName=file.get("fileName"),
- mimeType=file.get("mimeType"),
- fileHash=file.get("fileHash"),
- fileSize=file.get("fileSize"),
- sysCreatedAt=sysCreatedAt,
- )
+ if file.get("scope") is None:
+ file["scope"] = "personal"
+ if file.get("neutralize") is None:
+ file["neutralize"] = False
+
+ return FileItem(**file)
except Exception as e:
logger.error(f"Error converting file record: {str(e)}")
return None
@@ -1071,7 +1127,8 @@ class ComponentObjects:
fileName=uniqueName,
mimeType=mimeType,
fileSize=fileSize,
- fileHash=fileHash
+ fileHash=fileHash,
+ folderId="",
)
# Store in database
@@ -1079,15 +1136,36 @@ class ComponentObjects:
return fileItem
+    def _isFileOwner(self, file) -> bool:
+        """Check if the current user owns the file.
+
+        Accepts either an object with a ``sysCreatedBy`` attribute (FileItem)
+        or a plain dict / DB row with a ``sysCreatedBy`` key.
+        """
+        # NOTE(review): if both createdBy and self.userId are None, this reports
+        # ownership (None == None) — confirm userId is always set for callers.
+        createdBy = getattr(file, "sysCreatedBy", None) or (file.get("sysCreatedBy") if isinstance(file, dict) else None)
+        return createdBy == self.userId
+
+    def _requireFileWriteAccess(self, file, fileId: str, operation: str = "update"):
+        """Raise PermissionError if the user cannot mutate this file.
+        Owners always can. Non-owners need RBAC ALL level.
+
+        Args:
+            file: FileItem object, dict, or DB row (only ``sysCreatedBy`` is read).
+            fileId: File ID, used only in the error message.
+            operation: Attribute name read off the permissions object
+                ("update" or "delete") — must match UserPermissions fields.
+        """
+        if self._isFileOwner(file):
+            return
+        # Local imports — presumably to avoid circular module dependencies; verify.
+        from modules.interfaces.interfaceRbac import buildDataObjectKey
+        from modules.datamodels.datamodelRbac import AccessRuleContext
+        objectKey = buildDataObjectKey("FileItem")
+        permissions = self.rbac.getUserPermissions(
+            self.currentUser, AccessRuleContext.DATA, objectKey,
+            mandateId=self.mandateId, featureInstanceId=self.featureInstanceId,
+        )
+        # NOTE(review): AccessLevel is assumed imported at module top — not
+        # visible in this hunk; confirm, otherwise this raises NameError.
+        level = getattr(permissions, operation, None)
+        if level != AccessLevel.ALL:
+            raise PermissionError(
+                f"No permission to {operation} file {fileId} (not owner, access level: {level})"
+            )
+
def updateFile(self, fileId: str, updateData: Dict[str, Any]) -> Dict[str, Any]:
- """Updates file metadata if user has access."""
- # Check if the file exists and user has access
+ """Updates file metadata if user has access and is owner (or has ALL)."""
file = self.getFile(fileId)
if not file:
raise FileNotFoundError(f"File with ID {fileId} not found")
- if not self.checkRbacPermission(FileItem, "update", fileId):
- raise PermissionError(f"No permission to update file {fileId}")
+ self._requireFileWriteAccess(file, fileId, "update")
# If fileName is being updated, ensure it's unique
if "fileName" in updateData:
@@ -1100,16 +1178,14 @@ class ComponentObjects:
return success
def deleteFile(self, fileId: str) -> bool:
- """Deletes a file if user has access."""
+ """Deletes a file if user is owner (or has ALL access)."""
try:
- # Check if the file exists and user has access
file = self.getFile(fileId)
if not file:
raise FileNotFoundError(f"File with ID {fileId} not found")
- if not self.checkRbacPermission(FileItem, "update", fileId):
- raise PermissionError(f"No permission to delete file {fileId}")
+ self._requireFileWriteAccess(file, fileId, "delete")
# Check for other references to this file (by hash) - user-scoped check
fileHash = file.fileHash
@@ -1143,7 +1219,8 @@ class ComponentObjects:
raise FileDeletionError(f"Error deleting file: {str(e)}")
def deleteFilesBatch(self, fileIds: List[str]) -> Dict[str, Any]:
- """Delete multiple files in a single SQL batch call."""
+ """Delete multiple files in a single SQL batch call.
+ Owner can always delete; non-owners need RBAC ALL level."""
uniqueIds = [str(fid) for fid in dict.fromkeys(fileIds or []) if fid]
if not uniqueIds:
return {"deletedFiles": 0}
@@ -1152,20 +1229,21 @@ class ComponentObjects:
self.db._ensure_connection()
with self.db.connection.cursor() as cursor:
cursor.execute(
- 'SELECT "id" FROM "FileItem" WHERE "id" = ANY(%s) AND "sysCreatedBy" = %s',
- (uniqueIds, self.userId or ""),
+ 'SELECT "id", "sysCreatedBy" FROM "FileItem" WHERE "id" = ANY(%s)',
+ (uniqueIds,),
)
- accessibleIds = [row["id"] for row in cursor.fetchall()]
+ rows = cursor.fetchall()
+ foundIds = {row["id"] for row in rows}
+ missing = sorted(set(uniqueIds) - foundIds)
+ if missing:
+ raise FileNotFoundError(f"Files not found: {missing}")
- if len(accessibleIds) != len(uniqueIds):
- missingIds = sorted(set(uniqueIds) - set(accessibleIds))
- raise FileNotFoundError(f"Files not found or not accessible: {missingIds}")
+ for row in rows:
+ self._requireFileWriteAccess(row, row["id"], "delete")
+ accessibleIds = [row["id"] for row in rows]
cursor.execute('DELETE FROM "FileData" WHERE "id" = ANY(%s)', (accessibleIds,))
- cursor.execute(
- 'DELETE FROM "FileItem" WHERE "id" = ANY(%s) AND "sysCreatedBy" = %s',
- (accessibleIds, self.userId or ""),
- )
+ cursor.execute('DELETE FROM "FileItem" WHERE "id" = ANY(%s)', (accessibleIds,))
deletedFiles = cursor.rowcount
self.db.connection.commit()
@@ -1206,17 +1284,98 @@ class ComponentObjects:
currentId = folders[0].get("parentId")
return False
+    def _ensureFeatureInstanceFolder(self, featureInstanceId: str, mandateId: str = "") -> Optional[str]:
+        """Return the folder ID for a feature instance, creating it on first use.
+        The folder is named after the feature instance label.
+
+        Returns:
+            The folder ID, or None if creation yielded no usable record.
+        """
+        # Lookup is scoped per user (sysCreatedBy) — each user gets their own
+        # folder for the same feature instance.
+        existing = self.db.getRecordset(
+            FileFolder,
+            recordFilter={
+                "featureInstanceId": featureInstanceId,
+                "sysCreatedBy": self.userId or "",
+            },
+        )
+        if existing:
+            return existing[0].get("id")
+
+        # Resolve the instance label for the folder name
+        # Fallback name: first 8 chars of the instance ID if the label lookup fails.
+        folderName = featureInstanceId[:8]
+        try:
+            from modules.datamodels.datamodelFeatures import FeatureInstance
+            from modules.security.rootAccess import getRootDbAppConnector
+            # NOTE(review): root connector bypasses the caller's RBAC to read the
+            # instance label — confirm this read is intentionally unrestricted.
+            dbApp = getRootDbAppConnector()
+            instances = dbApp.getRecordset(FeatureInstance, recordFilter={"id": featureInstanceId})
+            if instances:
+                folderName = instances[0].get("label") or folderName
+        except Exception as e:
+            logger.warning(f"Could not resolve feature instance label: {e}")
+
+        folder = FileFolder(
+            name=folderName,
+            parentId=None,
+            mandateId=mandateId,
+            featureInstanceId=featureInstanceId,
+        )
+        # recordCreate may return a dict or a model object depending on connector.
+        created = self.db.recordCreate(FileFolder, folder)
+        return created.get("id") if isinstance(created, dict) else getattr(created, "id", None)
+
    def getFolder(self, folderId: str) -> Optional[Dict[str, Any]]:
        """Returns a folder by ID if it belongs to the current user."""
        # NOTE(review): this patch makes listFolders() surface other users' folders
        # (when they contain visible files), but getFolder() still filters by
        # sysCreatedBy — a listed shared folder cannot be fetched here. Confirm
        # this asymmetry is intended.
        folders = self.db.getRecordset(FileFolder, recordFilter={"id": folderId, "sysCreatedBy": self.userId or ""})
        return folders[0] if folders else None
    def listFolders(self, parentId: Optional[str] = None) -> List[Dict[str, Any]]:
-        """List folders for current user, optionally filtered by parentId."""
-        recordFilter = {"sysCreatedBy": self.userId or ""}
+        """List folders visible to the current user.
+        Own folders are always returned. Other users' folders are only
+        returned when they contain files visible to the current user.
+        Each folder is enriched with ``fileCount``."""
+        recordFilter = {}
        if parentId is not None:
            recordFilter["parentId"] = parentId
-        return self.db.getRecordset(FileFolder, recordFilter=recordFilter)
+        folders = self.db.getRecordset(FileFolder, recordFilter=recordFilter if recordFilter else None)
+
+        if not folders:
+            return folders
+
+        folderIds = [f["id"] for f in folders if f.get("id")]
+        fileCounts: Dict[str, int] = {}
+        try:
+            # NOTE(review): imports a private helper from interfaceRbac — cross-module
+            # use of an underscore-prefixed function; consider a public wrapper.
+            from modules.interfaces.interfaceRbac import _buildFilesScopeWhereClause
+            scopeClause = _buildFilesScopeWhereClause(
+                self.currentUser, "FileItem", self.db,
+                self.mandateId, self.featureInstanceId,
+                [], [],
+            )
+
+            self.db._ensure_connection()
+            with self.db.connection.cursor() as cursor:
+                # One GROUP BY query counts visible files across all folders at once.
+                baseQuery = (
+                    'SELECT "folderId", COUNT(*) AS cnt '
+                    'FROM "FileItem" '
+                    'WHERE "folderId" = ANY(%s)'
+                )
+                queryValues: list = [folderIds]
+
+                if scopeClause:
+                    baseQuery += ' AND (' + scopeClause["condition"] + ')'
+                    queryValues.extend(scopeClause["values"])
+
+                baseQuery += ' GROUP BY "folderId"'
+                cursor.execute(baseQuery, queryValues)
+                for row in cursor.fetchall():
+                    fileCounts[row["folderId"]] = row["cnt"]
+        except Exception as e:
+            # Best-effort: on failure all counts stay 0, so only own folders
+            # survive the filter below (shared folders are hidden, not leaked).
+            logger.warning(f"Could not count files per folder: {e}")
+
+        userId = self.userId or ""
+        result = []
+        for folder in folders:
+            fc = fileCounts.get(folder.get("id", ""), 0)
+            folder["fileCount"] = fc
+            isOwn = folder.get("sysCreatedBy") == userId
+            if isOwn or fc > 0:
+                result.append(folder)
+
+        return result
def createFolder(self, name: str, parentId: Optional[str] = None) -> Dict[str, Any]:
"""Create a new folder with unique name validation."""
@@ -1248,7 +1407,8 @@ class ComponentObjects:
return self.db.recordModify(FileFolder, folderId, {"parentId": targetParentId})
def moveFilesBatch(self, fileIds: List[str], targetFolderId: Optional[str] = None) -> Dict[str, Any]:
- """Move multiple files with one SQL update."""
+ """Move multiple files with one SQL update.
+ Owner can always move; non-owners need RBAC ALL level."""
uniqueIds = [str(fid) for fid in dict.fromkeys(fileIds or []) if fid]
if not uniqueIds:
return {"movedFiles": 0}
@@ -1262,18 +1422,23 @@ class ComponentObjects:
self.db._ensure_connection()
with self.db.connection.cursor() as cursor:
cursor.execute(
- 'SELECT "id" FROM "FileItem" WHERE "id" = ANY(%s) AND "sysCreatedBy" = %s',
- (uniqueIds, self.userId or ""),
+ 'SELECT "id", "sysCreatedBy" FROM "FileItem" WHERE "id" = ANY(%s)',
+ (uniqueIds,),
)
- accessibleIds = [row["id"] for row in cursor.fetchall()]
- if len(accessibleIds) != len(uniqueIds):
- missingIds = sorted(set(uniqueIds) - set(accessibleIds))
- raise FileNotFoundError(f"Files not found or not accessible: {missingIds}")
+ rows = cursor.fetchall()
+ foundIds = {row["id"] for row in rows}
+ missing = sorted(set(uniqueIds) - foundIds)
+ if missing:
+ raise FileNotFoundError(f"Files not found: {missing}")
+ for row in rows:
+ self._requireFileWriteAccess(row, row["id"], "update")
+
+ accessibleIds = [row["id"] for row in rows]
cursor.execute(
'UPDATE "FileItem" SET "folderId" = %s, "sysModifiedAt" = %s, "sysModifiedBy" = %s '
- 'WHERE "id" = ANY(%s) AND "sysCreatedBy" = %s',
- (targetFolderId, getUtcTimestamp(), self.userId or "", accessibleIds, self.userId or ""),
+ 'WHERE "id" = ANY(%s)',
+ (targetFolderId, getUtcTimestamp(), self.userId or "", accessibleIds),
)
movedFiles = cursor.rowcount
@@ -1555,7 +1720,7 @@ class ComponentObjects:
logger.warning(f"No access to file ID {fileId}")
return None
- fileDataEntries = getRecordsetWithRBAC(self.db, FileData, self.currentUser, recordFilter={"id": fileId}, mandateId=self.mandateId)
+ fileDataEntries = self.db.getRecordset(FileData, recordFilter={"id": fileId})
if not fileDataEntries:
logger.warning(f"No data found for file ID {fileId}")
return None
diff --git a/modules/interfaces/interfaceDbSubscription.py b/modules/interfaces/interfaceDbSubscription.py
index f1d7ccf7..d3943d4b 100644
--- a/modules/interfaces/interfaceDbSubscription.py
+++ b/modules/interfaces/interfaceDbSubscription.py
@@ -375,12 +375,16 @@ class SubscriptionObjects:
itemIdUsers = sub.get("stripeItemIdUsers")
itemIdInstances = sub.get("stripeItemIdInstances")
+ plan = self.getPlan(sub.get("planKey", ""))
+ includedModules = plan.includedModules if plan else 0
+
try:
from modules.shared.stripeClient import getStripeClient
stripe = getStripeClient()
activeUsers = self.countActiveUsers(mandateId)
activeInstances = self.countActiveFeatureInstances(mandateId)
+ billableModules = max(0, activeInstances - includedModules)
if itemIdUsers:
stripe.SubscriptionItem.modify(
@@ -388,10 +392,10 @@ class SubscriptionObjects:
)
if itemIdInstances:
stripe.SubscriptionItem.modify(
- itemIdInstances, quantity=max(activeInstances, 0), proration_behavior="create_prorations",
+ itemIdInstances, quantity=billableModules, proration_behavior="create_prorations",
)
- logger.info("Stripe quantity synced for sub %s: users=%d, instances=%d", subscriptionId, activeUsers, activeInstances)
+ logger.info("Stripe quantity synced for sub %s: users=%d, modules=%d (total=%d, included=%d)", subscriptionId, activeUsers, billableModules, activeInstances, includedModules)
except Exception as e:
logger.error("syncQuantityToStripe(%s) failed: %s", subscriptionId, e)
if raiseOnError:
diff --git a/modules/interfaces/interfaceFeatures.py b/modules/interfaces/interfaceFeatures.py
index 6616218d..943acdb5 100644
--- a/modules/interfaces/interfaceFeatures.py
+++ b/modules/interfaces/interfaceFeatures.py
@@ -15,7 +15,9 @@ from typing import List, Dict, Any, Optional
from modules.datamodels.datamodelFeatures import Feature, FeatureInstance
from modules.datamodels.datamodelRbac import Role, AccessRule
+from modules.datamodels.datamodelUtils import coerce_text_multilingual
from modules.connectors.connectorDbPostgre import DatabaseConnector
+from modules.shared.i18nRegistry import resolveText
logger = logging.getLogger(__name__)
@@ -198,6 +200,9 @@ class FeatureInterface:
# Copy template roles if requested
if copyTemplateRoles:
self._copyTemplateRoles(featureCode, mandateId, instanceId)
+
+ # Copy template workflows (if feature defines TEMPLATE_WORKFLOWS)
+ self._copyTemplateWorkflows(featureCode, mandateId, instanceId)
cleanedRecord = dict(createdInstance)
return FeatureInstance(**cleanedRecord)
@@ -206,6 +211,71 @@ class FeatureInterface:
logger.error(f"Error creating feature instance: {e}")
raise ValueError(f"Failed to create feature instance: {e}")
+    def _copyTemplateWorkflows(self, featureCode: str, mandateId: str, instanceId: str) -> int:
+        """
+        Copy feature-specific template workflows to a new instance.
+
+        Loads TEMPLATE_WORKFLOWS from the feature module and creates
+        AutoWorkflow records in the graphicalEditor DB, scoped to
+        (mandateId, instanceId). The placeholder {{featureInstanceId}}
+        in graph parameters is replaced with the actual instanceId.
+
+        Args:
+            featureCode: Feature code (e.g. "trustee")
+            mandateId: Mandate ID
+            instanceId: New FeatureInstance ID
+
+        Returns:
+            Number of workflows copied
+        """
+        import json
+        import importlib
+
+        try:
+            # Feature modules follow the naming convention main<FeatureCode>.py.
+            featureModule = importlib.import_module(f"modules.features.{featureCode}.main{featureCode.capitalize()}")
+            getTemplateWorkflows = getattr(featureModule, "getTemplateWorkflows", None)
+            if not getTemplateWorkflows:
+                return 0
+
+            templateWorkflows = getTemplateWorkflows()
+            if not templateWorkflows:
+                return 0
+
+            from modules.features.graphicalEditor.interfaceFeatureGraphicalEditor import getGraphicalEditorInterface
+            from modules.interfaces.interfaceDbApp import getRootInterface
+            # Workflows are created under the root user's identity, not the caller's.
+            rootUser = getRootInterface().currentUser
+            geInterface = getGraphicalEditorInterface(rootUser, mandateId, instanceId)
+
+            copied = 0
+            for template in templateWorkflows:
+                # String-level substitution over the serialized graph replaces the
+                # placeholder everywhere, including inside nested literal values.
+                graphJson = json.dumps(template.get("graph", {}))
+                graphJson = graphJson.replace("{{featureInstanceId}}", instanceId)
+                graph = json.loads(graphJson)
+
+                label = resolveText(template.get("label"))
+
+                # NOTE(review): template["id"] raises KeyError for templates without
+                # an id; the broad except below swallows it after partial copies.
+                geInterface.createWorkflow({
+                    "label": label,
+                    "graph": graph,
+                    "tags": template.get("tags", [f"feature:{featureCode}"]),
+                    "isTemplate": False,
+                    "templateSourceId": template["id"],
+                    "templateScope": "instance",
+                    "active": True,
+                })
+                copied += 1
+
+            if copied > 0:
+                logger.info(f"Feature '{featureCode}': Copied {copied} template workflows to instance {instanceId}")
+            return copied
+
+        except ImportError:
+            # Features without a main module simply have no template workflows.
+            logger.debug(f"No feature module found for '{featureCode}' — skipping workflow bootstrap")
+            return 0
+        except Exception as e:
+            # Best-effort bootstrap: failures must not abort instance creation.
+            logger.warning(f"Error copying template workflows for '{featureCode}' instance {instanceId}: {e}")
+            return 0
+
def _copyTemplateRoles(self, featureCode: str, mandateId: str, instanceId: str) -> int:
"""
Copy feature-specific template roles to a new instance.
@@ -268,7 +338,7 @@ class FeatureInterface:
newRole = Role(
id=newRoleId,
roleLabel=templateRole.get("roleLabel"),
- description=templateRole.get("description", {}),
+ description=coerce_text_multilingual(templateRole.get("description", {})),
featureCode=featureCode,
mandateId=mandateId,
featureInstanceId=instanceId,
@@ -354,7 +424,7 @@ class FeatureInterface:
newRole = Role(
id=newRoleId,
roleLabel=templateRole.get("roleLabel"),
- description=templateRole.get("description", {}),
+ description=coerce_text_multilingual(templateRole.get("description", {})),
featureCode=featureCode,
mandateId=mandateId,
featureInstanceId=featureInstanceId,
diff --git a/modules/interfaces/interfaceRbac.py b/modules/interfaces/interfaceRbac.py
index 947a6e2d..b8a87ba9 100644
--- a/modules/interfaces/interfaceRbac.py
+++ b/modules/interfaces/interfaceRbac.py
@@ -17,7 +17,8 @@ Data Namespace Structure:
GROUP-Berechtigung:
- data.uam.*: GROUP filtert nach Mandant (via UserMandate)
-- data.chat.*, data.files.*, data.automation.*: GROUP = MY (benutzer-eigen); bei gesetztem featureInstanceId zusätzlich sysCreatedBy
+- data.chat.*, data.automation.*: GROUP = MY (benutzer-eigen); bei gesetztem featureInstanceId zusätzlich sysCreatedBy
+- data.files.*: GROUP = eigene Files + scope-basierte Sichtbarkeit (global, mandate, featureInstance)
- data.feature.*: GROUP filtert nach mandateId/featureInstanceId
"""
@@ -74,10 +75,16 @@ TABLE_NAMESPACE = {
# Automation - benutzer-eigen
"AutomationDefinition": "automation",
"AutomationTemplate": "automation",
- # Automation2 - feature-scoped
- "Automation2Workflow": "automation2",
- "Automation2WorkflowRun": "automation2",
- "Automation2HumanTask": "automation2",
+ # GraphicalEditor - Greenfield DB poweron_graphicaleditor (Auto-prefix models)
+ "AutoWorkflow": "feature.graphicalEditor",
+ "AutoVersion": "feature.graphicalEditor",
+ "AutoRun": "feature.graphicalEditor",
+ "AutoStepLog": "feature.graphicalEditor",
+ "AutoTask": "feature.graphicalEditor",
+ # Legacy aliases (backward compat)
+ "Automation2Workflow": "feature.graphicalEditor",
+ "Automation2WorkflowRun": "feature.graphicalEditor",
+ "Automation2HumanTask": "feature.graphicalEditor",
# Knowledge Store - benutzer-eigen
"FileContentIndex": "knowledge",
"ContentChunk": "knowledge",
@@ -87,7 +94,8 @@ TABLE_NAMESPACE = {
}
# Namespaces ohne Mandantenkontext - GROUP wird auf MY gemappt
-USER_OWNED_NAMESPACES = {"chat", "chatbot", "files", "automation", "knowledge", "datasource"}
+# NOTE: "files" is NOT in this set – files use scope-based visibility for GROUP
+USER_OWNED_NAMESPACES = {"chat", "chatbot", "automation", "knowledge", "datasource"}
def buildDataObjectKey(tableName: str, featureCode: Optional[str] = None) -> str:
@@ -606,6 +614,82 @@ def getDistinctColumnValuesWithRBAC(
return []
+def _buildFilesScopeWhereClause(
+    currentUser: User,
+    table: str,
+    connector,
+    mandateId: Optional[str],
+    featureInstanceId: Optional[str],
+    baseConditions: List[str],
+    baseValues: List,
+) -> Optional[Dict[str, Any]]:
+    """Build WHERE clause for files namespace with scope-based visibility.
+
+    Two modes depending on request context:
+
+    WITHOUT instance/mandate context (Files page):
+        Only own files: sysCreatedBy = currentUser
+
+    WITH instance context (instance pages):
+        - sysCreatedBy = me AND featureInstanceId = X (own personal files of this instance)
+        - scope = 'featureInstance' AND featureInstanceId = X
+        - scope = 'mandate' AND mandateId = M (M = mandate of the instance)
+        - scope = 'global'
+
+    Returns:
+        {"condition": <SQL>, "values": [...]} or None when no filtering applies.
+    """
+    conditions = list(baseConditions)
+    values = list(baseValues)
+
+    # ── No context: Files page → only own files ──────────────────────────
+    if not featureInstanceId and not mandateId:
+        conditions.append('"sysCreatedBy" = %s')
+        values.append(currentUser.id)
+        if conditions:
+            return {"condition": " AND ".join(conditions), "values": values}
+        return None
+
+    # ── With context: instance/mandate page → scope-based visibility ──
+    # Resolve the instance's mandate when only featureInstanceId is given.
+    effectiveMandateId = mandateId
+    if featureInstanceId and not effectiveMandateId:
+        try:
+            from modules.datamodels.datamodelFeatures import FeatureInstance
+            dbApp = getRootDbAppConnector()
+            instances = dbApp.getRecordset(
+                FeatureInstance, recordFilter={"id": featureInstanceId},
+            )
+            if instances:
+                effectiveMandateId = instances[0].get("mandateId") or ""
+        except Exception as e:
+            # Best-effort: without the mandate, the scope='mandate' branch is skipped.
+            logger.warning(f"_buildFilesScopeWhereClause: could not resolve mandate for instance {featureInstanceId}: {e}")
+
+    scopeParts: List[str] = []
+    scopeValues: List = []
+
+    if featureInstanceId:
+        # 1) Own personal files of this specific instance
+        scopeParts.append('("sysCreatedBy" = %s AND "featureInstanceId" = %s)')
+        scopeValues.extend([currentUser.id, featureInstanceId])
+
+        # 2) scope=featureInstance files shared with this instance
+        scopeParts.append('("scope" = \'featureInstance\' AND "featureInstanceId" = %s)')
+        scopeValues.append(featureInstanceId)
+
+    # 3) scope=mandate files of the effective mandate
+    if effectiveMandateId:
+        scopeParts.append('("scope" = \'mandate\' AND "mandateId" = %s)')
+        scopeValues.append(effectiveMandateId)
+
+    # 4) scope=global files
+    # NOTE(review): in mandate-only context (mandateId set, no featureInstanceId)
+    # there is no "own files" branch — the user sees only mandate + global files,
+    # not their personal ones. Confirm this is intended.
+    scopeParts.append('"scope" = \'global\'')
+
+    if scopeParts:
+        conditions.append("(" + " OR ".join(scopeParts) + ")")
+        values.extend(scopeValues)
+
+    if conditions:
+        return {"condition": " AND ".join(conditions), "values": values}
+    return None
+
+
def buildRbacWhereClause(
permissions: UserPermissions,
currentUser: User,
@@ -642,20 +726,29 @@ def buildRbacWhereClause(
return {"condition": "1 = 0", "values": []}
# CRITICAL: featureInstanceId filter is ALWAYS required when provided
- # This ensures data isolation between feature instances regardless of access level
+ # This ensures data isolation between feature instances regardless of access level.
+ # EXCEPTION: files namespace handles featureInstanceId inside its own scope logic
+ # because files with scope=global or scope=mandate must remain visible even when
+ # they belong to a different (or no) featureInstanceId.
baseConditions = []
baseValues = []
- if featureInstanceId:
- # Strict filter: only records for this exact feature instance
+ namespace = TABLE_NAMESPACE.get(table, "system")
+ if featureInstanceId and namespace != "files":
baseConditions.append('"featureInstanceId" = %s')
baseValues.append(featureInstanceId)
# All records within the feature instance - only featureInstanceId filtering
if readLevel == AccessLevel.ALL:
+ namespaceAll = TABLE_NAMESPACE.get(table, "system")
+ # Files: scope-based context filtering applies even with ALL access
+ if namespaceAll == "files":
+ return _buildFilesScopeWhereClause(
+ currentUser, table, connector, mandateId, featureInstanceId,
+ baseConditions, baseValues,
+ )
# Chat / AI Workspace: even DATA read ALL must not list other users' rows in a
# shared featureInstance (stale RBAC rules or merged roles). Same as MY.
- namespaceAll = TABLE_NAMESPACE.get(table, "system")
if featureInstanceId and namespaceAll == "chat":
userIdFieldAll = "sysCreatedBy"
if table == "UserInDB":
@@ -697,7 +790,19 @@ def buildRbacWhereClause(
# Determine namespace for this table
namespace = TABLE_NAMESPACE.get(table, "system")
- # For user-owned namespaces (chat, files, automation):
+ # ── Files namespace: scope-based visibility ──────────────────────
+ # GROUP for files = own files + shared files based on scope field:
+ # - scope='global' → visible to everyone
+ # - scope='mandate' → visible to users in that mandate
+ # - scope='featureInstance' → visible to users with access to that instance
+ # - scope='personal' → only visible to owner (sysCreatedBy)
+ if namespace == "files":
+ return _buildFilesScopeWhereClause(
+ currentUser, table, connector, mandateId, featureInstanceId,
+ baseConditions, baseValues,
+ )
+
+ # For user-owned namespaces (chat, automation):
# GROUP has no meaning - these tables have no mandate context
# But still apply featureInstanceId filter if provided
if namespace in USER_OWNED_NAMESPACES:
diff --git a/modules/migration/migrateRootUsers.py b/modules/migration/migrateRootUsers.py
index 11424987..ebcb1a3e 100644
--- a/modules/migration/migrateRootUsers.py
+++ b/modules/migration/migrateRootUsers.py
@@ -242,7 +242,7 @@ def migrateRootUsers(db, dryRun: bool = False) -> dict:
result = rootInterface._provisionMandateForUser(
userId=userId,
mandateName=f"Home {username}",
- planKey="TRIAL_7D",
+ planKey="TRIAL_14D",
)
targetMandateId = result["mandateId"]
stats["mandatesCreated"] += 1
diff --git a/modules/migration/seedData/ui_language_seed.json b/modules/migration/seedData/ui_language_seed.json
new file mode 100644
index 00000000..2d2193c8
--- /dev/null
+++ b/modules/migration/seedData/ui_language_seed.json
@@ -0,0 +1,13204 @@
+[
+ {
+ "id": "xx",
+ "label": "Basisset (Meta)",
+ "entries": [
+ {
+ "context": "ui",
+ "key": "+41 123 456 789",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "1 Benutzer ausgewählt",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "ABGEBROCHEN",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "ABGESCHLOSSEN",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Abbrechen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Abgeschlossen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Abmelden",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Admin-Einstellungen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Administrative Einstellungen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Administrator",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Adresse",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Agent Assist (AA)",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Aktionen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Aktiv",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Aktiviert",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Aktualisieren",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Aktuelle Transkripte",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Alle",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Alle Dateien",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Alle Daten als CSV exportieren",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Alle Elemente auswählen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Alle Nicht-Standard-Sprachsets jetzt mit dem deutschen Master synchronisieren?",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Alle abwählen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Alle aktualisieren",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Alle auswählen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Alle {count} Elemente löschen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Analysiere Workflow...",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Anmelden",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Anrufer",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Anzeigen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Anzeigename",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Audio",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Auf Standard zurücksetzen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Aufgaben",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Ausführen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Ausgewählte Datei:",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Auth-Anbieter",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Authentifizierungsanbieter",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Authentifizierungstoken abgelaufen oder ungültig. Bitte verbinden Sie Ihr Microsoft-Konto erneut.",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Automatisierung erfolgreich erstellt",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Automatisierungen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Basisdaten",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Bearbeiten",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Befehl eingeben (z.B., \"Erstelle ein neues Projekt namens 'Hauptstrasse 42'\")",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Beginne ein Gespräch, indem du eine Nachricht eingibst, eine Vorlage auswählst oder einen vorherigen Workflow fortsetzt …",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Beginnen Sie mit:",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Bei Genehmigung planen wir einen Einrichtungsanruf zur Konfiguration Ihrer Integration.",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Beim Hochladen ist ein Fehler aufgetreten.",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Beim Hochladen ist ein unerwarteter Fehler aufgetreten.",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Belege verwalten",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Benutzer",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Benutzer auswählen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Benutzer bearbeiten",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Benutzer erstellen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Benutzer hinzufügen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Benutzer löschen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Benutzer werden geladen...",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Benutzer-Zugriff verwalten",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Benutzerdefinierter Titel (optional)",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Benutzerinformationen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Benutzerinformationen erfolgreich aktualisiert",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Benutzerinformationen werden geladen...",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Benutzername",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Benutzerverwaltung - Teammitglieder und Berechtigungen verwalten",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Berechtigung",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Berechtigungsstufe",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Beschreibung",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Beschreibung der Rolle",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Betrachter",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Betreff",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Bezeichnung",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Bieten Sie Unterstützung im Live-Chat und setzen Sie intelligente Chatbots in allen Kanälen ein.",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Bild",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Bis",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Bitte geben Sie eine gültige E-Mail-Adresse ein",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Bitte wählen Sie mindestens einen Benutzer aus",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Branche",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Branche ist erforderlich",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Buchungsbetrag",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Buchungspositionen verwalten",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Buchungswährung",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Chat Platform (CP)",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Chat leeren...",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Chatbereich",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Darstellung",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Datei",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Datei anhängen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Datei bearbeiten",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Datei bereits vorhanden",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Datei entfernen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Datei erfolgreich hochgeladen!",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Datei herunterladen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Datei hier ablegen...",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Datei hinzufügen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Datei hochladen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Datei löschen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Datei vorschauen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Datei-Ablage während Workflow deaktiviert",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Dateien",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Dateien anhängen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Dateien auswählen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Dateien hier ablegen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Dateien hier ablegen zum Anhängen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Dateien hierher ziehen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Dateien hochladen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Dateien werden geladen...",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Dateien werden verarbeitet...",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Dateigröße",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Dateiname",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Dateityp",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Dateiverwaltung - Dokumente hochladen und organisieren",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Dateivorschau",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Daten aktualisieren",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Daten empfangen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Daten gesendet",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Datenverwaltung",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Datenverwaltung - Datenimporte und -exporte verwalten",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Datenverwaltung mit Tabellen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Datum",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Dauer",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Details",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Deutsch",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Die Datei \"{fileName}\" existiert bereits mit identischem Inhalt. Die vorhandene Datei wird wiederverwendet.",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Die Erstellung einer neuen Sprache kann AI-Guthaben auf Ihrem Mandats-Pool belasten. Fortfahren?",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Dies ist Ihr Ausgangspunkt für den Zugriff auf alle Arbeitsbereich-Features und -Tools.",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Diese Aktion kann nicht rückgängig gemacht werden.",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Diese Datei scheint beschädigt zu sein. Sie hat eine PDF-Erweiterung, enthält aber Textinhalte. Bitte laden Sie die Datei erneut hoch, falls möglich.",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Dieser Bereich enthält alle Verwaltungs- und Management-Tools für Ihren Arbeitsbereich.",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Dieses Element auswählen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Dieses Element kann nicht ausgewählt werden",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Dieses Feld wird von {provider} verwaltet und kann nicht geändert werden",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Dokument",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Dokument erfolgreich erstellt",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Dokument herunterladen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Dokument vorschauen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Dokumente",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Dokumente auflisten",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Dokumentname",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Dunkel",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Durchsuchen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "E-Mail",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "E-Mail-Adresse",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "E-Mail-Adresse ist erforderlich",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "E-Mail-Bestätigung",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Echtzeit-Datensynchronisation:",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Eigenschaften",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Eingereichte Daten:",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Einrichtungsanruf",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Einstellungen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Einstellungen erfolgreich gespeichert!",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Einstellungen werden in zukünftigen Updates hinzugefügt.",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Einstellungen wurden erfolgreich zurückgesetzt.",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Einträge",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Einträge pro Seite:",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Empfänger",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Endzeit",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "English",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Entdeckte Sites",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Entfernen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Erfolgreich",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Erfolgsrate",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Erleben Sie die Zukunft der Mandantenkommunikation durch unsere strategische Partnerschaft mit Spitch.ai. Diese bahnbrechende Integration verwandelt Ihre PowerOn-Plattform in ein intelligentes Telefonie-System, das externe Mandanten nahtlos mit Unternehmen verbindet.",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Erneut versuchen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Erste Seite",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Erstellen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Erstellen und verwalten Sie RBAC-Rollen und deren Berechtigungen.",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Erstellen...",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Erstellt",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Erstellte Dateien",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Erstellungsdatum",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Exportiere...",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Externe E-Mail",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Externe E-Mail-Adresse eingeben",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Externen Benutzernamen eingeben",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Externer Benutzername",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "FEHLER",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "FEHLGESCHLAGEN",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Falls Sie Fragen zu Ihrem Mandat oder dem Integrationsprozess haben, zögern Sie nicht, unser Support-Team zu kontaktieren.",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Fehler",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Aktualisieren der Benutzerinformationen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Erstellen der Automatisierung",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Erstellen der Organisation",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Erstellen der Position",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Erstellen der RBAC-Regel",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Erstellen der Rolle",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Erstellen des Dokuments",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Erstellen des Mandats",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Erstellen des Prompts",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Erstellen des Team-Mitglieds",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Erstellen des Vertrags",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Erstellen des Zugriffs",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Laden der Benutzer",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Laden der Benutzer:",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Laden der Benutzerinformationen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Laden der Dateien:",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Laden der Logs",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Laden der Nachrichten:",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Laden der Prompts",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Laden der Prompts:",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Laden der SharePoint Dokumente:",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Laden der Vorschau",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Laden der Workflows:",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Löschen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Speichern der Einstellungen. Bitte versuchen Sie es erneut.",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Teilen des Prompts",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Verarbeiten der Dateien",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Fehler:",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Fehlgeschlagen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Filter",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Filter löschen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Filter: {value}",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Firma",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Firmenname",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Firmenname ist erforderlich",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Folgenachricht wird gesendet...",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Fortfahren",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Fortsetzen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Fragen?",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Français",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Fügen Sie eine Nachricht für die Empfänger hinzu",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "GESTOPPT",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Geben Sie Ihren Firmennamen ein",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Geben Sie Kunden einen schnellen und effizienten Selbstservice für Sprach- und Textanfragen, der 24/7 verfügbar ist.",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Geben Sie den Inhalt des Prompts ein",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Geben Sie einen Namen für den Prompt ein",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Geben Sie einen benutzerdefinierten Titel ein",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Geplante und automatisierte Workflows",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Geschäftszeiten",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Geschäftszeiten & Zeitzone",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Gespräch fortsetzen...",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Gestartet",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Gestartet:",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Gestoppt",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Geteilt",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Geteilte Dateien",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Globale Sprachsets verwalten (SysAdmin).",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Google",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Google verbinden",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Google-Verbindung erstellen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Google-Verbindung hinzufügen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Grundlegende Daten und Ressourcen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Größe",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Hell",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Herunterladen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Hinzufügen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Hochgeladen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Hochladen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "ID",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "INFO",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Identifizieren und authentifizieren Sie Anrufer in Sekunden mit kontinuierlicher Verifizierung und Sicherheit.",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Ihre Anfrage wird verarbeitet...",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "In die Zwischenablage kopiert",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Inaktiv",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Information",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Inhalt",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Inhalt ist erforderlich",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Ja",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Jetzt anmelden",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Jetzt überspringen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "KI-erstellt",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "KI-gestützte Dokumentengenerierung:",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Kein Auth-Anbieter",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Kein Benutzername",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Kein Nachrichteninhalt verfügbar",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Kein Name",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Kein Workflow ausgewählt",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Keine",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Keine Benutzer verfügbar",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Keine Berechtigung",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Keine Berechtigung zum Löschen des Prompts",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Keine Dateien gefunden.",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Keine Daten verfügbar",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Keine E-Mail",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Keine Einträge",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Keine Logs für diesen Workflow verfügbar",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Keine Microsoft-Verbindungen gefunden. Bitte erstellen Sie zuerst eine Verbindung.",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Keine Optionen verfügbar",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Keine Prompts verfügbar",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Keine SharePoint-Sites gefunden",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Keine Sprach-Integrations-Daten gefunden. Bitte melden Sie sich zuerst an, um auf die Einstellungen zuzugreifen.",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Keine Sprache",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Keine Transkripte vorhanden",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Keine Vorschau verfügbar",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Keine Workflows gefunden",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Keine Workflows verfügbar",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Keine hochgeladenen Dateien gefunden.",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Keine mit Ihnen geteilten Dateien gefunden.",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Keine von der KI erstellten Dateien gefunden.",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Klicken Sie erneut zum Bestätigen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Klicken Sie erneut zum Bestätigen der Löschung",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Klicken Sie, um zu öffnen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Knowledge Agent (KA)",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Konfigurieren Sie administrative Einstellungen und Systempräferenzen.",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Konfigurieren und verwalten Sie rollenbasierte Zugriffssteuerungsregeln.",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Kontakte einrichten",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Kontaktinformationen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Kontostatus",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Kopieren",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Kopiert",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Kosteneinsparungen & Effizienz:",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Kundenverträge verwalten",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Lade Filterwerte...",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Lade Fortschritt...",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Laden...",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Land",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Land ist erforderlich",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Leer = Zugriff auf alle Verträge",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Letzte Aktivität",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Letzte Aktivität:",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Letzte Aktivitäten - Sehen Sie Ihre neueste Arbeit",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Letzte Seite",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Link konnte nicht gesendet werden",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Log",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Logs konnten nicht geladen werden",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Logs werden geladen...",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Lokal",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "LÄUFT",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Lädt hoch...",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Läuft",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Läuft ab am",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Löschen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Löschen ({count})",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Löschen...",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "MIME-Typ",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Management-Tools umfassen:",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Mandanten können jederzeit auf die technische SIP-Nummer umstellen und dabei erhebliche Telefoniekosten sparen. Die Integration funktioniert wie ein weiterer Connector (Outlook, SharePoint) und wird nahtlos in Ihren bestehenden Workflow integriert.",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Mandat erfolgreich eingereicht!",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Mandat erfolgreich erstellt",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Mandat erstellen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Mandat hinzufügen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Mandat-ID",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Mandate",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Mandate und Berechtigungen verwalten",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Mandatsverwaltung",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Mehr erfahren",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Meine Uploads",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Microsoft",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Microsoft Verbindungen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Microsoft verbinden",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Microsoft-Verbindung erstellen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Microsoft-Verbindung hinzufügen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Mitglied hinzufügen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "MwSt %",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "MwSt Betrag",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Möchten Sie jetzt Kontakte für Ihr Mandat einrichten? Sie können dies auch später in den Einstellungen tun.",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Nach unten scrollen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Nachricht (optional)",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Nachricht eingeben...",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Nachricht wird gesendet...",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Nachrichten",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Nahtloser Mandanten-Workflow:",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Name",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Name des Unternehmens",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Name ist erforderlich",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Navigation - Erkunden Sie alle verfügbaren Tools",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Nein",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Neu starten",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Neue Automatisierung",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Neue Automatisierung erstellen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Neue Datei hochladen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Neue Organisation",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Neue Organisation erstellen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Neue Position",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Neue Position erstellen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Neue RBAC-Regel erstellen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Neue Rolle",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Neue Rolle erstellen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Neue Sprache",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Neuen Prompt erstellen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Neuen Vertrag erstellen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Neuen Zugriff erstellen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Neuer Prompt",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Neuer Vertrag",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Neuer Zugriff",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Neues Dokument",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Neues Dokument erstellen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Neues Mandat erstellen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Neues Team-Mitglied erstellen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Neues Transkript",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Nicht verfügbar",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Noch keine Befehle ausgeführt. Senden Sie einen Befehl, um Ergebnisse hier zu sehen.",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Noch keinen Workflow ausgewählt",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Nochmal versuchen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Nächste Seite",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Oder geben Sie Ihre Nachricht ein...",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Ordnerpfade",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Organisation",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Organisation erfolgreich erstellt",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Organisationen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Originalbetrag",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Originalwährung",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "PDF",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Passwort",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Passwort eingeben",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Passwort-Link gesendet!",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Passwort-Link senden",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Pfad",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Position erfolgreich erstellt",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Positionen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Postleitzahl",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Postleitzahl ist erforderlich",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Projekte",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Projektverwaltung",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Projektverwaltung und -organisation",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Prompt",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Prompt Einstellungen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Prompt Vorlage",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Prompt ausführen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Prompt auswählen...",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Prompt bearbeiten",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Prompt erfolgreich erstellt",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Prompt erstellen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Prompt hinzufügen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Prompt löschen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Prompt teilen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Prompt wird gelöscht...",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Prompt-Inhalt",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Prompt-Inhalt darf 10.000 Zeichen nicht überschreiten",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Prompt-Inhalt darf nicht leer sein",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Prompt-Name",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Prompt-Name darf 100 Zeichen nicht überschreiten",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Prompt-Name darf nicht leer sein",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Prompts",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Prompts für Ihren KI-Assistenten erstellen und verwalten",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Prompts verwalten",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Prompts werden geladen...",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Python",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Quelle",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "RBAC-Regel erfolgreich erstellt",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "RBAC-Regel hinzufügen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "RBAC-Regeln",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "RBAC-Regelverwaltung",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "RBAC-Rollen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "RBAC-Rollenverwaltung",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Registrieren",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Revolutionäre Telefonie-Integration mit Spitch.ai",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Rohtext in die Zwischenablage kopieren",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Rolle",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Rolle erfolgreich erstellt",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Rolle hinzufügen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Rollen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Rollen-ID",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Rollenbasierte Zugriffssteuerungsregeln",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Rollenverwaltung",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Rufname am Telefon",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Runde",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Runden",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Schließen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Schnellzugriff",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Schnellzugriff - Springen Sie zu häufig verwendeten Features",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Seite",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Seite {page} von {total} ({count} Einträge)",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Senden",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Service",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Service-Verbindungen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "SharePoint Dokumente",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "SharePoint Site URL",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "SharePoint Test",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Sie erhalten in den nächsten Minuten eine Bestätigungs-E-Mail.",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Sie können auch auf den Upload-Button klicken",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Sie müssen sich zuerst für die Sprach-Integration anmelden, um auf die Transkriptverwaltung zuzugreifen.",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Sind Sie sicher, dass Sie \"{name}\" löschen möchten?",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Sind Sie sicher, dass Sie Workflow \"{id}...\" löschen möchten?",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Sind Sie sicher, dass Sie alle Sprach-Integrations-Einstellungen zurücksetzen möchten? Diese Aktion kann nicht rückgängig gemacht werden.",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Sind Sie sicher, dass Sie das ausgewählte Element löschen möchten?",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Sind Sie sicher, dass Sie den Workflow \"{name}\" löschen möchten?",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Sind Sie sicher, dass Sie die Datei \"{name}\" löschen möchten?",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Sind Sie sicher, dass Sie die {count} ausgewählten Elemente löschen möchten?",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Sind Sie sicher, dass Sie die {service} Verbindung löschen möchten?",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Sind Sie sicher, dass Sie diesen Benutzer löschen möchten?",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Sind Sie sicher, dass Sie {count} Benutzer löschen möchten?",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Sind Sie sicher, dass Sie {count} Prompts löschen möchten?",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Sind Sie sicher, dass Sie {count} Verbindungen löschen möchten?",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Sites entdecken",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Sortierung {position}: {direction}",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Speech Analytics (SA)",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Speichern",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Speichern...",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Spitch prüft vor jedem Anruf die Mandantenberechtigung bei PowerOn, während alle Datenänderungen zentral von PowerOn initiiert werden. Call-Transkripte werden in Echtzeit in Ihrer PowerOn-Datenbank gespeichert, mit vollständiger Mandantenisolation und Sicherheit. Bei Ausfällen werden Anrufe automatisch blockiert, um die Integrität zu gewährleisten.",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Sprach Integration",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Sprach-Einstellungen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Sprach-Integration Einstellungen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Sprache",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Sprachset {code} wirklich löschen?",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Stadt",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Stadt ist erforderlich",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Start",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Startzeit",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Status",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Stellen Sie alles, was Ihre Agenten benötigen, in ihren Händen bereit, mit einem einheitlichen Agent-Desktop.",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Stoppen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Straße",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Straße ist erforderlich",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Suchen Sie nach Standorten über Adresse oder Koordinaten, oder verwenden Sie natürliche Sprache, um Projekte zu erstellen und zu verwalten.",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Suchen...",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Systemadministrator",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Systemeinstellungen - Arbeitsbereich-Einstellungen konfigurieren",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Tabelle",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Tags",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Team-Bereich",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Team-Mitglied erfolgreich erstellt",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Team-Mitglieder",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Team-Mitglieder verwalten",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Team-Mitglieder verwalten, Berechtigungen festlegen und Zusammenarbeitseinstellungen konfigurieren",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Teilen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Telefon",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Telefonnummer",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Telefonnummer ist erforderlich",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Text",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Textvorschau",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Theme",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Token",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Transkript",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Transkript wird verarbeitet...",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Transkriptverwaltung",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Trennungsfehler",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Treuhand",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Treuhandverwaltung",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Trustee-Organisationen verwalten",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Trustee-Rollen verwalten",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Typ",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "UI-Sprachen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Unbekannt",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Unbekannte Größe",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Unbekanntes Datum",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Unbenannt",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Unbenannter Workflow",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Ungültige Auswahl",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Ungültige URL",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Ungültiges Datum",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Ungültiges Datumsformat",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Ungültiges E-Mail-Format",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Ungültiges JSON",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Unser Team wird Ihr Mandat innerhalb von 1-2 Werktagen überprüfen.",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Unsere bereits aktive Dokumenten-Extraktions-Engine generiert automatisch personalisierte Dokumente für Spitch, basierend auf Mandantenspezifischen Daten. Die KI nutzt FAQ-Datenbanken, Mitarbeiterinformationen und Service-Details, um jeden Anruf kontextuell und hochpersonalisiert zu gestalten.",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Unternehmensinformationen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Unterstützt von",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Upload fehlgeschlagen. Bitte versuchen Sie es erneut.",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "VERARBEITUNG",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Valutadatum",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Verarbeitung",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Verbinden",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Verbindung aktualisieren",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Verbindung testen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Verbindungen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Verbindungen werden geladen...",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Verbindungsfehler",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Verbunden am",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Vereinheitlichen und liefern Sie Informationen an Ihre Kunden und Mitarbeiter, wann und wo sie sie benötigen.",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Verfügbare Tools",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Verfügbare Workflows",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Version",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Versuchen Sie, Ihr Microsoft-Konto auf der Verbindungsseite erneut zu verbinden.",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Vertrag",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Vertrag (optional)",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Vertrag erfolgreich erstellt",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Verträge",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Verwalten Sie Daten über Tabellen. Wählen Sie eine Tabelle aus oder verwenden Sie natürliche Sprache, um Befehle auszuführen.",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Verwalten Sie Ihre Kontoinformationen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Verwalten Sie Ihre Service-Verbindungen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Verwalten Sie Ihre Sprach-Integrations-Konfiguration und Einstellungen.",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Verwalten Sie Mandate und deren zugehörige Berechtigungen.",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Verwaltet von {provider}",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Verwaltung der Benutzerzugriffe auf Organisationen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Verwaltung der Buchungspositionen (Speseneinträge)",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Verwaltung der Dokumente und Belege",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Verwaltung der Feature-spezifischen Rollen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Verwaltung der Kundenverträge",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Verwaltung der Treuhand-Organisationen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Verwaltung von Treuhand-Organisationen, Verträgen und Buchungen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Verwaltungs- und Management-Tools",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Verwende Vorlage:",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Video",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Vielen Dank für Ihr Interesse an unserer Sprach Integration powered by Spitch.ai. Wir haben Ihr Mandat erhalten und werden es in Kürze überprüfen.",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Virtual Assistant (VA)",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Voice Biometrics (VB)",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Vollständiger Name",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Von",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Von der Registrierung bis zur technischen Einrichtung - Ihr Mandant registriert sich bei PowerOn für Telefonie-Services, lädt Dokumente hoch und erhält automatisch eine technische SIP-Nummer von Spitch. Die Call-Weiterleitung kann jederzeit aktiviert oder deaktiviert werden, was maximale Flexibilität und BCM-Sicherheit gewährleistet.",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Vorherige Seite",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Vorschau",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Vorschau für diesen Dateityp nicht verfügbar",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Vorschau schließen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Vorschau wird geladen...",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "WARTEND",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Wartend",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Was passiert als nächstes?",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Wechseln Sie zwischen hellem und dunklem Modus",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Werkzeuge",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Werkzeuge und Hilfsmittel",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Wie möchten Sie am Telefon genannt werden?",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Wiederholen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Willkommen in Ihrem Arbeitsbereich",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Wird gesendet...",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Wird gestoppt...",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Wird geteilt...",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Wird hochgeladen...",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Wird verarbeitet...",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Workflow",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Workflow Fortschritt",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Workflow auswählen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Workflow fehlgeschlagen.",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Workflow fortsetzen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Workflow läuft... Warte auf Logs...",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Workflow löschen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Workflow stoppen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Workflow wird fortgesetzt",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Workflow wird gelöscht...",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Workflow-Automatisierungen verwalten",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Workflow-Nachrichten werden geladen...",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Workflow-Verlauf",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Workflows",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Workflows werden geladen...",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Wähle einen Workflow aus der Liste aus oder starte einen neuen Workflow",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Wählen Sie Ihre bevorzugte Sprache",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "You",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Zeitzone",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Zentrale",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Zu dunklem Modus wechseln",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Zu hellem Modus wechseln",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Zugriff",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Zugriff erfolgreich erstellt",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Zugriff verweigert",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Zuletzt geprüft",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Zum Bestätigen klicken",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Zum Bestätigen klicken...",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Zum Ein-/Ausklappen klicken",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Zum Filtern klicken",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Zum Sortieren klicken",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Zurück zur Sprach Integration",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "angehängt",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "ausgewählt",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "k. A.",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "kontakt@firma.com",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "oder",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "z.B. Beleg.pdf",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "z.B. Finanzdienstleistungen, Technologie, etc.",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "z.B. Muster AG 2026",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "z.B. Treuhand AG Zürich",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "z.B. admin, operate, userreport",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "z.B. treuhand-ag-zuerich",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "{authority} Verbindung bearbeiten",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "{column} filtern",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "{count} Benutzer ausgewählt",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "{fieldLabel} ist erforderlich",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "{fieldLabel} muss eine gültige Ganzzahl sein",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "{fieldLabel} muss eine gültige Zahl sein",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Änderungen speichern",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Über",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Überprüfungsprozess",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Übersicht - Sehen Sie den Arbeitsbereich-Status und Updates",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Überwachen Sie automatisch 100% der Gespräche, um wertvolle Einblicke für Ihr Unternehmen zu erhalten.",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "(gefiltert nach {name})",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "({count} gefiltert)",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Abonnement, Einstellungen und Guthaben pro Mandant",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Abrechnung",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Aktion",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Benutzer-Billing",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Benutzer-Guthaben",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Benutzer:",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Deaktiviert",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Du hast Zugriff auf {instanceCount} {instanceWord} in {mandateCount} {mandateWord}.",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Einstellungen gespeichert!",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Feature-Instanz",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Feature-Instanzen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Speichern",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Gesamtguthaben",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Mandant:",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Mandanten",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Mandanten-Billing",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Mandanten-Guthaben",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Mandant",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Niedrig",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Transaktionen",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "Warnschwelle",
+ "value": ""
+ },
+ {
+ "context": "ui",
+ "key": "✓ Mandat eingereicht",
+ "value": ""
+ }
+ ],
+ "status": "complete",
+ "isDefault": true
+ },
+ {
+ "id": "de",
+ "label": "Deutsch",
+ "entries": [
+ {
+ "context": "ui",
+ "key": "+41 123 456 789",
+ "value": "+41 123 456 789"
+ },
+ {
+ "context": "ui",
+ "key": "1 Benutzer ausgewählt",
+ "value": "1 Benutzer ausgewählt"
+ },
+ {
+ "context": "ui",
+ "key": "ABGEBROCHEN",
+ "value": "ABGEBROCHEN"
+ },
+ {
+ "context": "ui",
+ "key": "ABGESCHLOSSEN",
+ "value": "ABGESCHLOSSEN"
+ },
+ {
+ "context": "ui",
+ "key": "Abbrechen",
+ "value": "Abbrechen"
+ },
+ {
+ "context": "ui",
+ "key": "Abgeschlossen",
+ "value": "Abgeschlossen"
+ },
+ {
+ "context": "ui",
+ "key": "Abmelden",
+ "value": "Abmelden"
+ },
+ {
+ "context": "ui",
+ "key": "Admin-Einstellungen",
+ "value": "Admin-Einstellungen"
+ },
+ {
+ "context": "ui",
+ "key": "Administrative Einstellungen",
+ "value": "Administrative Einstellungen"
+ },
+ {
+ "context": "ui",
+ "key": "Administrator",
+ "value": "Administrator"
+ },
+ {
+ "context": "ui",
+ "key": "Adresse",
+ "value": "Adresse"
+ },
+ {
+ "context": "ui",
+ "key": "Agent Assist (AA)",
+ "value": "Agent Assist (AA)"
+ },
+ {
+ "context": "ui",
+ "key": "Aktionen",
+ "value": "Aktionen"
+ },
+ {
+ "context": "ui",
+ "key": "Aktiv",
+ "value": "Aktiv"
+ },
+ {
+ "context": "ui",
+ "key": "Aktiviert",
+ "value": "Aktiviert"
+ },
+ {
+ "context": "ui",
+ "key": "Aktualisieren",
+ "value": "Aktualisieren"
+ },
+ {
+ "context": "ui",
+ "key": "Aktuelle Transkripte",
+ "value": "Aktuelle Transkripte"
+ },
+ {
+ "context": "ui",
+ "key": "Alle",
+ "value": "Alle"
+ },
+ {
+ "context": "ui",
+ "key": "Alle Dateien",
+ "value": "Alle Dateien"
+ },
+ {
+ "context": "ui",
+ "key": "Alle Daten als CSV exportieren",
+ "value": "Alle Daten als CSV exportieren"
+ },
+ {
+ "context": "ui",
+ "key": "Alle Elemente auswählen",
+ "value": "Alle Elemente auswählen"
+ },
+ {
+ "context": "ui",
+ "key": "Alle Nicht-Standard-Sprachsets jetzt mit dem deutschen Master synchronisieren?",
+ "value": "Alle Nicht-Standard-Sprachsets jetzt mit dem deutschen Master synchronisieren?"
+ },
+ {
+ "context": "ui",
+ "key": "Alle abwählen",
+ "value": "Alle abwählen"
+ },
+ {
+ "context": "ui",
+ "key": "Alle aktualisieren",
+ "value": "Alle aktualisieren"
+ },
+ {
+ "context": "ui",
+ "key": "Alle auswählen",
+ "value": "Alle auswählen"
+ },
+ {
+ "context": "ui",
+ "key": "Alle {count} Elemente löschen",
+ "value": "Alle {count} Elemente löschen"
+ },
+ {
+ "context": "ui",
+ "key": "Analysiere Workflow...",
+ "value": "Analysiere Workflow..."
+ },
+ {
+ "context": "ui",
+ "key": "Anmelden",
+ "value": "Anmelden"
+ },
+ {
+ "context": "ui",
+ "key": "Anrufer",
+ "value": "Anrufer"
+ },
+ {
+ "context": "ui",
+ "key": "Anzeigen",
+ "value": "Anzeigen"
+ },
+ {
+ "context": "ui",
+ "key": "Anzeigename",
+ "value": "Anzeigename"
+ },
+ {
+ "context": "ui",
+ "key": "Audio",
+ "value": "Audio"
+ },
+ {
+ "context": "ui",
+ "key": "Auf Standard zurücksetzen",
+ "value": "Auf Standard zurücksetzen"
+ },
+ {
+ "context": "ui",
+ "key": "Aufgaben",
+ "value": "Aufgaben"
+ },
+ {
+ "context": "ui",
+ "key": "Ausführen",
+ "value": "Ausführen"
+ },
+ {
+ "context": "ui",
+ "key": "Ausgewählte Datei:",
+ "value": "Ausgewählte Datei:"
+ },
+ {
+ "context": "ui",
+ "key": "Auth-Anbieter",
+ "value": "Auth-Anbieter"
+ },
+ {
+ "context": "ui",
+ "key": "Authentifizierungsanbieter",
+ "value": "Authentifizierungsanbieter"
+ },
+ {
+ "context": "ui",
+ "key": "Authentifizierungstoken abgelaufen oder ungültig. Bitte verbinden Sie Ihr Microsoft-Konto erneut.",
+ "value": "Authentifizierungstoken abgelaufen oder ungültig. Bitte verbinden Sie Ihr Microsoft-Konto erneut."
+ },
+ {
+ "context": "ui",
+ "key": "Automatisierung erfolgreich erstellt",
+ "value": "Automatisierung erfolgreich erstellt"
+ },
+ {
+ "context": "ui",
+ "key": "Automatisierungen",
+ "value": "Automatisierungen"
+ },
+ {
+ "context": "ui",
+ "key": "Basisdaten",
+ "value": "Basisdaten"
+ },
+ {
+ "context": "ui",
+ "key": "Bearbeiten",
+ "value": "Bearbeiten"
+ },
+ {
+ "context": "ui",
+ "key": "Befehl eingeben (z.B., \"Erstelle ein neues Projekt namens 'Hauptstrasse 42'\")",
+ "value": "Befehl eingeben (z.B., \"Erstelle ein neues Projekt namens 'Hauptstrasse 42'\")"
+ },
+ {
+ "context": "ui",
+ "key": "Beginne ein Gespräch, indem du eine Nachricht eingibst, eine Vorlage auswählst oder einen vorherigen Workflow fortsetzt …",
+ "value": "Beginne ein Gespräch, indem du eine Nachricht eingibst, eine Vorlage auswählst oder einen vorherigen Workflow fortsetzt …"
+ },
+ {
+ "context": "ui",
+ "key": "Beginnen Sie mit:",
+ "value": "Beginnen Sie mit:"
+ },
+ {
+ "context": "ui",
+ "key": "Bei Genehmigung planen wir einen Einrichtungsanruf zur Konfiguration Ihrer Integration.",
+ "value": "Bei Genehmigung planen wir einen Einrichtungsanruf zur Konfiguration Ihrer Integration."
+ },
+ {
+ "context": "ui",
+ "key": "Beim Hochladen ist ein Fehler aufgetreten.",
+ "value": "Beim Hochladen ist ein Fehler aufgetreten."
+ },
+ {
+ "context": "ui",
+ "key": "Beim Hochladen ist ein unerwarteter Fehler aufgetreten.",
+ "value": "Beim Hochladen ist ein unerwarteter Fehler aufgetreten."
+ },
+ {
+ "context": "ui",
+ "key": "Belege verwalten",
+ "value": "Belege verwalten"
+ },
+ {
+ "context": "ui",
+ "key": "Benutzer",
+ "value": "Benutzer"
+ },
+ {
+ "context": "ui",
+ "key": "Benutzer auswählen",
+ "value": "Benutzer auswählen"
+ },
+ {
+ "context": "ui",
+ "key": "Benutzer bearbeiten",
+ "value": "Benutzer bearbeiten"
+ },
+ {
+ "context": "ui",
+ "key": "Benutzer erstellen",
+ "value": "Benutzer erstellen"
+ },
+ {
+ "context": "ui",
+ "key": "Benutzer hinzufügen",
+ "value": "Benutzer hinzufügen"
+ },
+ {
+ "context": "ui",
+ "key": "Benutzer löschen",
+ "value": "Benutzer löschen"
+ },
+ {
+ "context": "ui",
+ "key": "Benutzer werden geladen...",
+ "value": "Benutzer werden geladen..."
+ },
+ {
+ "context": "ui",
+ "key": "Benutzer-Zugriff verwalten",
+ "value": "Benutzer-Zugriff verwalten"
+ },
+ {
+ "context": "ui",
+ "key": "Benutzerdefinierter Titel (optional)",
+ "value": "Benutzerdefinierter Titel (optional)"
+ },
+ {
+ "context": "ui",
+ "key": "Benutzerinformationen",
+ "value": "Benutzerinformationen"
+ },
+ {
+ "context": "ui",
+ "key": "Benutzerinformationen erfolgreich aktualisiert",
+ "value": "Benutzerinformationen erfolgreich aktualisiert"
+ },
+ {
+ "context": "ui",
+ "key": "Benutzerinformationen werden geladen...",
+ "value": "Benutzerinformationen werden geladen..."
+ },
+ {
+ "context": "ui",
+ "key": "Benutzername",
+ "value": "Benutzername"
+ },
+ {
+ "context": "ui",
+ "key": "Benutzerverwaltung - Teammitglieder und Berechtigungen verwalten",
+ "value": "Benutzerverwaltung - Teammitglieder und Berechtigungen verwalten"
+ },
+ {
+ "context": "ui",
+ "key": "Berechtigung",
+ "value": "Berechtigung"
+ },
+ {
+ "context": "ui",
+ "key": "Berechtigungsstufe",
+ "value": "Berechtigungsstufe"
+ },
+ {
+ "context": "ui",
+ "key": "Beschreibung",
+ "value": "Beschreibung"
+ },
+ {
+ "context": "ui",
+ "key": "Beschreibung der Rolle",
+ "value": "Beschreibung der Rolle"
+ },
+ {
+ "context": "ui",
+ "key": "Betrachter",
+ "value": "Betrachter"
+ },
+ {
+ "context": "ui",
+ "key": "Betreff",
+ "value": "Betreff"
+ },
+ {
+ "context": "ui",
+ "key": "Bezeichnung",
+ "value": "Bezeichnung"
+ },
+ {
+ "context": "ui",
+ "key": "Bieten Sie Unterstützung im Live-Chat und setzen Sie intelligente Chatbots in allen Kanälen ein.",
+ "value": "Bieten Sie Unterstützung im Live-Chat und setzen Sie intelligente Chatbots in allen Kanälen ein."
+ },
+ {
+ "context": "ui",
+ "key": "Bild",
+ "value": "Bild"
+ },
+ {
+ "context": "ui",
+ "key": "Bis",
+ "value": "Bis"
+ },
+ {
+ "context": "ui",
+ "key": "Bitte geben Sie eine gültige E-Mail-Adresse ein",
+ "value": "Bitte geben Sie eine gültige E-Mail-Adresse ein"
+ },
+ {
+ "context": "ui",
+ "key": "Bitte wählen Sie mindestens einen Benutzer aus",
+ "value": "Bitte wählen Sie mindestens einen Benutzer aus"
+ },
+ {
+ "context": "ui",
+ "key": "Branche",
+ "value": "Branche"
+ },
+ {
+ "context": "ui",
+ "key": "Branche ist erforderlich",
+ "value": "Branche ist erforderlich"
+ },
+ {
+ "context": "ui",
+ "key": "Buchungsbetrag",
+ "value": "Buchungsbetrag"
+ },
+ {
+ "context": "ui",
+ "key": "Buchungspositionen verwalten",
+ "value": "Buchungspositionen verwalten"
+ },
+ {
+ "context": "ui",
+ "key": "Buchungswährung",
+ "value": "Buchungswährung"
+ },
+ {
+ "context": "ui",
+ "key": "Chat Platform (CP)",
+ "value": "Chat Platform (CP)"
+ },
+ {
+ "context": "ui",
+ "key": "Chat leeren...",
+ "value": "Chat leeren..."
+ },
+ {
+ "context": "ui",
+ "key": "Chatbereich",
+ "value": "Chatbereich"
+ },
+ {
+ "context": "ui",
+ "key": "Darstellung",
+ "value": "Darstellung"
+ },
+ {
+ "context": "ui",
+ "key": "Datei",
+ "value": "Datei"
+ },
+ {
+ "context": "ui",
+ "key": "Datei anhängen",
+ "value": "Datei anhängen"
+ },
+ {
+ "context": "ui",
+ "key": "Datei bearbeiten",
+ "value": "Datei bearbeiten"
+ },
+ {
+ "context": "ui",
+ "key": "Datei bereits vorhanden",
+ "value": "Datei bereits vorhanden"
+ },
+ {
+ "context": "ui",
+ "key": "Datei entfernen",
+ "value": "Datei entfernen"
+ },
+ {
+ "context": "ui",
+ "key": "Datei erfolgreich hochgeladen!",
+ "value": "Datei erfolgreich hochgeladen!"
+ },
+ {
+ "context": "ui",
+ "key": "Datei herunterladen",
+ "value": "Datei herunterladen"
+ },
+ {
+ "context": "ui",
+ "key": "Datei hier ablegen...",
+ "value": "Datei hier ablegen..."
+ },
+ {
+ "context": "ui",
+ "key": "Datei hinzufügen",
+ "value": "Datei hinzufügen"
+ },
+ {
+ "context": "ui",
+ "key": "Datei hochladen",
+ "value": "Datei hochladen"
+ },
+ {
+ "context": "ui",
+ "key": "Datei löschen",
+ "value": "Datei löschen"
+ },
+ {
+ "context": "ui",
+ "key": "Datei vorschauen",
+ "value": "Datei vorschauen"
+ },
+ {
+ "context": "ui",
+ "key": "Datei-Ablage während Workflow deaktiviert",
+ "value": "Datei-Ablage während Workflow deaktiviert"
+ },
+ {
+ "context": "ui",
+ "key": "Dateien",
+ "value": "Dateien"
+ },
+ {
+ "context": "ui",
+ "key": "Dateien anhängen",
+ "value": "Dateien anhängen"
+ },
+ {
+ "context": "ui",
+ "key": "Dateien auswählen",
+ "value": "Dateien auswählen"
+ },
+ {
+ "context": "ui",
+ "key": "Dateien hier ablegen",
+ "value": "Dateien hier ablegen"
+ },
+ {
+ "context": "ui",
+ "key": "Dateien hier ablegen zum Anhängen",
+ "value": "Dateien hier ablegen zum Anhängen"
+ },
+ {
+ "context": "ui",
+ "key": "Dateien hierher ziehen",
+ "value": "Dateien hierher ziehen"
+ },
+ {
+ "context": "ui",
+ "key": "Dateien hochladen",
+ "value": "Dateien hochladen"
+ },
+ {
+ "context": "ui",
+ "key": "Dateien werden geladen...",
+ "value": "Dateien werden geladen..."
+ },
+ {
+ "context": "ui",
+ "key": "Dateien werden verarbeitet...",
+ "value": "Dateien werden verarbeitet..."
+ },
+ {
+ "context": "ui",
+ "key": "Dateigröße",
+ "value": "Dateigröße"
+ },
+ {
+ "context": "ui",
+ "key": "Dateiname",
+ "value": "Dateiname"
+ },
+ {
+ "context": "ui",
+ "key": "Dateityp",
+ "value": "Dateityp"
+ },
+ {
+ "context": "ui",
+ "key": "Dateiverwaltung - Dokumente hochladen und organisieren",
+ "value": "Dateiverwaltung - Dokumente hochladen und organisieren"
+ },
+ {
+ "context": "ui",
+ "key": "Dateivorschau",
+ "value": "Dateivorschau"
+ },
+ {
+ "context": "ui",
+ "key": "Daten aktualisieren",
+ "value": "Daten aktualisieren"
+ },
+ {
+ "context": "ui",
+ "key": "Daten empfangen",
+ "value": "Daten empfangen"
+ },
+ {
+ "context": "ui",
+ "key": "Daten gesendet",
+ "value": "Daten gesendet"
+ },
+ {
+ "context": "ui",
+ "key": "Datenverwaltung",
+ "value": "Datenverwaltung"
+ },
+ {
+ "context": "ui",
+ "key": "Datenverwaltung - Datenimporte und -exporte verwalten",
+ "value": "Datenverwaltung - Datenimporte und -exporte verwalten"
+ },
+ {
+ "context": "ui",
+ "key": "Datenverwaltung mit Tabellen",
+ "value": "Datenverwaltung mit Tabellen"
+ },
+ {
+ "context": "ui",
+ "key": "Datum",
+ "value": "Datum"
+ },
+ {
+ "context": "ui",
+ "key": "Dauer",
+ "value": "Dauer"
+ },
+ {
+ "context": "ui",
+ "key": "Details",
+ "value": "Details"
+ },
+ {
+ "context": "ui",
+ "key": "Deutsch",
+ "value": "Deutsch"
+ },
+ {
+ "context": "ui",
+ "key": "Die Datei \"{fileName}\" existiert bereits mit identischem Inhalt. Die vorhandene Datei wird wiederverwendet.",
+ "value": "Die Datei \"{fileName}\" existiert bereits mit identischem Inhalt. Die vorhandene Datei wird wiederverwendet."
+ },
+ {
+ "context": "ui",
+ "key": "Die Erstellung einer neuen Sprache kann AI-Guthaben auf Ihrem Mandats-Pool belasten. Fortfahren?",
+ "value": "Die Erstellung einer neuen Sprache kann KI-Guthaben auf Ihrem Mandats-Pool belasten. Fortfahren?"
+ },
+ {
+ "context": "ui",
+ "key": "Dies ist Ihr Ausgangspunkt für den Zugriff auf alle Arbeitsbereich-Features und -Tools.",
+ "value": "Dies ist Ihr Ausgangspunkt für den Zugriff auf alle Arbeitsbereich-Features und -Tools."
+ },
+ {
+ "context": "ui",
+ "key": "Diese Aktion kann nicht rückgängig gemacht werden.",
+ "value": "Diese Aktion kann nicht rückgängig gemacht werden."
+ },
+ {
+ "context": "ui",
+ "key": "Diese Datei scheint beschädigt zu sein. Sie hat eine PDF-Erweiterung, enthält aber Textinhalte. Bitte laden Sie die Datei erneut hoch, falls möglich.",
+ "value": "Diese Datei scheint beschädigt zu sein. Sie hat eine PDF-Erweiterung, enthält aber Textinhalte. Bitte laden Sie die Datei erneut hoch, falls möglich."
+ },
+ {
+ "context": "ui",
+ "key": "Dieser Bereich enthält alle Verwaltungs- und Management-Tools für Ihren Arbeitsbereich.",
+ "value": "Dieser Bereich enthält alle Verwaltungs- und Management-Tools für Ihren Arbeitsbereich."
+ },
+ {
+ "context": "ui",
+ "key": "Dieses Element auswählen",
+ "value": "Dieses Element auswählen"
+ },
+ {
+ "context": "ui",
+ "key": "Dieses Element kann nicht ausgewählt werden",
+ "value": "Dieses Element kann nicht ausgewählt werden"
+ },
+ {
+ "context": "ui",
+ "key": "Dieses Feld wird von {provider} verwaltet und kann nicht geändert werden",
+ "value": "Dieses Feld wird von {provider} verwaltet und kann nicht geändert werden"
+ },
+ {
+ "context": "ui",
+ "key": "Dokument",
+ "value": "Dokument"
+ },
+ {
+ "context": "ui",
+ "key": "Dokument erfolgreich erstellt",
+ "value": "Dokument erfolgreich erstellt"
+ },
+ {
+ "context": "ui",
+ "key": "Dokument herunterladen",
+ "value": "Dokument herunterladen"
+ },
+ {
+ "context": "ui",
+ "key": "Dokument vorschauen",
+ "value": "Vorschau des Dokuments anzeigen"
+ },
+ {
+ "context": "ui",
+ "key": "Dokumente",
+ "value": "Dokumente"
+ },
+ {
+ "context": "ui",
+ "key": "Dokumente auflisten",
+ "value": "Dokumente auflisten"
+ },
+ {
+ "context": "ui",
+ "key": "Dokumentname",
+ "value": "Dokumentname"
+ },
+ {
+ "context": "ui",
+ "key": "Dunkel",
+ "value": "Dunkel"
+ },
+ {
+ "context": "ui",
+ "key": "Durchsuchen",
+ "value": "Durchsuchen"
+ },
+ {
+ "context": "ui",
+ "key": "E-Mail",
+ "value": "E-Mail"
+ },
+ {
+ "context": "ui",
+ "key": "E-Mail-Adresse",
+ "value": "E-Mail-Adresse"
+ },
+ {
+ "context": "ui",
+ "key": "E-Mail-Adresse ist erforderlich",
+ "value": "E-Mail-Adresse ist erforderlich"
+ },
+ {
+ "context": "ui",
+ "key": "E-Mail-Bestätigung",
+ "value": "E-Mail-Bestätigung"
+ },
+ {
+ "context": "ui",
+ "key": "Echtzeit-Datensynchronisation:",
+ "value": "Echtzeit-Datensynchronisation:"
+ },
+ {
+ "context": "ui",
+ "key": "Eigenschaften",
+ "value": "Eigenschaften"
+ },
+ {
+ "context": "ui",
+ "key": "Eingereichte Daten:",
+ "value": "Eingereichte Daten:"
+ },
+ {
+ "context": "ui",
+ "key": "Einrichtungsanruf",
+ "value": "Einrichtungsanruf"
+ },
+ {
+ "context": "ui",
+ "key": "Einstellungen",
+ "value": "Einstellungen"
+ },
+ {
+ "context": "ui",
+ "key": "Einstellungen erfolgreich gespeichert!",
+ "value": "Einstellungen erfolgreich gespeichert!"
+ },
+ {
+ "context": "ui",
+ "key": "Einstellungen werden in zukünftigen Updates hinzugefügt.",
+ "value": "Einstellungen werden in zukünftigen Updates hinzugefügt."
+ },
+ {
+ "context": "ui",
+ "key": "Einstellungen wurden erfolgreich zurückgesetzt.",
+ "value": "Einstellungen wurden erfolgreich zurückgesetzt."
+ },
+ {
+ "context": "ui",
+ "key": "Einträge",
+ "value": "Einträge"
+ },
+ {
+ "context": "ui",
+ "key": "Einträge pro Seite:",
+ "value": "Einträge pro Seite:"
+ },
+ {
+ "context": "ui",
+ "key": "Empfänger",
+ "value": "Empfänger"
+ },
+ {
+ "context": "ui",
+ "key": "Endzeit",
+ "value": "Endzeit"
+ },
+ {
+ "context": "ui",
+ "key": "English",
+ "value": "English"
+ },
+ {
+ "context": "ui",
+ "key": "Entdeckte Sites",
+ "value": "Entdeckte Sites"
+ },
+ {
+ "context": "ui",
+ "key": "Entfernen",
+ "value": "Entfernen"
+ },
+ {
+ "context": "ui",
+ "key": "Erfolgreich",
+ "value": "Erfolgreich"
+ },
+ {
+ "context": "ui",
+ "key": "Erfolgsrate",
+ "value": "Erfolgsrate"
+ },
+ {
+ "context": "ui",
+ "key": "Erleben Sie die Zukunft der Mandantenkommunikation durch unsere strategische Partnerschaft mit Spitch.ai. Diese bahnbrechende Integration verwandelt Ihre PowerOn-Plattform in ein intelligentes Telefonie-System, das externe Mandanten nahtlos mit Unternehmen verbindet.",
+ "value": "Erleben Sie die Zukunft der Mandantenkommunikation durch unsere strategische Partnerschaft mit Spitch.ai. Diese bahnbrechende Integration verwandelt Ihre PowerOn-Plattform in ein intelligentes Telefonie-System, das externe Mandanten nahtlos mit Unternehmen verbindet."
+ },
+ {
+ "context": "ui",
+ "key": "Erneut versuchen",
+ "value": "Erneut versuchen"
+ },
+ {
+ "context": "ui",
+ "key": "Erste Seite",
+ "value": "Erste Seite"
+ },
+ {
+ "context": "ui",
+ "key": "Erstellen",
+ "value": "Erstellen"
+ },
+ {
+ "context": "ui",
+ "key": "Erstellen und verwalten Sie RBAC-Rollen und deren Berechtigungen.",
+ "value": "Erstellen und verwalten Sie RBAC-Rollen und deren Berechtigungen."
+ },
+ {
+ "context": "ui",
+ "key": "Erstellen...",
+ "value": "Erstellen..."
+ },
+ {
+ "context": "ui",
+ "key": "Erstellt",
+ "value": "Erstellt"
+ },
+ {
+ "context": "ui",
+ "key": "Erstellte Dateien",
+ "value": "Erstellte Dateien"
+ },
+ {
+ "context": "ui",
+ "key": "Erstellungsdatum",
+ "value": "Erstellungsdatum"
+ },
+ {
+ "context": "ui",
+ "key": "Exportiere...",
+ "value": "Exportieren..."
+ },
+ {
+ "context": "ui",
+ "key": "Externe E-Mail",
+ "value": "Externe E-Mail"
+ },
+ {
+ "context": "ui",
+ "key": "Externe E-Mail-Adresse eingeben",
+ "value": "Externe E-Mail-Adresse eingeben"
+ },
+ {
+ "context": "ui",
+ "key": "Externen Benutzernamen eingeben",
+ "value": "Externen Benutzernamen eingeben"
+ },
+ {
+ "context": "ui",
+ "key": "Externer Benutzername",
+ "value": "Externer Benutzername"
+ },
+ {
+ "context": "ui",
+ "key": "FEHLER",
+ "value": "FEHLER"
+ },
+ {
+ "context": "ui",
+ "key": "FEHLGESCHLAGEN",
+ "value": "FEHLGESCHLAGEN"
+ },
+ {
+ "context": "ui",
+ "key": "Falls Sie Fragen zu Ihrem Mandat oder dem Integrationsprozess haben, zögern Sie nicht, unser Support-Team zu kontaktieren.",
+ "value": "Falls Sie Fragen zu Ihrem Mandat oder dem Integrationsprozess haben, zögern Sie nicht, unser Support-Team zu kontaktieren."
+ },
+ {
+ "context": "ui",
+ "key": "Fehler",
+ "value": "Fehler"
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Aktualisieren der Benutzerinformationen",
+ "value": "Fehler beim Aktualisieren der Benutzerinformationen"
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Erstellen der Automatisierung",
+ "value": "Fehler beim Erstellen der Automatisierung"
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Erstellen der Organisation",
+ "value": "Fehler beim Erstellen der Organisation"
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Erstellen der Position",
+ "value": "Fehler beim Erstellen der Position"
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Erstellen der RBAC-Regel",
+ "value": "Fehler beim Erstellen der RBAC-Regel"
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Erstellen der Rolle",
+ "value": "Fehler beim Erstellen der Rolle"
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Erstellen des Dokuments",
+ "value": "Fehler beim Erstellen des Dokuments"
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Erstellen des Mandats",
+ "value": "Fehler beim Erstellen des Mandats"
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Erstellen des Prompts",
+ "value": "Fehler beim Erstellen des Prompts"
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Erstellen des Team-Mitglieds",
+ "value": "Fehler beim Erstellen des Team-Mitglieds"
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Erstellen des Vertrags",
+ "value": "Fehler beim Erstellen des Vertrags"
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Erstellen des Zugriffs",
+ "value": "Fehler beim Erstellen des Zugriffs"
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Laden der Benutzer",
+ "value": "Fehler beim Laden der Benutzer"
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Laden der Benutzer:",
+ "value": "Fehler beim Laden der Benutzer:"
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Laden der Benutzerinformationen",
+ "value": "Fehler beim Laden der Benutzerinformationen"
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Laden der Dateien:",
+ "value": "Fehler beim Laden der Dateien:"
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Laden der Logs",
+ "value": "Fehler beim Laden der Logs"
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Laden der Nachrichten:",
+ "value": "Fehler beim Laden der Nachrichten:"
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Laden der Prompts",
+ "value": "Fehler beim Laden der Prompts"
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Laden der Prompts:",
+ "value": "Fehler beim Laden der Prompts:"
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Laden der SharePoint Dokumente:",
+ "value": "Fehler beim Laden der SharePoint Dokumente:"
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Laden der Vorschau",
+ "value": "Fehler beim Laden der Vorschau"
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Laden der Workflows:",
+ "value": "Fehler beim Laden der Workflows:"
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Löschen",
+ "value": "Fehler beim Löschen"
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Speichern der Einstellungen. Bitte versuchen Sie es erneut.",
+ "value": "Fehler beim Speichern der Einstellungen. Bitte versuchen Sie es erneut."
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Teilen des Prompts",
+ "value": "Fehler beim Teilen des Prompts"
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Verarbeiten der Dateien",
+ "value": "Fehler beim Verarbeiten der Dateien"
+ },
+ {
+ "context": "ui",
+ "key": "Fehler:",
+ "value": "Fehler:"
+ },
+ {
+ "context": "ui",
+ "key": "Fehlgeschlagen",
+ "value": "Fehlgeschlagen"
+ },
+ {
+ "context": "ui",
+ "key": "Filter",
+ "value": "Filter"
+ },
+ {
+ "context": "ui",
+ "key": "Filter löschen",
+ "value": "Filter löschen"
+ },
+ {
+ "context": "ui",
+ "key": "Filter: {value}",
+ "value": "Filter: {value}"
+ },
+ {
+ "context": "ui",
+ "key": "Firma",
+ "value": "Firma"
+ },
+ {
+ "context": "ui",
+ "key": "Firmenname",
+ "value": "Firmenname"
+ },
+ {
+ "context": "ui",
+ "key": "Firmenname ist erforderlich",
+ "value": "Firmenname ist erforderlich"
+ },
+ {
+ "context": "ui",
+ "key": "Folgenachricht wird gesendet...",
+ "value": "Folgenachricht wird gesendet..."
+ },
+ {
+ "context": "ui",
+ "key": "Fortfahren",
+ "value": "Fortfahren"
+ },
+ {
+ "context": "ui",
+ "key": "Fortsetzen",
+ "value": "Fortsetzen"
+ },
+ {
+ "context": "ui",
+ "key": "Fragen?",
+ "value": "Fragen?"
+ },
+ {
+ "context": "ui",
+ "key": "Français",
+ "value": "Français"
+ },
+ {
+ "context": "ui",
+ "key": "Fügen Sie eine Nachricht für die Empfänger hinzu",
+ "value": "Fügen Sie eine Nachricht für die Empfänger hinzu"
+ },
+ {
+ "context": "ui",
+ "key": "GESTOPPT",
+ "value": "GESTOPPT"
+ },
+ {
+ "context": "ui",
+ "key": "Geben Sie Ihren Firmennamen ein",
+ "value": "Geben Sie Ihren Firmennamen ein"
+ },
+ {
+ "context": "ui",
+ "key": "Geben Sie Kunden einen schnellen und effizienten Selbstservice für Sprach- und Textanfragen, der 24/7 verfügbar ist.",
+ "value": "Geben Sie Kunden einen schnellen und effizienten Selbstservice für Sprach- und Textanfragen, der 24/7 verfügbar ist."
+ },
+ {
+ "context": "ui",
+ "key": "Geben Sie den Inhalt des Prompts ein",
+ "value": "Geben Sie den Inhalt des Prompts ein"
+ },
+ {
+ "context": "ui",
+ "key": "Geben Sie einen Namen für den Prompt ein",
+ "value": "Geben Sie einen Namen für den Prompt ein"
+ },
+ {
+ "context": "ui",
+ "key": "Geben Sie einen benutzerdefinierten Titel ein",
+ "value": "Geben Sie einen benutzerdefinierten Titel ein"
+ },
+ {
+ "context": "ui",
+ "key": "Geplante und automatisierte Workflows",
+ "value": "Geplante und automatisierte Workflows"
+ },
+ {
+ "context": "ui",
+ "key": "Geschäftszeiten",
+ "value": "Geschäftszeiten"
+ },
+ {
+ "context": "ui",
+ "key": "Geschäftszeiten & Zeitzone",
+ "value": "Geschäftszeiten & Zeitzone"
+ },
+ {
+ "context": "ui",
+ "key": "Gespräch fortsetzen...",
+ "value": "Gespräch fortsetzen..."
+ },
+ {
+ "context": "ui",
+ "key": "Gestartet",
+ "value": "Gestartet"
+ },
+ {
+ "context": "ui",
+ "key": "Gestartet:",
+ "value": "Gestartet:"
+ },
+ {
+ "context": "ui",
+ "key": "Gestoppt",
+ "value": "Gestoppt"
+ },
+ {
+ "context": "ui",
+ "key": "Geteilt",
+ "value": "Geteilt"
+ },
+ {
+ "context": "ui",
+ "key": "Geteilte Dateien",
+ "value": "Geteilte Dateien"
+ },
+ {
+ "context": "ui",
+ "key": "Globale Sprachsets verwalten (SysAdmin).",
+ "value": "Globale Sprachsets verwalten (SysAdmin)."
+ },
+ {
+ "context": "ui",
+ "key": "Google",
+ "value": "Google"
+ },
+ {
+ "context": "ui",
+ "key": "Google verbinden",
+ "value": "Google verbinden"
+ },
+ {
+ "context": "ui",
+ "key": "Google-Verbindung erstellen",
+ "value": "Google-Verbindung erstellen"
+ },
+ {
+ "context": "ui",
+ "key": "Google-Verbindung hinzufügen",
+ "value": "Google-Verbindung hinzufügen"
+ },
+ {
+ "context": "ui",
+ "key": "Grundlegende Daten und Ressourcen",
+ "value": "Grundlegende Daten und Ressourcen"
+ },
+ {
+ "context": "ui",
+ "key": "Größe",
+ "value": "Größe"
+ },
+ {
+ "context": "ui",
+ "key": "Hell",
+ "value": "Hell"
+ },
+ {
+ "context": "ui",
+ "key": "Herunterladen",
+ "value": "Herunterladen"
+ },
+ {
+ "context": "ui",
+ "key": "Hinzufügen",
+ "value": "Hinzufügen"
+ },
+ {
+ "context": "ui",
+ "key": "Hochgeladen",
+ "value": "Hochgeladen"
+ },
+ {
+ "context": "ui",
+ "key": "Hochladen",
+ "value": "Hochladen"
+ },
+ {
+ "context": "ui",
+ "key": "ID",
+ "value": "ID"
+ },
+ {
+ "context": "ui",
+ "key": "INFO",
+ "value": "INFO"
+ },
+ {
+ "context": "ui",
+ "key": "Identifizieren und authentifizieren Sie Anrufer in Sekunden mit kontinuierlicher Verifizierung und Sicherheit.",
+ "value": "Identifizieren und authentifizieren Sie Anrufer in Sekunden mit kontinuierlicher Verifizierung und Sicherheit."
+ },
+ {
+ "context": "ui",
+ "key": "Ihre Anfrage wird verarbeitet...",
+ "value": "Ihre Anfrage wird verarbeitet..."
+ },
+ {
+ "context": "ui",
+ "key": "In die Zwischenablage kopiert",
+ "value": "In die Zwischenablage kopiert"
+ },
+ {
+ "context": "ui",
+ "key": "Inaktiv",
+ "value": "Inaktiv"
+ },
+ {
+ "context": "ui",
+ "key": "Information",
+ "value": "Information"
+ },
+ {
+ "context": "ui",
+ "key": "Inhalt",
+ "value": "Inhalt"
+ },
+ {
+ "context": "ui",
+ "key": "Inhalt ist erforderlich",
+ "value": "Inhalt ist erforderlich"
+ },
+ {
+ "context": "ui",
+ "key": "Ja",
+ "value": "Ja"
+ },
+ {
+ "context": "ui",
+ "key": "Jetzt anmelden",
+ "value": "Jetzt anmelden"
+ },
+ {
+ "context": "ui",
+ "key": "Jetzt überspringen",
+ "value": "Jetzt überspringen"
+ },
+ {
+ "context": "ui",
+ "key": "KI-erstellt",
+ "value": "KI-erstellt"
+ },
+ {
+ "context": "ui",
+ "key": "KI-gestützte Dokumentengenerierung:",
+ "value": "KI-gestützte Dokumentengenerierung:"
+ },
+ {
+ "context": "ui",
+ "key": "Kein Auth-Anbieter",
+ "value": "Kein Auth-Anbieter"
+ },
+ {
+ "context": "ui",
+ "key": "Kein Benutzername",
+ "value": "Kein Benutzername"
+ },
+ {
+ "context": "ui",
+ "key": "Kein Nachrichteninhalt verfügbar",
+ "value": "Kein Nachrichteninhalt verfügbar"
+ },
+ {
+ "context": "ui",
+ "key": "Kein Name",
+ "value": "Kein Name"
+ },
+ {
+ "context": "ui",
+ "key": "Kein Workflow ausgewählt",
+ "value": "Kein Workflow ausgewählt"
+ },
+ {
+ "context": "ui",
+ "key": "Keine",
+ "value": "Keine"
+ },
+ {
+ "context": "ui",
+ "key": "Keine Benutzer verfügbar",
+ "value": "Keine Benutzer verfügbar"
+ },
+ {
+ "context": "ui",
+ "key": "Keine Berechtigung",
+ "value": "Keine Berechtigung"
+ },
+ {
+ "context": "ui",
+ "key": "Keine Berechtigung zum Löschen des Prompts",
+ "value": "Keine Berechtigung zum Löschen des Prompts"
+ },
+ {
+ "context": "ui",
+ "key": "Keine Dateien gefunden.",
+ "value": "Keine Dateien gefunden."
+ },
+ {
+ "context": "ui",
+ "key": "Keine Daten verfügbar",
+ "value": "Keine Daten verfügbar"
+ },
+ {
+ "context": "ui",
+ "key": "Keine E-Mail",
+ "value": "Keine E-Mail"
+ },
+ {
+ "context": "ui",
+ "key": "Keine Einträge",
+ "value": "Keine Einträge"
+ },
+ {
+ "context": "ui",
+ "key": "Keine Logs für diesen Workflow verfügbar",
+ "value": "Keine Logs für diesen Workflow verfügbar"
+ },
+ {
+ "context": "ui",
+ "key": "Keine Microsoft-Verbindungen gefunden. Bitte erstellen Sie zuerst eine Verbindung.",
+ "value": "Keine Microsoft-Verbindungen gefunden. Bitte erstellen Sie zuerst eine Verbindung."
+ },
+ {
+ "context": "ui",
+ "key": "Keine Optionen verfügbar",
+ "value": "Keine Optionen verfügbar"
+ },
+ {
+ "context": "ui",
+ "key": "Keine Prompts verfügbar",
+ "value": "Keine Prompts verfügbar"
+ },
+ {
+ "context": "ui",
+ "key": "Keine SharePoint-Sites gefunden",
+ "value": "Keine SharePoint-Sites gefunden"
+ },
+ {
+ "context": "ui",
+ "key": "Keine Sprach-Integrations-Daten gefunden. Bitte melden Sie sich zuerst an, um auf die Einstellungen zuzugreifen.",
+ "value": "Keine Sprach-Integrations-Daten gefunden. Bitte melden Sie sich zuerst an, um auf die Einstellungen zuzugreifen."
+ },
+ {
+ "context": "ui",
+ "key": "Keine Sprache",
+ "value": "Keine Sprache"
+ },
+ {
+ "context": "ui",
+ "key": "Keine Transkripte vorhanden",
+ "value": "Keine Transkripte vorhanden"
+ },
+ {
+ "context": "ui",
+ "key": "Keine Vorschau verfügbar",
+ "value": "Keine Vorschau verfügbar"
+ },
+ {
+ "context": "ui",
+ "key": "Keine Workflows gefunden",
+ "value": "Keine Workflows gefunden"
+ },
+ {
+ "context": "ui",
+ "key": "Keine Workflows verfügbar",
+ "value": "Keine Workflows verfügbar"
+ },
+ {
+ "context": "ui",
+ "key": "Keine hochgeladenen Dateien gefunden.",
+ "value": "Keine hochgeladenen Dateien gefunden."
+ },
+ {
+ "context": "ui",
+ "key": "Keine mit Ihnen geteilten Dateien gefunden.",
+ "value": "Keine mit Ihnen geteilten Dateien gefunden."
+ },
+ {
+ "context": "ui",
+ "key": "Keine von der KI erstellten Dateien gefunden.",
+ "value": "Keine von der KI erstellten Dateien gefunden."
+ },
+ {
+ "context": "ui",
+ "key": "Klicken Sie erneut zum Bestätigen",
+ "value": "Klicken Sie erneut zum Bestätigen"
+ },
+ {
+ "context": "ui",
+ "key": "Klicken Sie erneut zum Bestätigen der Löschung",
+ "value": "Klicken Sie erneut zum Bestätigen der Löschung"
+ },
+ {
+ "context": "ui",
+ "key": "Klicken Sie, um zu öffnen",
+ "value": "Klicken Sie, um zu öffnen"
+ },
+ {
+ "context": "ui",
+ "key": "Knowledge Agent (KA)",
+ "value": "Knowledge Agent (KA)"
+ },
+ {
+ "context": "ui",
+ "key": "Konfigurieren Sie administrative Einstellungen und Systempräferenzen.",
+ "value": "Konfigurieren Sie administrative Einstellungen und Systempräferenzen."
+ },
+ {
+ "context": "ui",
+ "key": "Konfigurieren und verwalten Sie rollenbasierte Zugriffssteuerungsregeln.",
+ "value": "Konfigurieren und verwalten Sie rollenbasierte Zugriffssteuerungsregeln."
+ },
+ {
+ "context": "ui",
+ "key": "Kontakte einrichten",
+ "value": "Kontakte einrichten"
+ },
+ {
+ "context": "ui",
+ "key": "Kontaktinformationen",
+ "value": "Kontaktinformationen"
+ },
+ {
+ "context": "ui",
+ "key": "Kontostatus",
+ "value": "Kontostatus"
+ },
+ {
+ "context": "ui",
+ "key": "Kopieren",
+ "value": "Kopieren"
+ },
+ {
+ "context": "ui",
+ "key": "Kopiert",
+ "value": "Kopiert"
+ },
+ {
+ "context": "ui",
+ "key": "Kosteneinsparungen & Effizienz:",
+ "value": "Kosteneinsparungen & Effizienz:"
+ },
+ {
+ "context": "ui",
+ "key": "Kundenverträge verwalten",
+ "value": "Kundenverträge verwalten"
+ },
+ {
+ "context": "ui",
+ "key": "Lade Filterwerte...",
+ "value": "Lade Filterwerte..."
+ },
+ {
+ "context": "ui",
+ "key": "Lade Fortschritt...",
+ "value": "Lade Fortschritt..."
+ },
+ {
+ "context": "ui",
+ "key": "Laden...",
+ "value": "Laden..."
+ },
+ {
+ "context": "ui",
+ "key": "Land",
+ "value": "Land"
+ },
+ {
+ "context": "ui",
+ "key": "Land ist erforderlich",
+ "value": "Land ist erforderlich"
+ },
+ {
+ "context": "ui",
+ "key": "Leer = Zugriff auf alle Verträge",
+ "value": "Leer = Zugriff auf alle Verträge"
+ },
+ {
+ "context": "ui",
+ "key": "Letzte Aktivität",
+ "value": "Letzte Aktivität"
+ },
+ {
+ "context": "ui",
+ "key": "Letzte Aktivität:",
+ "value": "Letzte Aktivität:"
+ },
+ {
+ "context": "ui",
+ "key": "Letzte Aktivitäten - Sehen Sie Ihre neueste Arbeit",
+ "value": "Letzte Aktivitäten - Sehen Sie Ihre neueste Arbeit"
+ },
+ {
+ "context": "ui",
+ "key": "Letzte Seite",
+ "value": "Letzte Seite"
+ },
+ {
+ "context": "ui",
+ "key": "Link konnte nicht gesendet werden",
+ "value": "Link konnte nicht gesendet werden"
+ },
+ {
+ "context": "ui",
+ "key": "Log",
+ "value": "Log"
+ },
+ {
+ "context": "ui",
+ "key": "Logs konnten nicht geladen werden",
+ "value": "Logs konnten nicht geladen werden"
+ },
+ {
+ "context": "ui",
+ "key": "Logs werden geladen...",
+ "value": "Logs werden geladen..."
+ },
+ {
+ "context": "ui",
+ "key": "Lokal",
+ "value": "Lokal"
+ },
+ {
+ "context": "ui",
+ "key": "LÄUFT",
+ "value": "LÄUFT"
+ },
+ {
+ "context": "ui",
+ "key": "Lädt hoch...",
+ "value": "Lädt hoch..."
+ },
+ {
+ "context": "ui",
+ "key": "Läuft",
+ "value": "Läuft"
+ },
+ {
+ "context": "ui",
+ "key": "Läuft ab am",
+ "value": "Läuft ab am"
+ },
+ {
+ "context": "ui",
+ "key": "Löschen",
+ "value": "Löschen"
+ },
+ {
+ "context": "ui",
+ "key": "Löschen ({count})",
+ "value": "Löschen ({count})"
+ },
+ {
+ "context": "ui",
+ "key": "Löschen...",
+ "value": "Löschen..."
+ },
+ {
+ "context": "ui",
+ "key": "MIME-Typ",
+ "value": "MIME-Typ"
+ },
+ {
+ "context": "ui",
+ "key": "Management-Tools umfassen:",
+ "value": "Management-Tools umfassen:"
+ },
+ {
+ "context": "ui",
+ "key": "Mandanten können jederzeit auf die technische SIP-Nummer umstellen und dabei erhebliche Telefoniekosten sparen. Die Integration funktioniert wie ein weiterer Connector (Outlook, SharePoint) und wird nahtlos in Ihren bestehenden Workflow integriert.",
+ "value": "Mandanten können jederzeit auf die technische SIP-Nummer umstellen und dabei erhebliche Telefoniekosten sparen. Die Integration funktioniert wie ein weiterer Connector (Outlook, SharePoint) und wird nahtlos in Ihren bestehenden Workflow integriert."
+ },
+ {
+ "context": "ui",
+ "key": "Mandat erfolgreich eingereicht!",
+ "value": "Mandat erfolgreich eingereicht!"
+ },
+ {
+ "context": "ui",
+ "key": "Mandat erfolgreich erstellt",
+ "value": "Mandat erfolgreich erstellt"
+ },
+ {
+ "context": "ui",
+ "key": "Mandat erstellen",
+ "value": "Mandat erstellen"
+ },
+ {
+ "context": "ui",
+ "key": "Mandat hinzufügen",
+ "value": "Mandat hinzufügen"
+ },
+ {
+ "context": "ui",
+ "key": "Mandat-ID",
+ "value": "Mandat-ID"
+ },
+ {
+ "context": "ui",
+ "key": "Mandate",
+ "value": "Mandate"
+ },
+ {
+ "context": "ui",
+ "key": "Mandate und Berechtigungen verwalten",
+ "value": "Mandate und Berechtigungen verwalten"
+ },
+ {
+ "context": "ui",
+ "key": "Mandatsverwaltung",
+ "value": "Mandatsverwaltung"
+ },
+ {
+ "context": "ui",
+ "key": "Mehr erfahren",
+ "value": "Mehr erfahren"
+ },
+ {
+ "context": "ui",
+ "key": "Meine Uploads",
+ "value": "Meine Uploads"
+ },
+ {
+ "context": "ui",
+ "key": "Microsoft",
+ "value": "Microsoft"
+ },
+ {
+ "context": "ui",
+ "key": "Microsoft Verbindungen",
+ "value": "Microsoft Verbindungen"
+ },
+ {
+ "context": "ui",
+ "key": "Microsoft verbinden",
+ "value": "Microsoft verbinden"
+ },
+ {
+ "context": "ui",
+ "key": "Microsoft-Verbindung erstellen",
+ "value": "Microsoft-Verbindung erstellen"
+ },
+ {
+ "context": "ui",
+ "key": "Microsoft-Verbindung hinzufügen",
+ "value": "Microsoft-Verbindung hinzufügen"
+ },
+ {
+ "context": "ui",
+ "key": "Mitglied hinzufügen",
+ "value": "Mitglied hinzufügen"
+ },
+ {
+ "context": "ui",
+ "key": "MwSt %",
+ "value": "MwSt %"
+ },
+ {
+ "context": "ui",
+ "key": "MwSt Betrag",
+ "value": "MwSt Betrag"
+ },
+ {
+ "context": "ui",
+ "key": "Möchten Sie jetzt Kontakte für Ihr Mandat einrichten? Sie können dies auch später in den Einstellungen tun.",
+ "value": "Möchten Sie jetzt Kontakte für Ihr Mandat einrichten? Sie können dies auch später in den Einstellungen tun."
+ },
+ {
+ "context": "ui",
+ "key": "Nach unten scrollen",
+ "value": "Nach unten scrollen"
+ },
+ {
+ "context": "ui",
+ "key": "Nachricht (optional)",
+ "value": "Nachricht (optional)"
+ },
+ {
+ "context": "ui",
+ "key": "Nachricht eingeben...",
+ "value": "Nachricht eingeben..."
+ },
+ {
+ "context": "ui",
+ "key": "Nachricht wird gesendet...",
+ "value": "Nachricht wird gesendet..."
+ },
+ {
+ "context": "ui",
+ "key": "Nachrichten",
+ "value": "Nachrichten"
+ },
+ {
+ "context": "ui",
+ "key": "Nahtloser Mandanten-Workflow:",
+ "value": "Nahtloser Mandanten-Workflow:"
+ },
+ {
+ "context": "ui",
+ "key": "Name",
+ "value": "Name"
+ },
+ {
+ "context": "ui",
+ "key": "Name des Unternehmens",
+ "value": "Name des Unternehmens"
+ },
+ {
+ "context": "ui",
+ "key": "Name ist erforderlich",
+ "value": "Name ist erforderlich"
+ },
+ {
+ "context": "ui",
+ "key": "Navigation - Erkunden Sie alle verfügbaren Tools",
+ "value": "Navigation - Erkunden Sie alle verfügbaren Tools"
+ },
+ {
+ "context": "ui",
+ "key": "Nein",
+ "value": "Nein"
+ },
+ {
+ "context": "ui",
+ "key": "Neu starten",
+ "value": "Neu starten"
+ },
+ {
+ "context": "ui",
+ "key": "Neue Automatisierung",
+ "value": "Neue Automatisierung"
+ },
+ {
+ "context": "ui",
+ "key": "Neue Automatisierung erstellen",
+ "value": "Neue Automatisierung erstellen"
+ },
+ {
+ "context": "ui",
+ "key": "Neue Datei hochladen",
+ "value": "Neue Datei hochladen"
+ },
+ {
+ "context": "ui",
+ "key": "Neue Organisation",
+ "value": "Neue Organisation"
+ },
+ {
+ "context": "ui",
+ "key": "Neue Organisation erstellen",
+ "value": "Neue Organisation erstellen"
+ },
+ {
+ "context": "ui",
+ "key": "Neue Position",
+ "value": "Neue Position"
+ },
+ {
+ "context": "ui",
+ "key": "Neue Position erstellen",
+ "value": "Neue Position erstellen"
+ },
+ {
+ "context": "ui",
+ "key": "Neue RBAC-Regel erstellen",
+ "value": "Neue RBAC-Regel erstellen"
+ },
+ {
+ "context": "ui",
+ "key": "Neue Rolle",
+ "value": "Neue Rolle"
+ },
+ {
+ "context": "ui",
+ "key": "Neue Rolle erstellen",
+ "value": "Neue Rolle erstellen"
+ },
+ {
+ "context": "ui",
+ "key": "Neue Sprache",
+ "value": "Neue Sprache"
+ },
+ {
+ "context": "ui",
+ "key": "Neuen Prompt erstellen",
+ "value": "Neuen Prompt erstellen"
+ },
+ {
+ "context": "ui",
+ "key": "Neuen Vertrag erstellen",
+ "value": "Neuen Vertrag erstellen"
+ },
+ {
+ "context": "ui",
+ "key": "Neuen Zugriff erstellen",
+ "value": "Neuen Zugriff erstellen"
+ },
+ {
+ "context": "ui",
+ "key": "Neuer Prompt",
+ "value": "Neuer Prompt"
+ },
+ {
+ "context": "ui",
+ "key": "Neuer Vertrag",
+ "value": "Neuer Vertrag"
+ },
+ {
+ "context": "ui",
+ "key": "Neuer Zugriff",
+ "value": "Neuer Zugriff"
+ },
+ {
+ "context": "ui",
+ "key": "Neues Dokument",
+ "value": "Neues Dokument"
+ },
+ {
+ "context": "ui",
+ "key": "Neues Dokument erstellen",
+ "value": "Neues Dokument erstellen"
+ },
+ {
+ "context": "ui",
+ "key": "Neues Mandat erstellen",
+ "value": "Neues Mandat erstellen"
+ },
+ {
+ "context": "ui",
+ "key": "Neues Team-Mitglied erstellen",
+ "value": "Neues Team-Mitglied erstellen"
+ },
+ {
+ "context": "ui",
+ "key": "Neues Transkript",
+ "value": "Neues Transkript"
+ },
+ {
+ "context": "ui",
+ "key": "Nicht verfügbar",
+ "value": "Nicht verfügbar"
+ },
+ {
+ "context": "ui",
+ "key": "Noch keine Befehle ausgeführt. Senden Sie einen Befehl, um Ergebnisse hier zu sehen.",
+ "value": "Noch keine Befehle ausgeführt. Senden Sie einen Befehl, um Ergebnisse hier zu sehen."
+ },
+ {
+ "context": "ui",
+ "key": "Noch keinen Workflow ausgewählt",
+ "value": "Noch keinen Workflow ausgewählt"
+ },
+ {
+ "context": "ui",
+ "key": "Nochmal versuchen",
+ "value": "Nochmal versuchen"
+ },
+ {
+ "context": "ui",
+ "key": "Nächste Seite",
+ "value": "Nächste Seite"
+ },
+ {
+ "context": "ui",
+ "key": "Oder geben Sie Ihre Nachricht ein...",
+ "value": "Oder geben Sie Ihre Nachricht ein..."
+ },
+ {
+ "context": "ui",
+ "key": "Ordnerpfade",
+ "value": "Ordnerpfade"
+ },
+ {
+ "context": "ui",
+ "key": "Organisation",
+ "value": "Organisation"
+ },
+ {
+ "context": "ui",
+ "key": "Organisation erfolgreich erstellt",
+ "value": "Organisation erfolgreich erstellt"
+ },
+ {
+ "context": "ui",
+ "key": "Organisationen",
+ "value": "Organisationen"
+ },
+ {
+ "context": "ui",
+ "key": "Originalbetrag",
+ "value": "Originalbetrag"
+ },
+ {
+ "context": "ui",
+ "key": "Originalwährung",
+ "value": "Originalwährung"
+ },
+ {
+ "context": "ui",
+ "key": "PDF",
+ "value": "PDF"
+ },
+ {
+ "context": "ui",
+ "key": "Passwort",
+ "value": "Passwort"
+ },
+ {
+ "context": "ui",
+ "key": "Passwort eingeben",
+ "value": "Passwort eingeben"
+ },
+ {
+ "context": "ui",
+ "key": "Passwort-Link gesendet!",
+ "value": "Passwort-Link gesendet!"
+ },
+ {
+ "context": "ui",
+ "key": "Passwort-Link senden",
+ "value": "Passwort-Link senden"
+ },
+ {
+ "context": "ui",
+ "key": "Pfad",
+ "value": "Pfad"
+ },
+ {
+ "context": "ui",
+ "key": "Position erfolgreich erstellt",
+ "value": "Position erfolgreich erstellt"
+ },
+ {
+ "context": "ui",
+ "key": "Positionen",
+ "value": "Positionen"
+ },
+ {
+ "context": "ui",
+ "key": "Postleitzahl",
+ "value": "Postleitzahl"
+ },
+ {
+ "context": "ui",
+ "key": "Postleitzahl ist erforderlich",
+ "value": "Postleitzahl ist erforderlich"
+ },
+ {
+ "context": "ui",
+ "key": "Projekte",
+ "value": "Projekte"
+ },
+ {
+ "context": "ui",
+ "key": "Projektverwaltung",
+ "value": "Projektverwaltung"
+ },
+ {
+ "context": "ui",
+ "key": "Projektverwaltung und -organisation",
+ "value": "Projektverwaltung und -organisation"
+ },
+ {
+ "context": "ui",
+ "key": "Prompt",
+ "value": "Prompt"
+ },
+ {
+ "context": "ui",
+ "key": "Prompt Einstellungen",
+ "value": "Prompt Einstellungen"
+ },
+ {
+ "context": "ui",
+ "key": "Prompt Vorlage",
+ "value": "Prompt Vorlage"
+ },
+ {
+ "context": "ui",
+ "key": "Prompt ausführen",
+ "value": "Prompt ausführen"
+ },
+ {
+ "context": "ui",
+ "key": "Prompt auswählen...",
+ "value": "Prompt auswählen..."
+ },
+ {
+ "context": "ui",
+ "key": "Prompt bearbeiten",
+ "value": "Prompt bearbeiten"
+ },
+ {
+ "context": "ui",
+ "key": "Prompt erfolgreich erstellt",
+ "value": "Prompt erfolgreich erstellt"
+ },
+ {
+ "context": "ui",
+ "key": "Prompt erstellen",
+ "value": "Prompt erstellen"
+ },
+ {
+ "context": "ui",
+ "key": "Prompt hinzufügen",
+ "value": "Prompt hinzufügen"
+ },
+ {
+ "context": "ui",
+ "key": "Prompt löschen",
+ "value": "Prompt löschen"
+ },
+ {
+ "context": "ui",
+ "key": "Prompt teilen",
+ "value": "Prompt teilen"
+ },
+ {
+ "context": "ui",
+ "key": "Prompt wird gelöscht...",
+ "value": "Prompt wird gelöscht..."
+ },
+ {
+ "context": "ui",
+ "key": "Prompt-Inhalt",
+ "value": "Prompt-Inhalt"
+ },
+ {
+ "context": "ui",
+ "key": "Prompt-Inhalt darf 10.000 Zeichen nicht überschreiten",
+ "value": "Prompt-Inhalt darf 10.000 Zeichen nicht überschreiten"
+ },
+ {
+ "context": "ui",
+ "key": "Prompt-Inhalt darf nicht leer sein",
+ "value": "Prompt-Inhalt darf nicht leer sein"
+ },
+ {
+ "context": "ui",
+ "key": "Prompt-Name",
+ "value": "Prompt-Name"
+ },
+ {
+ "context": "ui",
+ "key": "Prompt-Name darf 100 Zeichen nicht überschreiten",
+ "value": "Prompt-Name darf 100 Zeichen nicht überschreiten"
+ },
+ {
+ "context": "ui",
+ "key": "Prompt-Name darf nicht leer sein",
+ "value": "Prompt-Name darf nicht leer sein"
+ },
+ {
+ "context": "ui",
+ "key": "Prompts",
+ "value": "Prompts"
+ },
+ {
+ "context": "ui",
+ "key": "Prompts für Ihren KI-Assistenten erstellen und verwalten",
+ "value": "Prompts für Ihren KI-Assistenten erstellen und verwalten"
+ },
+ {
+ "context": "ui",
+ "key": "Prompts verwalten",
+ "value": "Prompts verwalten"
+ },
+ {
+ "context": "ui",
+ "key": "Prompts werden geladen...",
+ "value": "Prompts werden geladen..."
+ },
+ {
+ "context": "ui",
+ "key": "Python",
+ "value": "Python"
+ },
+ {
+ "context": "ui",
+ "key": "Quelle",
+ "value": "Quelle"
+ },
+ {
+ "context": "ui",
+ "key": "RBAC-Regel erfolgreich erstellt",
+ "value": "RBAC-Regel erfolgreich erstellt"
+ },
+ {
+ "context": "ui",
+ "key": "RBAC-Regel hinzufügen",
+ "value": "RBAC-Regel hinzufügen"
+ },
+ {
+ "context": "ui",
+ "key": "RBAC-Regeln",
+ "value": "RBAC-Regeln"
+ },
+ {
+ "context": "ui",
+ "key": "RBAC-Regelverwaltung",
+ "value": "RBAC-Regelverwaltung"
+ },
+ {
+ "context": "ui",
+ "key": "RBAC-Rollen",
+ "value": "RBAC-Rollen"
+ },
+ {
+ "context": "ui",
+ "key": "RBAC-Rollenverwaltung",
+ "value": "RBAC-Rollenverwaltung"
+ },
+ {
+ "context": "ui",
+ "key": "Registrieren",
+ "value": "Registrieren"
+ },
+ {
+ "context": "ui",
+ "key": "Revolutionäre Telefonie-Integration mit Spitch.ai",
+ "value": "Revolutionäre Telefonie-Integration mit Spitch.ai"
+ },
+ {
+ "context": "ui",
+ "key": "Rohtext in die Zwischenablage kopieren",
+ "value": "Rohtext in die Zwischenablage kopieren"
+ },
+ {
+ "context": "ui",
+ "key": "Rolle",
+ "value": "Rolle"
+ },
+ {
+ "context": "ui",
+ "key": "Rolle erfolgreich erstellt",
+ "value": "Rolle erfolgreich erstellt"
+ },
+ {
+ "context": "ui",
+ "key": "Rolle hinzufügen",
+ "value": "Rolle hinzufügen"
+ },
+ {
+ "context": "ui",
+ "key": "Rollen",
+ "value": "Rollen"
+ },
+ {
+ "context": "ui",
+ "key": "Rollen-ID",
+ "value": "Rollen-ID"
+ },
+ {
+ "context": "ui",
+ "key": "Rollenbasierte Zugriffssteuerungsregeln",
+ "value": "Rollenbasierte Zugriffssteuerungsregeln"
+ },
+ {
+ "context": "ui",
+ "key": "Rollenverwaltung",
+ "value": "Rollenverwaltung"
+ },
+ {
+ "context": "ui",
+ "key": "Rufname am Telefon",
+ "value": "Rufname am Telefon"
+ },
+ {
+ "context": "ui",
+ "key": "Runde",
+ "value": "Runde"
+ },
+ {
+ "context": "ui",
+ "key": "Runden",
+ "value": "Runden"
+ },
+ {
+ "context": "ui",
+ "key": "Schließen",
+ "value": "Schließen"
+ },
+ {
+ "context": "ui",
+ "key": "Schnellzugriff",
+ "value": "Schnellzugriff"
+ },
+ {
+ "context": "ui",
+ "key": "Schnellzugriff - Springen Sie zu häufig verwendeten Features",
+ "value": "Schnellzugriff - Springen Sie zu häufig verwendeten Features"
+ },
+ {
+ "context": "ui",
+ "key": "Seite",
+ "value": "Seite"
+ },
+ {
+ "context": "ui",
+ "key": "Seite {page} von {total} ({count} Einträge)",
+ "value": "Seite {page} von {total} ({count} Einträge)"
+ },
+ {
+ "context": "ui",
+ "key": "Senden",
+ "value": "Senden"
+ },
+ {
+ "context": "ui",
+ "key": "Service",
+ "value": "Service"
+ },
+ {
+ "context": "ui",
+ "key": "Service-Verbindungen",
+ "value": "Service-Verbindungen"
+ },
+ {
+ "context": "ui",
+ "key": "SharePoint Dokumente",
+ "value": "SharePoint Dokumente"
+ },
+ {
+ "context": "ui",
+ "key": "SharePoint Site URL",
+ "value": "SharePoint Site URL"
+ },
+ {
+ "context": "ui",
+ "key": "SharePoint Test",
+ "value": "SharePoint Test"
+ },
+ {
+ "context": "ui",
+ "key": "Sie erhalten in den nächsten Minuten eine Bestätigungs-E-Mail.",
+ "value": "Sie erhalten in den nächsten Minuten eine Bestätigungs-E-Mail."
+ },
+ {
+ "context": "ui",
+ "key": "Sie können auch auf den Upload-Button klicken",
+ "value": "Sie können auch auf den Upload-Button klicken"
+ },
+ {
+ "context": "ui",
+ "key": "Sie müssen sich zuerst für die Sprach-Integration anmelden, um auf die Transkriptverwaltung zuzugreifen.",
+ "value": "Sie müssen sich zuerst für die Sprach-Integration anmelden, um auf die Transkriptverwaltung zuzugreifen."
+ },
+ {
+ "context": "ui",
+ "key": "Sind Sie sicher, dass Sie \"{name}\" löschen möchten?",
+ "value": "Sind Sie sicher, dass Sie \"{name}\" löschen möchten?"
+ },
+ {
+ "context": "ui",
+ "key": "Sind Sie sicher, dass Sie Workflow \"{id}...\" löschen möchten?",
+ "value": "Sind Sie sicher, dass Sie Workflow \"{id}...\" löschen möchten?"
+ },
+ {
+ "context": "ui",
+ "key": "Sind Sie sicher, dass Sie alle Sprach-Integrations-Einstellungen zurücksetzen möchten? Diese Aktion kann nicht rückgängig gemacht werden.",
+ "value": "Sind Sie sicher, dass Sie alle Sprach-Integrations-Einstellungen zurücksetzen möchten? Diese Aktion kann nicht rückgängig gemacht werden."
+ },
+ {
+ "context": "ui",
+ "key": "Sind Sie sicher, dass Sie das ausgewählte Element löschen möchten?",
+ "value": "Sind Sie sicher, dass Sie das ausgewählte Element löschen möchten?"
+ },
+ {
+ "context": "ui",
+ "key": "Sind Sie sicher, dass Sie den Workflow \"{name}\" löschen möchten?",
+ "value": "Sind Sie sicher, dass Sie den Workflow \"{name}\" löschen möchten?"
+ },
+ {
+ "context": "ui",
+ "key": "Sind Sie sicher, dass Sie die Datei \"{name}\" löschen möchten?",
+ "value": "Sind Sie sicher, dass Sie die Datei \"{name}\" löschen möchten?"
+ },
+ {
+ "context": "ui",
+ "key": "Sind Sie sicher, dass Sie die {count} ausgewählten Elemente löschen möchten?",
+ "value": "Sind Sie sicher, dass Sie die {count} ausgewählten Elemente löschen möchten?"
+ },
+ {
+ "context": "ui",
+ "key": "Sind Sie sicher, dass Sie die {service} Verbindung löschen möchten?",
+ "value": "Sind Sie sicher, dass Sie die {service} Verbindung löschen möchten?"
+ },
+ {
+ "context": "ui",
+ "key": "Sind Sie sicher, dass Sie diesen Benutzer löschen möchten?",
+ "value": "Sind Sie sicher, dass Sie diesen Benutzer löschen möchten?"
+ },
+ {
+ "context": "ui",
+ "key": "Sind Sie sicher, dass Sie {count} Benutzer löschen möchten?",
+ "value": "Sind Sie sicher, dass Sie {count} Benutzer löschen möchten?"
+ },
+ {
+ "context": "ui",
+ "key": "Sind Sie sicher, dass Sie {count} Prompts löschen möchten?",
+ "value": "Sind Sie sicher, dass Sie {count} Prompts löschen möchten?"
+ },
+ {
+ "context": "ui",
+ "key": "Sind Sie sicher, dass Sie {count} Verbindungen löschen möchten?",
+ "value": "Sind Sie sicher, dass Sie {count} Verbindungen löschen möchten?"
+ },
+ {
+ "context": "ui",
+ "key": "Sites entdecken",
+ "value": "Sites entdecken"
+ },
+ {
+ "context": "ui",
+ "key": "Sortierung {position}: {direction}",
+ "value": "Sortierung {position}: {direction}"
+ },
+ {
+ "context": "ui",
+ "key": "Speech Analytics (SA)",
+ "value": "Speech Analytics (SA)"
+ },
+ {
+ "context": "ui",
+ "key": "Speichern",
+ "value": "Speichern"
+ },
+ {
+ "context": "ui",
+ "key": "Speichern...",
+ "value": "Speichern..."
+ },
+ {
+ "context": "ui",
+ "key": "Spitch prüft vor jedem Anruf die Mandantenberechtigung bei PowerOn, während alle Datenänderungen zentral von PowerOn initiiert werden. Call-Transkripte werden in Echtzeit in Ihrer PowerOn-Datenbank gespeichert, mit vollständiger Mandantenisolation und Sicherheit. Bei Ausfällen werden Anrufe automatisch blockiert, um die Integrität zu gewährleisten.",
+ "value": "Spitch prüft vor jedem Anruf die Mandantenberechtigung bei PowerOn, während alle Datenänderungen zentral von PowerOn initiiert werden. Call-Transkripte werden in Echtzeit in Ihrer PowerOn-Datenbank gespeichert, mit vollständiger Mandantenisolation und Sicherheit. Bei Ausfällen werden Anrufe automatisch blockiert, um die Integrität zu gewährleisten."
+ },
+ {
+ "context": "ui",
+ "key": "Sprach Integration",
+ "value": "Sprach Integration"
+ },
+ {
+ "context": "ui",
+ "key": "Sprach-Einstellungen",
+ "value": "Sprach-Einstellungen"
+ },
+ {
+ "context": "ui",
+ "key": "Sprach-Integration Einstellungen",
+ "value": "Sprach-Integration Einstellungen"
+ },
+ {
+ "context": "ui",
+ "key": "Sprache",
+ "value": "Sprache"
+ },
+ {
+ "context": "ui",
+ "key": "Sprachset {code} wirklich löschen?",
+ "value": "Sprachset {code} wirklich löschen?"
+ },
+ {
+ "context": "ui",
+ "key": "Stadt",
+ "value": "Stadt"
+ },
+ {
+ "context": "ui",
+ "key": "Stadt ist erforderlich",
+ "value": "Stadt ist erforderlich"
+ },
+ {
+ "context": "ui",
+ "key": "Start",
+ "value": "Start"
+ },
+ {
+ "context": "ui",
+ "key": "Startzeit",
+ "value": "Startzeit"
+ },
+ {
+ "context": "ui",
+ "key": "Status",
+ "value": "Status"
+ },
+ {
+ "context": "ui",
+ "key": "Stellen Sie alles, was Ihre Agenten benötigen, in ihren Händen bereit, mit einem einheitlichen Agent-Desktop.",
+ "value": "Stellen Sie alles, was Ihre Agenten benötigen, in ihren Händen bereit, mit einem einheitlichen Agent-Desktop."
+ },
+ {
+ "context": "ui",
+ "key": "Stoppen",
+ "value": "Stoppen"
+ },
+ {
+ "context": "ui",
+ "key": "Straße",
+ "value": "Straße"
+ },
+ {
+ "context": "ui",
+ "key": "Straße ist erforderlich",
+ "value": "Straße ist erforderlich"
+ },
+ {
+ "context": "ui",
+ "key": "Suchen Sie nach Standorten über Adresse oder Koordinaten, oder verwenden Sie natürliche Sprache, um Projekte zu erstellen und zu verwalten.",
+ "value": "Suchen Sie nach Standorten über Adresse oder Koordinaten, oder verwenden Sie natürliche Sprache, um Projekte zu erstellen und zu verwalten."
+ },
+ {
+ "context": "ui",
+ "key": "Suchen...",
+ "value": "Suchen..."
+ },
+ {
+ "context": "ui",
+ "key": "Systemadministrator",
+ "value": "Systemadministrator"
+ },
+ {
+ "context": "ui",
+ "key": "Systemeinstellungen - Arbeitsbereich-Einstellungen konfigurieren",
+ "value": "Systemeinstellungen - Arbeitsbereich-Einstellungen konfigurieren"
+ },
+ {
+ "context": "ui",
+ "key": "Tabelle",
+ "value": "Tabelle"
+ },
+ {
+ "context": "ui",
+ "key": "Tags",
+ "value": "Tags"
+ },
+ {
+ "context": "ui",
+ "key": "Team-Bereich",
+ "value": "Team-Bereich"
+ },
+ {
+ "context": "ui",
+ "key": "Team-Mitglied erfolgreich erstellt",
+ "value": "Team-Mitglied erfolgreich erstellt"
+ },
+ {
+ "context": "ui",
+ "key": "Team-Mitglieder",
+ "value": "Team-Mitglieder"
+ },
+ {
+ "context": "ui",
+ "key": "Team-Mitglieder verwalten",
+ "value": "Team-Mitglieder verwalten"
+ },
+ {
+ "context": "ui",
+ "key": "Team-Mitglieder verwalten, Berechtigungen festlegen und Zusammenarbeitseinstellungen konfigurieren",
+ "value": "Team-Mitglieder verwalten, Berechtigungen festlegen und Zusammenarbeitseinstellungen konfigurieren"
+ },
+ {
+ "context": "ui",
+ "key": "Teilen",
+ "value": "Teilen"
+ },
+ {
+ "context": "ui",
+ "key": "Telefon",
+ "value": "Telefon"
+ },
+ {
+ "context": "ui",
+ "key": "Telefonnummer",
+ "value": "Telefonnummer"
+ },
+ {
+ "context": "ui",
+ "key": "Telefonnummer ist erforderlich",
+ "value": "Telefonnummer ist erforderlich"
+ },
+ {
+ "context": "ui",
+ "key": "Text",
+ "value": "Text"
+ },
+ {
+ "context": "ui",
+ "key": "Textvorschau",
+ "value": "Textvorschau"
+ },
+ {
+ "context": "ui",
+ "key": "Theme",
+ "value": "Theme"
+ },
+ {
+ "context": "ui",
+ "key": "Token",
+ "value": "Token"
+ },
+ {
+ "context": "ui",
+ "key": "Transkript",
+ "value": "Transkript"
+ },
+ {
+ "context": "ui",
+ "key": "Transkript wird verarbeitet...",
+ "value": "Transkript wird verarbeitet..."
+ },
+ {
+ "context": "ui",
+ "key": "Transkriptverwaltung",
+ "value": "Transkriptverwaltung"
+ },
+ {
+ "context": "ui",
+ "key": "Trennungsfehler",
+ "value": "Trennungsfehler"
+ },
+ {
+ "context": "ui",
+ "key": "Treuhand",
+ "value": "Treuhand"
+ },
+ {
+ "context": "ui",
+ "key": "Treuhandverwaltung",
+ "value": "Treuhandverwaltung"
+ },
+ {
+ "context": "ui",
+ "key": "Trustee-Organisationen verwalten",
+ "value": "Trustee-Organisationen verwalten"
+ },
+ {
+ "context": "ui",
+ "key": "Trustee-Rollen verwalten",
+ "value": "Trustee-Rollen verwalten"
+ },
+ {
+ "context": "ui",
+ "key": "Typ",
+ "value": "Typ"
+ },
+ {
+ "context": "ui",
+ "key": "UI-Sprachen",
+ "value": "UI-Sprachen"
+ },
+ {
+ "context": "ui",
+ "key": "Unbekannt",
+ "value": "Unbekannt"
+ },
+ {
+ "context": "ui",
+ "key": "Unbekannte Größe",
+ "value": "Unbekannte Größe"
+ },
+ {
+ "context": "ui",
+ "key": "Unbekanntes Datum",
+ "value": "Unbekanntes Datum"
+ },
+ {
+ "context": "ui",
+ "key": "Unbenannt",
+ "value": "Unbenannt"
+ },
+ {
+ "context": "ui",
+ "key": "Unbenannter Workflow",
+ "value": "Unbenannter Workflow"
+ },
+ {
+ "context": "ui",
+ "key": "Ungültige Auswahl",
+ "value": "Ungültige Auswahl"
+ },
+ {
+ "context": "ui",
+ "key": "Ungültige URL",
+ "value": "Ungültige URL"
+ },
+ {
+ "context": "ui",
+ "key": "Ungültiges Datum",
+ "value": "Ungültiges Datum"
+ },
+ {
+ "context": "ui",
+ "key": "Ungültiges Datumsformat",
+ "value": "Ungültiges Datumsformat"
+ },
+ {
+ "context": "ui",
+ "key": "Ungültiges E-Mail-Format",
+ "value": "Ungültiges E-Mail-Format"
+ },
+ {
+ "context": "ui",
+ "key": "Ungültiges JSON",
+ "value": "Ungültiges JSON"
+ },
+ {
+ "context": "ui",
+ "key": "Unser Team wird Ihr Mandat innerhalb von 1-2 Werktagen überprüfen.",
+ "value": "Unser Team wird Ihr Mandat innerhalb von 1-2 Werktagen überprüfen."
+ },
+ {
+ "context": "ui",
+ "key": "Unsere bereits aktive Dokumenten-Extraktions-Engine generiert automatisch personalisierte Dokumente für Spitch, basierend auf Mandantenspezifischen Daten. Die KI nutzt FAQ-Datenbanken, Mitarbeiterinformationen und Service-Details, um jeden Anruf kontextuell und hochpersonalisiert zu gestalten.",
+ "value": "Unsere bereits aktive Dokumenten-Extraktions-Engine generiert automatisch personalisierte Dokumente für Spitch, basierend auf Mandantenspezifischen Daten. Die KI nutzt FAQ-Datenbanken, Mitarbeiterinformationen und Service-Details, um jeden Anruf kontextuell und hochpersonalisiert zu gestalten."
+ },
+ {
+ "context": "ui",
+ "key": "Unternehmensinformationen",
+ "value": "Unternehmensinformationen"
+ },
+ {
+ "context": "ui",
+ "key": "Unterstützt von",
+ "value": "Unterstützt von"
+ },
+ {
+ "context": "ui",
+ "key": "Upload fehlgeschlagen. Bitte versuchen Sie es erneut.",
+ "value": "Upload fehlgeschlagen. Bitte versuchen Sie es erneut."
+ },
+ {
+ "context": "ui",
+ "key": "VERARBEITUNG",
+ "value": "VERARBEITUNG"
+ },
+ {
+ "context": "ui",
+ "key": "Valutadatum",
+ "value": "Valutadatum"
+ },
+ {
+ "context": "ui",
+ "key": "Verarbeitung",
+ "value": "Verarbeitung"
+ },
+ {
+ "context": "ui",
+ "key": "Verbinden",
+ "value": "Verbinden"
+ },
+ {
+ "context": "ui",
+ "key": "Verbindung aktualisieren",
+ "value": "Verbindung aktualisieren"
+ },
+ {
+ "context": "ui",
+ "key": "Verbindung testen",
+ "value": "Verbindung testen"
+ },
+ {
+ "context": "ui",
+ "key": "Verbindungen",
+ "value": "Verbindungen"
+ },
+ {
+ "context": "ui",
+ "key": "Verbindungen werden geladen...",
+ "value": "Verbindungen werden geladen..."
+ },
+ {
+ "context": "ui",
+ "key": "Verbindungsfehler",
+ "value": "Verbindungsfehler"
+ },
+ {
+ "context": "ui",
+ "key": "Verbunden am",
+ "value": "Verbunden am"
+ },
+ {
+ "context": "ui",
+ "key": "Vereinheitlichen und liefern Sie Informationen an Ihre Kunden und Mitarbeiter, wann und wo sie sie benötigen.",
+ "value": "Vereinheitlichen und liefern Sie Informationen an Ihre Kunden und Mitarbeiter, wann und wo sie sie benötigen."
+ },
+ {
+ "context": "ui",
+ "key": "Verfügbare Tools",
+ "value": "Verfügbare Tools"
+ },
+ {
+ "context": "ui",
+ "key": "Verfügbare Workflows",
+ "value": "Verfügbare Workflows"
+ },
+ {
+ "context": "ui",
+ "key": "Version",
+ "value": "Version"
+ },
+ {
+ "context": "ui",
+ "key": "Versuchen Sie, Ihr Microsoft-Konto auf der Verbindungsseite erneut zu verbinden.",
+ "value": "Versuchen Sie, Ihr Microsoft-Konto auf der Verbindungsseite erneut zu verbinden."
+ },
+ {
+ "context": "ui",
+ "key": "Vertrag",
+ "value": "Vertrag"
+ },
+ {
+ "context": "ui",
+ "key": "Vertrag (optional)",
+ "value": "Vertrag (optional)"
+ },
+ {
+ "context": "ui",
+ "key": "Vertrag erfolgreich erstellt",
+ "value": "Vertrag erfolgreich erstellt"
+ },
+ {
+ "context": "ui",
+ "key": "Verträge",
+ "value": "Verträge"
+ },
+ {
+ "context": "ui",
+ "key": "Verwalten Sie Daten über Tabellen. Wählen Sie eine Tabelle aus oder verwenden Sie natürliche Sprache, um Befehle auszuführen.",
+ "value": "Verwalten Sie Daten über Tabellen. Wählen Sie eine Tabelle aus oder verwenden Sie natürliche Sprache, um Befehle auszuführen."
+ },
+ {
+ "context": "ui",
+ "key": "Verwalten Sie Ihre Kontoinformationen",
+ "value": "Verwalten Sie Ihre Kontoinformationen"
+ },
+ {
+ "context": "ui",
+ "key": "Verwalten Sie Ihre Service-Verbindungen",
+ "value": "Verwalten Sie Ihre Service-Verbindungen"
+ },
+ {
+ "context": "ui",
+ "key": "Verwalten Sie Ihre Sprach-Integrations-Konfiguration und Einstellungen.",
+ "value": "Verwalten Sie Ihre Sprach-Integrations-Konfiguration und Einstellungen."
+ },
+ {
+ "context": "ui",
+ "key": "Verwalten Sie Mandate und deren zugehörige Berechtigungen.",
+ "value": "Verwalten Sie Mandate und deren zugehörige Berechtigungen."
+ },
+ {
+ "context": "ui",
+ "key": "Verwaltet von {provider}",
+ "value": "Verwaltet von {provider}"
+ },
+ {
+ "context": "ui",
+ "key": "Verwaltung der Benutzerzugriffe auf Organisationen",
+ "value": "Verwaltung der Benutzerzugriffe auf Organisationen"
+ },
+ {
+ "context": "ui",
+ "key": "Verwaltung der Buchungspositionen (Speseneinträge)",
+ "value": "Verwaltung der Buchungspositionen (Speseneinträge)"
+ },
+ {
+ "context": "ui",
+ "key": "Verwaltung der Dokumente und Belege",
+ "value": "Verwaltung der Dokumente und Belege"
+ },
+ {
+ "context": "ui",
+ "key": "Verwaltung der Feature-spezifischen Rollen",
+ "value": "Verwaltung der Feature-spezifischen Rollen"
+ },
+ {
+ "context": "ui",
+ "key": "Verwaltung der Kundenverträge",
+ "value": "Verwaltung der Kundenverträge"
+ },
+ {
+ "context": "ui",
+ "key": "Verwaltung der Treuhand-Organisationen",
+ "value": "Verwaltung der Treuhand-Organisationen"
+ },
+ {
+ "context": "ui",
+ "key": "Verwaltung von Treuhand-Organisationen, Verträgen und Buchungen",
+ "value": "Verwaltung von Treuhand-Organisationen, Verträgen und Buchungen"
+ },
+ {
+ "context": "ui",
+ "key": "Verwaltungs- und Management-Tools",
+ "value": "Verwaltungs- und Management-Tools"
+ },
+ {
+ "context": "ui",
+ "key": "Verwende Vorlage:",
+ "value": "Verwende Vorlage:"
+ },
+ {
+ "context": "ui",
+ "key": "Video",
+ "value": "Video"
+ },
+ {
+ "context": "ui",
+ "key": "Vielen Dank für Ihr Interesse an unserer Sprach Integration powered by Spitch.ai. Wir haben Ihr Mandat erhalten und werden es in Kürze überprüfen.",
+ "value": "Vielen Dank für Ihr Interesse an unserer Sprach Integration powered by Spitch.ai. Wir haben Ihr Mandat erhalten und werden es in Kürze überprüfen."
+ },
+ {
+ "context": "ui",
+ "key": "Virtual Assistant (VA)",
+ "value": "Virtual Assistant (VA)"
+ },
+ {
+ "context": "ui",
+ "key": "Voice Biometrics (VB)",
+ "value": "Voice Biometrics (VB)"
+ },
+ {
+ "context": "ui",
+ "key": "Vollständiger Name",
+ "value": "Vollständiger Name"
+ },
+ {
+ "context": "ui",
+ "key": "Von",
+ "value": "Von"
+ },
+ {
+ "context": "ui",
+ "key": "Von der Registrierung bis zur technischen Einrichtung - Ihr Mandant registriert sich bei PowerOn für Telefonie-Services, lädt Dokumente hoch und erhält automatisch eine technische SIP-Nummer von Spitch. Die Call-Weiterleitung kann jederzeit aktiviert oder deaktiviert werden, was maximale Flexibilität und BCM-Sicherheit gewährleistet.",
+ "value": "Von der Registrierung bis zur technischen Einrichtung - Ihr Mandant registriert sich bei PowerOn für Telefonie-Services, lädt Dokumente hoch und erhält automatisch eine technische SIP-Nummer von Spitch. Die Call-Weiterleitung kann jederzeit aktiviert oder deaktiviert werden, was maximale Flexibilität und BCM-Sicherheit gewährleistet."
+ },
+ {
+ "context": "ui",
+ "key": "Vorherige Seite",
+ "value": "Vorherige Seite"
+ },
+ {
+ "context": "ui",
+ "key": "Vorschau",
+ "value": "Vorschau"
+ },
+ {
+ "context": "ui",
+ "key": "Vorschau für diesen Dateityp nicht verfügbar",
+ "value": "Vorschau für diesen Dateityp nicht verfügbar"
+ },
+ {
+ "context": "ui",
+ "key": "Vorschau schließen",
+ "value": "Vorschau schließen"
+ },
+ {
+ "context": "ui",
+ "key": "Vorschau wird geladen...",
+ "value": "Vorschau wird geladen..."
+ },
+ {
+ "context": "ui",
+ "key": "WARTEND",
+ "value": "WARTEND"
+ },
+ {
+ "context": "ui",
+ "key": "Wartend",
+ "value": "Wartend"
+ },
+ {
+ "context": "ui",
+ "key": "Was passiert als nächstes?",
+ "value": "Was passiert als nächstes?"
+ },
+ {
+ "context": "ui",
+ "key": "Wechseln Sie zwischen hellem und dunklem Modus",
+ "value": "Wechseln Sie zwischen hellem und dunklem Modus"
+ },
+ {
+ "context": "ui",
+ "key": "Werkzeuge",
+ "value": "Werkzeuge"
+ },
+ {
+ "context": "ui",
+ "key": "Werkzeuge und Hilfsmittel",
+ "value": "Werkzeuge und Hilfsmittel"
+ },
+ {
+ "context": "ui",
+ "key": "Wie möchten Sie am Telefon genannt werden?",
+ "value": "Wie möchten Sie am Telefon genannt werden?"
+ },
+ {
+ "context": "ui",
+ "key": "Wiederholen",
+ "value": "Wiederholen"
+ },
+ {
+ "context": "ui",
+ "key": "Willkommen in Ihrem Arbeitsbereich",
+ "value": "Willkommen in Ihrem Arbeitsbereich"
+ },
+ {
+ "context": "ui",
+ "key": "Wird gesendet...",
+ "value": "Wird gesendet..."
+ },
+ {
+ "context": "ui",
+ "key": "Wird gestoppt...",
+ "value": "Wird gestoppt..."
+ },
+ {
+ "context": "ui",
+ "key": "Wird geteilt...",
+ "value": "Wird geteilt..."
+ },
+ {
+ "context": "ui",
+ "key": "Wird hochgeladen...",
+ "value": "Wird hochgeladen..."
+ },
+ {
+ "context": "ui",
+ "key": "Wird verarbeitet...",
+ "value": "Wird verarbeitet..."
+ },
+ {
+ "context": "ui",
+ "key": "Workflow",
+ "value": "Workflow"
+ },
+ {
+ "context": "ui",
+ "key": "Workflow Fortschritt",
+ "value": "Workflow Fortschritt"
+ },
+ {
+ "context": "ui",
+ "key": "Workflow auswählen",
+ "value": "Workflow auswählen"
+ },
+ {
+ "context": "ui",
+ "key": "Workflow fehlgeschlagen.",
+ "value": "Workflow fehlgeschlagen."
+ },
+ {
+ "context": "ui",
+ "key": "Workflow fortsetzen",
+ "value": "Workflow fortsetzen"
+ },
+ {
+ "context": "ui",
+ "key": "Workflow läuft... Warte auf Logs...",
+ "value": "Workflow läuft... Warte auf Logs..."
+ },
+ {
+ "context": "ui",
+ "key": "Workflow löschen",
+ "value": "Workflow löschen"
+ },
+ {
+ "context": "ui",
+ "key": "Workflow stoppen",
+ "value": "Workflow stoppen"
+ },
+ {
+ "context": "ui",
+ "key": "Workflow wird fortgesetzt",
+ "value": "Workflow wird fortgesetzt"
+ },
+ {
+ "context": "ui",
+ "key": "Workflow wird gelöscht...",
+ "value": "Workflow wird gelöscht..."
+ },
+ {
+ "context": "ui",
+ "key": "Workflow-Automatisierungen verwalten",
+ "value": "Workflow-Automatisierungen verwalten"
+ },
+ {
+ "context": "ui",
+ "key": "Workflow-Nachrichten werden geladen...",
+ "value": "Workflow-Nachrichten werden geladen..."
+ },
+ {
+ "context": "ui",
+ "key": "Workflow-Verlauf",
+ "value": "Workflow-Verlauf"
+ },
+ {
+ "context": "ui",
+ "key": "Workflows",
+ "value": "Workflows"
+ },
+ {
+ "context": "ui",
+ "key": "Workflows werden geladen...",
+ "value": "Workflows werden geladen..."
+ },
+ {
+ "context": "ui",
+ "key": "Wähle einen Workflow aus der Liste aus oder starte einen neuen Workflow",
+ "value": "Wähle einen Workflow aus der Liste aus oder starte einen neuen Workflow"
+ },
+ {
+ "context": "ui",
+ "key": "Wählen Sie Ihre bevorzugte Sprache",
+ "value": "Wählen Sie Ihre bevorzugte Sprache"
+ },
+ {
+ "context": "ui",
+ "key": "You",
+ "value": "You"
+ },
+ {
+ "context": "ui",
+ "key": "Zeitzone",
+ "value": "Zeitzone"
+ },
+ {
+ "context": "ui",
+ "key": "Zentrale",
+ "value": "Zentrale"
+ },
+ {
+ "context": "ui",
+ "key": "Zu dunklem Modus wechseln",
+ "value": "Zu dunklem Modus wechseln"
+ },
+ {
+ "context": "ui",
+ "key": "Zu hellem Modus wechseln",
+ "value": "Zu hellem Modus wechseln"
+ },
+ {
+ "context": "ui",
+ "key": "Zugriff",
+ "value": "Zugriff"
+ },
+ {
+ "context": "ui",
+ "key": "Zugriff erfolgreich erstellt",
+ "value": "Zugriff erfolgreich erstellt"
+ },
+ {
+ "context": "ui",
+ "key": "Zugriff verweigert",
+ "value": "Zugriff verweigert"
+ },
+ {
+ "context": "ui",
+ "key": "Zuletzt geprüft",
+ "value": "Zuletzt geprüft"
+ },
+ {
+ "context": "ui",
+ "key": "Zum Bestätigen klicken",
+ "value": "Zum Bestätigen klicken"
+ },
+ {
+ "context": "ui",
+ "key": "Zum Bestätigen klicken...",
+ "value": "Zum Bestätigen klicken..."
+ },
+ {
+ "context": "ui",
+ "key": "Zum Ein-/Ausklappen klicken",
+ "value": "Zum Ein-/Ausklappen klicken"
+ },
+ {
+ "context": "ui",
+ "key": "Zum Filtern klicken",
+ "value": "Zum Filtern klicken"
+ },
+ {
+ "context": "ui",
+ "key": "Zum Sortieren klicken",
+ "value": "Zum Sortieren klicken"
+ },
+ {
+ "context": "ui",
+ "key": "Zurück zur Sprach Integration",
+ "value": "Zurück zur Sprach Integration"
+ },
+ {
+ "context": "ui",
+ "key": "angehängt",
+ "value": "angehängt"
+ },
+ {
+ "context": "ui",
+ "key": "ausgewählt",
+ "value": "ausgewählt"
+ },
+ {
+ "context": "ui",
+ "key": "k. A.",
+ "value": "k. A."
+ },
+ {
+ "context": "ui",
+ "key": "kontakt@firma.com",
+ "value": "kontakt@firma.com"
+ },
+ {
+ "context": "ui",
+ "key": "oder",
+ "value": "oder"
+ },
+ {
+ "context": "ui",
+ "key": "z.B. Beleg.pdf",
+ "value": "z.B. Beleg.pdf"
+ },
+ {
+ "context": "ui",
+ "key": "z.B. Finanzdienstleistungen, Technologie, etc.",
+ "value": "z.B. Finanzdienstleistungen, Technologie, etc."
+ },
+ {
+ "context": "ui",
+ "key": "z.B. Muster AG 2026",
+ "value": "z.B. Muster AG 2026"
+ },
+ {
+ "context": "ui",
+ "key": "z.B. Treuhand AG Zürich",
+ "value": "z.B. Treuhand AG Zürich"
+ },
+ {
+ "context": "ui",
+ "key": "z.B. admin, operate, userreport",
+ "value": "z.B. admin, operate, userreport"
+ },
+ {
+ "context": "ui",
+ "key": "z.B. treuhand-ag-zuerich",
+ "value": "z.B. treuhand-ag-zuerich"
+ },
+ {
+ "context": "ui",
+ "key": "{authority} Verbindung bearbeiten",
+ "value": "{authority} Verbindung bearbeiten"
+ },
+ {
+ "context": "ui",
+ "key": "{column} filtern",
+ "value": "{column} filtern"
+ },
+ {
+ "context": "ui",
+ "key": "{count} Benutzer ausgewählt",
+ "value": "{count} Benutzer ausgewählt"
+ },
+ {
+ "context": "ui",
+ "key": "{fieldLabel} ist erforderlich",
+ "value": "{fieldLabel} ist erforderlich"
+ },
+ {
+ "context": "ui",
+ "key": "{fieldLabel} muss eine gültige Ganzzahl sein",
+ "value": "{fieldLabel} muss eine gültige Ganzzahl sein"
+ },
+ {
+ "context": "ui",
+ "key": "{fieldLabel} muss eine gültige Zahl sein",
+ "value": "{fieldLabel} muss eine gültige Zahl sein"
+ },
+ {
+ "context": "ui",
+ "key": "Änderungen speichern",
+ "value": "Änderungen speichern"
+ },
+ {
+ "context": "ui",
+ "key": "Über",
+ "value": "Über"
+ },
+ {
+ "context": "ui",
+ "key": "Überprüfungsprozess",
+ "value": "Überprüfungsprozess"
+ },
+ {
+ "context": "ui",
+ "key": "Übersicht - Sehen Sie den Arbeitsbereich-Status und Updates",
+ "value": "Übersicht - Sehen Sie den Arbeitsbereich-Status und Updates"
+ },
+ {
+ "context": "ui",
+ "key": "Überwachen Sie automatisch 100% der Gespräche, um wertvolle Einblicke für Ihr Unternehmen zu erhalten.",
+ "value": "Überwachen Sie automatisch 100% der Gespräche, um wertvolle Einblicke für Ihr Unternehmen zu erhalten."
+ },
+ {
+ "context": "ui",
+ "key": "(gefiltert nach {name})",
+ "value": "(gefiltert nach {name})"
+ },
+ {
+ "context": "ui",
+ "key": "({count} gefiltert)",
+ "value": "({count} gefiltert)"
+ },
+ {
+ "context": "ui",
+ "key": "Abonnement, Einstellungen und Guthaben pro Mandant",
+ "value": "Abonnement, Einstellungen und Guthaben pro Mandant"
+ },
+ {
+ "context": "ui",
+ "key": "Abrechnung",
+ "value": "Abrechnung"
+ },
+ {
+ "context": "ui",
+ "key": "Aktion",
+ "value": "Aktion"
+ },
+ {
+ "context": "ui",
+ "key": "Benutzer-Billing",
+ "value": "Benutzer-Billing"
+ },
+ {
+ "context": "ui",
+ "key": "Benutzer-Guthaben",
+ "value": "Benutzer-Guthaben"
+ },
+ {
+ "context": "ui",
+ "key": "Benutzer:",
+ "value": "Benutzer:"
+ },
+ {
+ "context": "ui",
+ "key": "Deaktiviert",
+ "value": "Deaktiviert"
+ },
+ {
+ "context": "ui",
+ "key": "Du hast Zugriff auf {instanceCount} {instanceWord} in {mandateCount} {mandateWord}.",
+ "value": "Du hast Zugriff auf {instanceCount} {instanceWord} in {mandateCount} {mandateWord}."
+ },
+ {
+ "context": "ui",
+ "key": "Einstellungen gespeichert!",
+ "value": "Einstellungen gespeichert!"
+ },
+ {
+ "context": "ui",
+ "key": "Feature-Instanz",
+ "value": "Feature-Instanz"
+ },
+ {
+ "context": "ui",
+ "key": "Feature-Instanzen",
+ "value": "Feature-Instanzen"
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Speichern",
+ "value": "Fehler beim Speichern"
+ },
+ {
+ "context": "ui",
+ "key": "Gesamtguthaben",
+ "value": "Gesamtguthaben"
+ },
+ {
+ "context": "ui",
+ "key": "Mandant:",
+ "value": "Mandant:"
+ },
+ {
+ "context": "ui",
+ "key": "Mandanten",
+ "value": "Mandanten"
+ },
+ {
+ "context": "ui",
+ "key": "Mandanten-Billing",
+ "value": "Mandanten-Billing"
+ },
+ {
+ "context": "ui",
+ "key": "Mandanten-Guthaben",
+ "value": "Mandanten-Guthaben"
+ },
+ {
+ "context": "ui",
+ "key": "Mandant",
+ "value": "Mandant"
+ },
+ {
+ "context": "ui",
+ "key": "Niedrig",
+ "value": "Niedrig"
+ },
+ {
+ "context": "ui",
+ "key": "Transaktionen",
+ "value": "Transaktionen"
+ },
+ {
+ "context": "ui",
+ "key": "Warnschwelle",
+ "value": "Warnschwelle"
+ },
+ {
+ "context": "ui",
+ "key": "✓ Mandat eingereicht",
+ "value": "✓ Mandat eingereicht"
+ }
+ ],
+ "status": "complete",
+ "isDefault": false
+ },
+ {
+ "id": "en",
+ "label": "English",
+ "entries": [
+ {
+ "context": "ui",
+ "key": "+41 123 456 789",
+ "value": "+41 123 456 789"
+ },
+ {
+ "context": "ui",
+ "key": "1 Benutzer ausgewählt",
+ "value": "1 user selected"
+ },
+ {
+ "context": "ui",
+ "key": "ABGEBROCHEN",
+ "value": "CANCELLED"
+ },
+ {
+ "context": "ui",
+ "key": "ABGESCHLOSSEN",
+ "value": "COMPLETED"
+ },
+ {
+ "context": "ui",
+ "key": "Abbrechen",
+ "value": "Cancel"
+ },
+ {
+ "context": "ui",
+ "key": "Abgeschlossen",
+ "value": "Completed"
+ },
+ {
+ "context": "ui",
+ "key": "Abmelden",
+ "value": "Logout"
+ },
+ {
+ "context": "ui",
+ "key": "Admin-Einstellungen",
+ "value": "Admin Settings"
+ },
+ {
+ "context": "ui",
+ "key": "Administrative Einstellungen",
+ "value": "Administrative settings"
+ },
+ {
+ "context": "ui",
+ "key": "Administrator",
+ "value": "Admin"
+ },
+ {
+ "context": "ui",
+ "key": "Adresse",
+ "value": "Address"
+ },
+ {
+ "context": "ui",
+ "key": "Agent Assist (AA)",
+ "value": "Agent Assist (AA)"
+ },
+ {
+ "context": "ui",
+ "key": "Aktionen",
+ "value": "Actions"
+ },
+ {
+ "context": "ui",
+ "key": "Aktiv",
+ "value": "Active"
+ },
+ {
+ "context": "ui",
+ "key": "Aktiviert",
+ "value": "Enabled"
+ },
+ {
+ "context": "ui",
+ "key": "Aktualisieren",
+ "value": "Update"
+ },
+ {
+ "context": "ui",
+ "key": "Aktuelle Transkripte",
+ "value": "Recent Transcripts"
+ },
+ {
+ "context": "ui",
+ "key": "Alle Dateien",
+ "value": "All Files"
+ },
+ {
+ "context": "ui",
+ "key": "Alle Elemente auswählen",
+ "value": "Select all items"
+ },
+ {
+ "context": "ui",
+ "key": "Alle Nicht-Standard-Sprachsets jetzt mit dem deutschen Master synchronisieren?",
+ "value": "Synchronize all non-default language sets with the German master now?"
+ },
+ {
+ "context": "ui",
+ "key": "Alle abwählen",
+ "value": "Deselect all"
+ },
+ {
+ "context": "ui",
+ "key": "Alle aktualisieren",
+ "value": "Update all"
+ },
+ {
+ "context": "ui",
+ "key": "Alle auswählen",
+ "value": "Select all"
+ },
+ {
+ "context": "ui",
+ "key": "Analysiere Workflow...",
+ "value": "Analyzing workflow..."
+ },
+ {
+ "context": "ui",
+ "key": "Anmelden",
+ "value": "Login"
+ },
+ {
+ "context": "ui",
+ "key": "Anrufer",
+ "value": "Caller"
+ },
+ {
+ "context": "ui",
+ "key": "Anzeigen",
+ "value": "View"
+ },
+ {
+ "context": "ui",
+ "key": "Anzeigename",
+ "value": "Display name"
+ },
+ {
+ "context": "ui",
+ "key": "Audio",
+ "value": "Audio"
+ },
+ {
+ "context": "ui",
+ "key": "Auf Standard zurücksetzen",
+ "value": "Reset to Default"
+ },
+ {
+ "context": "ui",
+ "key": "Aufgaben",
+ "value": "Tasks"
+ },
+ {
+ "context": "ui",
+ "key": "Ausführen",
+ "value": "Execute"
+ },
+ {
+ "context": "ui",
+ "key": "Ausgewählte Datei:",
+ "value": "Selected file:"
+ },
+ {
+ "context": "ui",
+ "key": "Auth-Anbieter",
+ "value": "Auth Authority"
+ },
+ {
+ "context": "ui",
+ "key": "Authentifizierungsanbieter",
+ "value": "Authentication Provider"
+ },
+ {
+ "context": "ui",
+ "key": "Authentifizierungstoken abgelaufen oder ungültig. Bitte verbinden Sie Ihr Microsoft-Konto erneut.",
+ "value": "Authentication token expired or invalid. Please reconnect your Microsoft account."
+ },
+ {
+ "context": "ui",
+ "key": "Automatisierung erfolgreich erstellt",
+ "value": "Automation created successfully"
+ },
+ {
+ "context": "ui",
+ "key": "Automatisierungen",
+ "value": "Automations"
+ },
+ {
+ "context": "ui",
+ "key": "Basisdaten",
+ "value": "Base Data"
+ },
+ {
+ "context": "ui",
+ "key": "Bearbeiten",
+ "value": "Edit"
+ },
+ {
+ "context": "ui",
+ "key": "Befehl eingeben (z.B., \"Erstelle ein neues Projekt namens 'Hauptstrasse 42'\")",
+ "value": "Enter a command (e.g., \"Create a new project named 'Main Street 42'\")"
+ },
+ {
+ "context": "ui",
+ "key": "Beginne ein Gespräch, indem du eine Nachricht eingibst, eine Vorlage auswählst oder einen vorherigen Workflow fortsetzt …",
+ "value": "Start a conversation by entering a message, selecting a template, or continuing a previous workflow..."
+ },
+ {
+ "context": "ui",
+ "key": "Beginnen Sie mit:",
+ "value": "Get started with:"
+ },
+ {
+ "context": "ui",
+ "key": "Bei Genehmigung planen wir einen Einrichtungsanruf zur Konfiguration Ihrer Integration.",
+ "value": "If approved, we'll schedule a setup call to configure your integration."
+ },
+ {
+ "context": "ui",
+ "key": "Beim Hochladen ist ein Fehler aufgetreten.",
+ "value": "An error occurred while uploading."
+ },
+ {
+ "context": "ui",
+ "key": "Beim Hochladen ist ein unerwarteter Fehler aufgetreten.",
+ "value": "An unexpected error occurred while uploading."
+ },
+ {
+ "context": "ui",
+ "key": "Belege verwalten",
+ "value": "Manage receipts"
+ },
+ {
+ "context": "ui",
+ "key": "Benutzer",
+ "value": "User"
+ },
+ {
+ "context": "ui",
+ "key": "Benutzer auswählen",
+ "value": "Select Users"
+ },
+ {
+ "context": "ui",
+ "key": "Benutzer bearbeiten",
+ "value": "Edit User"
+ },
+ {
+ "context": "ui",
+ "key": "Benutzer erstellen",
+ "value": "Create User"
+ },
+ {
+ "context": "ui",
+ "key": "Benutzer hinzufügen",
+ "value": "Add User"
+ },
+ {
+ "context": "ui",
+ "key": "Benutzer löschen",
+ "value": "Delete User"
+ },
+ {
+ "context": "ui",
+ "key": "Benutzer werden geladen...",
+ "value": "Loading users..."
+ },
+ {
+ "context": "ui",
+ "key": "Benutzer-Zugriff verwalten",
+ "value": "Manage user access"
+ },
+ {
+ "context": "ui",
+ "key": "Benutzerdefinierter Titel (optional)",
+ "value": "Custom Title (optional)"
+ },
+ {
+ "context": "ui",
+ "key": "Benutzerinformationen",
+ "value": "User Information"
+ },
+ {
+ "context": "ui",
+ "key": "Benutzerinformationen erfolgreich aktualisiert",
+ "value": "User information updated successfully"
+ },
+ {
+ "context": "ui",
+ "key": "Benutzerinformationen werden geladen...",
+ "value": "Loading user information..."
+ },
+ {
+ "context": "ui",
+ "key": "Benutzername",
+ "value": "Username"
+ },
+ {
+ "context": "ui",
+ "key": "Benutzerverwaltung - Teammitglieder und Berechtigungen verwalten",
+ "value": "User Management - Manage team members and permissions"
+ },
+ {
+ "context": "ui",
+ "key": "Berechtigung",
+ "value": "Privilege"
+ },
+ {
+ "context": "ui",
+ "key": "Berechtigungsstufe",
+ "value": "Privilege Level"
+ },
+ {
+ "context": "ui",
+ "key": "Beschreibung",
+ "value": "Description"
+ },
+ {
+ "context": "ui",
+ "key": "Beschreibung der Rolle",
+ "value": "Role description"
+ },
+ {
+ "context": "ui",
+ "key": "Betrachter",
+ "value": "Viewer"
+ },
+ {
+ "context": "ui",
+ "key": "Betreff",
+ "value": "Subject"
+ },
+ {
+ "context": "ui",
+ "key": "Bezeichnung",
+ "value": "Label"
+ },
+ {
+ "context": "ui",
+ "key": "Bieten Sie Unterstützung im Live-Chat und setzen Sie intelligente Chatbots in allen Kanälen ein.",
+ "value": "Deliver assistance in live chat and deploy intelligent chatbots in all channels."
+ },
+ {
+ "context": "ui",
+ "key": "Bild",
+ "value": "Image"
+ },
+ {
+ "context": "ui",
+ "key": "Bitte geben Sie eine gültige E-Mail-Adresse ein",
+ "value": "Please enter a valid email address"
+ },
+ {
+ "context": "ui",
+ "key": "Bitte wählen Sie mindestens einen Benutzer aus",
+ "value": "Please select at least one user"
+ },
+ {
+ "context": "ui",
+ "key": "Branche",
+ "value": "Industry"
+ },
+ {
+ "context": "ui",
+ "key": "Branche ist erforderlich",
+ "value": "Industry is required"
+ },
+ {
+ "context": "ui",
+ "key": "Buchungsbetrag",
+ "value": "Booking Amount"
+ },
+ {
+ "context": "ui",
+ "key": "Buchungspositionen verwalten",
+ "value": "Manage booking positions"
+ },
+ {
+ "context": "ui",
+ "key": "Buchungswährung",
+ "value": "Booking Currency"
+ },
+ {
+ "context": "ui",
+ "key": "Chat Platform (CP)",
+ "value": "Chat Platform (CP)"
+ },
+ {
+ "context": "ui",
+ "key": "Chat leeren...",
+ "value": "New Chat"
+ },
+ {
+ "context": "ui",
+ "key": "Chatbereich",
+ "value": "Chat Area"
+ },
+ {
+ "context": "ui",
+ "key": "Darstellung",
+ "value": "Appearance"
+ },
+ {
+ "context": "ui",
+ "key": "Datei",
+ "value": "File"
+ },
+ {
+ "context": "ui",
+ "key": "Datei anhängen",
+ "value": "Attach file"
+ },
+ {
+ "context": "ui",
+ "key": "Datei bereits vorhanden",
+ "value": "File Already Exists"
+ },
+ {
+ "context": "ui",
+ "key": "Datei entfernen",
+ "value": "Remove file"
+ },
+ {
+ "context": "ui",
+ "key": "Datei erfolgreich hochgeladen!",
+ "value": "File uploaded successfully!"
+ },
+ {
+ "context": "ui",
+ "key": "Datei herunterladen",
+ "value": "Download file"
+ },
+ {
+ "context": "ui",
+ "key": "Datei hier ablegen...",
+ "value": "Drop file here..."
+ },
+ {
+ "context": "ui",
+ "key": "Datei hinzufügen",
+ "value": "Add File"
+ },
+ {
+ "context": "ui",
+ "key": "Datei hochladen",
+ "value": "Upload file"
+ },
+ {
+ "context": "ui",
+ "key": "Datei löschen",
+ "value": "Delete file"
+ },
+ {
+ "context": "ui",
+ "key": "Datei vorschauen",
+ "value": "Preview file"
+ },
+ {
+ "context": "ui",
+ "key": "Datei-Ablage während Workflow deaktiviert",
+ "value": "File drop disabled during workflow"
+ },
+ {
+ "context": "ui",
+ "key": "Dateien",
+ "value": "Files"
+ },
+ {
+ "context": "ui",
+ "key": "Dateien anhängen",
+ "value": "Attach Files"
+ },
+ {
+ "context": "ui",
+ "key": "Dateien auswählen",
+ "value": "Select files"
+ },
+ {
+ "context": "ui",
+ "key": "Dateien hier ablegen",
+ "value": "Drop files here"
+ },
+ {
+ "context": "ui",
+ "key": "Dateien hier ablegen zum Anhängen",
+ "value": "Drop files here to attach"
+ },
+ {
+ "context": "ui",
+ "key": "Dateien hierher ziehen",
+ "value": "Drag files here"
+ },
+ {
+ "context": "ui",
+ "key": "Dateien hochladen",
+ "value": "Upload files"
+ },
+ {
+ "context": "ui",
+ "key": "Dateien werden geladen...",
+ "value": "Loading files..."
+ },
+ {
+ "context": "ui",
+ "key": "Dateien werden verarbeitet...",
+ "value": "Processing files..."
+ },
+ {
+ "context": "ui",
+ "key": "Dateigröße",
+ "value": "File Size"
+ },
+ {
+ "context": "ui",
+ "key": "Dateiname",
+ "value": "File Name"
+ },
+ {
+ "context": "ui",
+ "key": "Dateityp",
+ "value": "File Type"
+ },
+ {
+ "context": "ui",
+ "key": "Dateiverwaltung - Dokumente hochladen und organisieren",
+ "value": "File Management - Upload and organize documents"
+ },
+ {
+ "context": "ui",
+ "key": "Dateivorschau",
+ "value": "File Preview"
+ },
+ {
+ "context": "ui",
+ "key": "Daten aktualisieren",
+ "value": "Refresh data"
+ },
+ {
+ "context": "ui",
+ "key": "Daten empfangen",
+ "value": "Data Received"
+ },
+ {
+ "context": "ui",
+ "key": "Daten gesendet",
+ "value": "Data Sent"
+ },
+ {
+ "context": "ui",
+ "key": "Datenverwaltung",
+ "value": "Data Management"
+ },
+ {
+ "context": "ui",
+ "key": "Datenverwaltung - Datenimporte und -exporte verwalten",
+ "value": "Data Management - Handle data imports and exports"
+ },
+ {
+ "context": "ui",
+ "key": "Datenverwaltung mit Tabellen",
+ "value": "Data management with tables"
+ },
+ {
+ "context": "ui",
+ "key": "Datum",
+ "value": "Date"
+ },
+ {
+ "context": "ui",
+ "key": "Dauer",
+ "value": "Duration"
+ },
+ {
+ "context": "ui",
+ "key": "Deutsch",
+ "value": "Deutsch"
+ },
+ {
+ "context": "ui",
+ "key": "Die Datei \"{fileName}\" existiert bereits mit identischem Inhalt. Die vorhandene Datei wird wiederverwendet.",
+ "value": "The file \"{fileName}\" already exists with identical content. The existing file will be reused."
+ },
+ {
+ "context": "ui",
+ "key": "Die Erstellung einer neuen Sprache kann AI-Guthaben auf Ihrem Mandats-Pool belasten. Fortfahren?",
+ "value": "Creating a new language may consume AI credits from your mandate pool. Continue?"
+ },
+ {
+ "context": "ui",
+ "key": "Dies ist Ihr Ausgangspunkt für den Zugriff auf alle Arbeitsbereich-Features und -Tools.",
+ "value": "This is your starting point for accessing all workspace features and tools."
+ },
+ {
+ "context": "ui",
+ "key": "Diese Aktion kann nicht rückgängig gemacht werden.",
+ "value": "This action cannot be undone."
+ },
+ {
+ "context": "ui",
+ "key": "Diese Datei scheint beschädigt zu sein. Sie hat eine PDF-Erweiterung, enthält aber Textinhalte. Bitte laden Sie die Datei erneut hoch, falls möglich.",
+ "value": "This file appears to be corrupted. It has a PDF extension but contains text content. Please re-upload the file if possible."
+ },
+ {
+ "context": "ui",
+ "key": "Dieser Bereich enthält alle Verwaltungs- und Management-Tools für Ihren Arbeitsbereich.",
+ "value": "This section contains all administration and management tools for your workspace."
+ },
+ {
+ "context": "ui",
+ "key": "Dieses Element auswählen",
+ "value": "Select this item"
+ },
+ {
+ "context": "ui",
+ "key": "Dieses Element kann nicht ausgewählt werden",
+ "value": "This item cannot be selected"
+ },
+ {
+ "context": "ui",
+ "key": "Dieses Feld wird von {provider} verwaltet und kann nicht geändert werden",
+ "value": "This field is managed by {provider} and cannot be changed"
+ },
+ {
+ "context": "ui",
+ "key": "Dokument",
+ "value": "Document"
+ },
+ {
+ "context": "ui",
+ "key": "Dokument erfolgreich erstellt",
+ "value": "Document created successfully"
+ },
+ {
+ "context": "ui",
+ "key": "Dokument herunterladen",
+ "value": "Download document"
+ },
+ {
+ "context": "ui",
+ "key": "Dokument vorschauen",
+ "value": "Preview document"
+ },
+ {
+ "context": "ui",
+ "key": "Dokumente",
+ "value": "Documents"
+ },
+ {
+ "context": "ui",
+ "key": "Dokumente auflisten",
+ "value": "List Documents"
+ },
+ {
+ "context": "ui",
+ "key": "Dokumentname",
+ "value": "Document Name"
+ },
+ {
+ "context": "ui",
+ "key": "Dunkel",
+ "value": "Dark"
+ },
+ {
+ "context": "ui",
+ "key": "Durchsuchen",
+ "value": "Browse"
+ },
+ {
+ "context": "ui",
+ "key": "E-Mail",
+ "value": "Email"
+ },
+ {
+ "context": "ui",
+ "key": "E-Mail-Adresse",
+ "value": "Email Address"
+ },
+ {
+ "context": "ui",
+ "key": "E-Mail-Adresse ist erforderlich",
+ "value": "Email address is required"
+ },
+ {
+ "context": "ui",
+ "key": "E-Mail-Bestätigung",
+ "value": "Email Confirmation"
+ },
+ {
+ "context": "ui",
+ "key": "Echtzeit-Datensynchronisation:",
+ "value": "Real-time Data Synchronization:"
+ },
+ {
+ "context": "ui",
+ "key": "Eingereichte Daten:",
+ "value": "Submitted Data:"
+ },
+ {
+ "context": "ui",
+ "key": "Einrichtungsanruf",
+ "value": "Setup Call"
+ },
+ {
+ "context": "ui",
+ "key": "Einstellungen",
+ "value": "Settings"
+ },
+ {
+ "context": "ui",
+ "key": "Einstellungen erfolgreich gespeichert!",
+ "value": "Settings saved successfully!"
+ },
+ {
+ "context": "ui",
+ "key": "Einstellungen werden in zukünftigen Updates hinzugefügt.",
+ "value": "Settings content will be added here in future updates."
+ },
+ {
+ "context": "ui",
+ "key": "Einstellungen wurden erfolgreich zurückgesetzt.",
+ "value": "Settings have been reset successfully."
+ },
+ {
+ "context": "ui",
+ "key": "Einträge pro Seite:",
+ "value": "Items per page:"
+ },
+ {
+ "context": "ui",
+ "key": "Empfänger",
+ "value": "Recipient"
+ },
+ {
+ "context": "ui",
+ "key": "Endzeit",
+ "value": "End Time"
+ },
+ {
+ "context": "ui",
+ "key": "English",
+ "value": "English"
+ },
+ {
+ "context": "ui",
+ "key": "Entdeckte Sites",
+ "value": "Discovered Sites"
+ },
+ {
+ "context": "ui",
+ "key": "Erfolgreich",
+ "value": "Success"
+ },
+ {
+ "context": "ui",
+ "key": "Erfolgsrate",
+ "value": "Success Rate"
+ },
+ {
+ "context": "ui",
+ "key": "Erleben Sie die Zukunft der Mandantenkommunikation durch unsere strategische Partnerschaft mit Spitch.ai. Diese bahnbrechende Integration verwandelt Ihre PowerOn-Plattform in ein intelligentes Telefonie-System, das externe Mandanten nahtlos mit Unternehmen verbindet.",
+ "value": "Experience the future of client communication through our strategic partnership with Spitch.ai. This groundbreaking integration transforms your PowerOn platform into an intelligent telephony system that seamlessly connects external clients with companies."
+ },
+ {
+ "context": "ui",
+ "key": "Erneut versuchen",
+ "value": "Try again"
+ },
+ {
+ "context": "ui",
+ "key": "Erste Seite",
+ "value": "First page"
+ },
+ {
+ "context": "ui",
+ "key": "Erstellen",
+ "value": "Create"
+ },
+ {
+ "context": "ui",
+ "key": "Erstellen und verwalten Sie RBAC-Rollen und deren Berechtigungen.",
+ "value": "Create and manage RBAC roles and their permissions."
+ },
+ {
+ "context": "ui",
+ "key": "Erstellen...",
+ "value": "Creating..."
+ },
+ {
+ "context": "ui",
+ "key": "Erstellt",
+ "value": "Created"
+ },
+ {
+ "context": "ui",
+ "key": "Erstellte Dateien",
+ "value": "Created Files"
+ },
+ {
+ "context": "ui",
+ "key": "Erstellungsdatum",
+ "value": "Creation Date"
+ },
+ {
+ "context": "ui",
+ "key": "Externe E-Mail",
+ "value": "External Email"
+ },
+ {
+ "context": "ui",
+ "key": "Externe E-Mail-Adresse eingeben",
+ "value": "Enter external email address"
+ },
+ {
+ "context": "ui",
+ "key": "Externen Benutzernamen eingeben",
+ "value": "Enter external username"
+ },
+ {
+ "context": "ui",
+ "key": "Externer Benutzername",
+ "value": "External Username"
+ },
+ {
+ "context": "ui",
+ "key": "FEHLER",
+ "value": "ERROR"
+ },
+ {
+ "context": "ui",
+ "key": "FEHLGESCHLAGEN",
+ "value": "FAILED"
+ },
+ {
+ "context": "ui",
+ "key": "Falls Sie Fragen zu Ihrem Mandat oder dem Integrationsprozess haben, zögern Sie nicht, unser Support-Team zu kontaktieren.",
+ "value": "If you have any questions about your mandate or the integration process, please don't hesitate to contact our support team."
+ },
+ {
+ "context": "ui",
+ "key": "Fehler",
+ "value": "Error"
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Aktualisieren der Benutzerinformationen",
+ "value": "Error updating user information"
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Erstellen der Automatisierung",
+ "value": "Error creating automation"
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Erstellen der Organisation",
+ "value": "Error creating organisation"
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Erstellen der Position",
+ "value": "Error creating position"
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Erstellen der RBAC-Regel",
+ "value": "Error creating RBAC rule"
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Erstellen der Rolle",
+ "value": "Error creating role"
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Erstellen des Dokuments",
+ "value": "Error creating document"
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Erstellen des Mandats",
+ "value": "Error creating mandate"
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Erstellen des Prompts",
+ "value": "Error creating prompt"
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Erstellen des Team-Mitglieds",
+ "value": "Error creating team member"
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Erstellen des Vertrags",
+ "value": "Error creating contract"
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Erstellen des Zugriffs",
+ "value": "Error creating access"
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Laden der Benutzer",
+ "value": "Error loading users"
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Laden der Benutzer:",
+ "value": "Error loading users:"
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Laden der Benutzerinformationen",
+ "value": "Error loading user information"
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Laden der Dateien:",
+ "value": "Error loading files:"
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Laden der Logs",
+ "value": "Error loading logs"
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Laden der Nachrichten:",
+ "value": "Error loading messages:"
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Laden der Prompts",
+ "value": "Error loading prompts"
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Laden der Prompts:",
+ "value": "Error loading prompts:"
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Laden der SharePoint Dokumente:",
+ "value": "Error loading SharePoint documents:"
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Laden der Vorschau",
+ "value": "Error loading preview"
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Laden der Workflows:",
+ "value": "Error loading workflows:"
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Löschen",
+ "value": "Error deleting"
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Speichern der Einstellungen. Bitte versuchen Sie es erneut.",
+ "value": "Failed to save settings. Please try again."
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Teilen des Prompts",
+ "value": "Error sharing prompt"
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Verarbeiten der Dateien",
+ "value": "Error processing files"
+ },
+ {
+ "context": "ui",
+ "key": "Fehler:",
+ "value": "Error:"
+ },
+ {
+ "context": "ui",
+ "key": "Fehlgeschlagen",
+ "value": "Failed"
+ },
+ {
+ "context": "ui",
+ "key": "Filter löschen",
+ "value": "Clear filter"
+ },
+ {
+ "context": "ui",
+ "key": "Firma",
+ "value": "Company"
+ },
+ {
+ "context": "ui",
+ "key": "Firmenname",
+ "value": "Company Name"
+ },
+ {
+ "context": "ui",
+ "key": "Firmenname ist erforderlich",
+ "value": "Company name is required"
+ },
+ {
+ "context": "ui",
+ "key": "Folgenachricht wird gesendet...",
+ "value": "Sending follow-up message..."
+ },
+ {
+ "context": "ui",
+ "key": "Fortfahren",
+ "value": "Continue"
+ },
+ {
+ "context": "ui",
+ "key": "Fortsetzen",
+ "value": "Continue"
+ },
+ {
+ "context": "ui",
+ "key": "Fragen?",
+ "value": "Questions?"
+ },
+ {
+ "context": "ui",
+ "key": "Français",
+ "value": "Français"
+ },
+ {
+ "context": "ui",
+ "key": "Fügen Sie eine Nachricht für die Empfänger hinzu",
+ "value": "Add a message for recipients"
+ },
+ {
+ "context": "ui",
+ "key": "GESTOPPT",
+ "value": "STOPPED"
+ },
+ {
+ "context": "ui",
+ "key": "Geben Sie Ihren Firmennamen ein",
+ "value": "Enter your company name"
+ },
+ {
+ "context": "ui",
+ "key": "Geben Sie Kunden einen schnellen und effizienten Selbstservice für Sprach- und Textanfragen, der 24/7 verfügbar ist.",
+ "value": "Give customers a fast and efficient self-service for voice and text queries that's available 24/7."
+ },
+ {
+ "context": "ui",
+ "key": "Geben Sie den Inhalt des Prompts ein",
+ "value": "Enter the prompt content"
+ },
+ {
+ "context": "ui",
+ "key": "Geben Sie einen Namen für den Prompt ein",
+ "value": "Enter a name for the prompt"
+ },
+ {
+ "context": "ui",
+ "key": "Geben Sie einen benutzerdefinierten Titel ein",
+ "value": "Enter a custom title"
+ },
+ {
+ "context": "ui",
+ "key": "Geplante und automatisierte Workflows",
+ "value": "Scheduled and automated workflows"
+ },
+ {
+ "context": "ui",
+ "key": "Geschäftszeiten",
+ "value": "Business Hours"
+ },
+ {
+ "context": "ui",
+ "key": "Geschäftszeiten & Zeitzone",
+ "value": "Business Hours & Timezone"
+ },
+ {
+ "context": "ui",
+ "key": "Gespräch fortsetzen...",
+ "value": "Continue the conversation..."
+ },
+ {
+ "context": "ui",
+ "key": "Gestartet",
+ "value": "Started"
+ },
+ {
+ "context": "ui",
+ "key": "Gestartet:",
+ "value": "Started:"
+ },
+ {
+ "context": "ui",
+ "key": "Gestoppt",
+ "value": "Stopped"
+ },
+ {
+ "context": "ui",
+ "key": "Geteilt",
+ "value": "Shared"
+ },
+ {
+ "context": "ui",
+ "key": "Geteilte Dateien",
+ "value": "Shared Files"
+ },
+ {
+ "context": "ui",
+ "key": "Globale Sprachsets verwalten (SysAdmin).",
+ "value": "Manage global UI language sets (SysAdmin)."
+ },
+ {
+ "context": "ui",
+ "key": "Google",
+ "value": "Google"
+ },
+ {
+ "context": "ui",
+ "key": "Google verbinden",
+ "value": "Connect Google"
+ },
+ {
+ "context": "ui",
+ "key": "Google-Verbindung erstellen",
+ "value": "Create Google Connection"
+ },
+ {
+ "context": "ui",
+ "key": "Google-Verbindung hinzufügen",
+ "value": "Add Google Connection"
+ },
+ {
+ "context": "ui",
+ "key": "Grundlegende Daten und Ressourcen",
+ "value": "Basic data and resources"
+ },
+ {
+ "context": "ui",
+ "key": "Größe",
+ "value": "Size"
+ },
+ {
+ "context": "ui",
+ "key": "Hell",
+ "value": "Light"
+ },
+ {
+ "context": "ui",
+ "key": "Herunterladen",
+ "value": "Download"
+ },
+ {
+ "context": "ui",
+ "key": "Hinzufügen",
+ "value": "Add"
+ },
+ {
+ "context": "ui",
+ "key": "Hochgeladen",
+ "value": "Uploaded"
+ },
+ {
+ "context": "ui",
+ "key": "Hochladen",
+ "value": "Upload"
+ },
+ {
+ "context": "ui",
+ "key": "ID",
+ "value": "ID"
+ },
+ {
+ "context": "ui",
+ "key": "INFO",
+ "value": "INFO"
+ },
+ {
+ "context": "ui",
+ "key": "Identifizieren und authentifizieren Sie Anrufer in Sekunden mit kontinuierlicher Verifizierung und Sicherheit.",
+ "value": "Identify and authenticate callers in seconds with continuous verification and security."
+ },
+ {
+ "context": "ui",
+ "key": "Ihre Anfrage wird verarbeitet...",
+ "value": "Processing your request..."
+ },
+ {
+ "context": "ui",
+ "key": "Inaktiv",
+ "value": "Inactive"
+ },
+ {
+ "context": "ui",
+ "key": "Information",
+ "value": "Information"
+ },
+ {
+ "context": "ui",
+ "key": "Inhalt",
+ "value": "Content"
+ },
+ {
+ "context": "ui",
+ "key": "Inhalt ist erforderlich",
+ "value": "Content is required"
+ },
+ {
+ "context": "ui",
+ "key": "Ja",
+ "value": "Yes"
+ },
+ {
+ "context": "ui",
+ "key": "Jetzt anmelden",
+ "value": "Sign Up Now"
+ },
+ {
+ "context": "ui",
+ "key": "Jetzt überspringen",
+ "value": "Skip for Now"
+ },
+ {
+ "context": "ui",
+ "key": "KI-erstellt",
+ "value": "AI-created"
+ },
+ {
+ "context": "ui",
+ "key": "KI-gestützte Dokumentengenerierung:",
+ "value": "AI-Powered Document Generation:"
+ },
+ {
+ "context": "ui",
+ "key": "Kein Auth-Anbieter",
+ "value": "No Auth Authority"
+ },
+ {
+ "context": "ui",
+ "key": "Kein Benutzername",
+ "value": "No Username"
+ },
+ {
+ "context": "ui",
+ "key": "Kein Nachrichteninhalt verfügbar",
+ "value": "No message content available"
+ },
+ {
+ "context": "ui",
+ "key": "Kein Name",
+ "value": "No Name"
+ },
+ {
+ "context": "ui",
+ "key": "Kein Workflow ausgewählt",
+ "value": "No workflow selected"
+ },
+ {
+ "context": "ui",
+ "key": "Keine Benutzer verfügbar",
+ "value": "No users available"
+ },
+ {
+ "context": "ui",
+ "key": "Keine Berechtigung",
+ "value": "No Privilege"
+ },
+ {
+ "context": "ui",
+ "key": "Keine Berechtigung zum Löschen des Prompts",
+ "value": "No permission to delete prompt"
+ },
+ {
+ "context": "ui",
+ "key": "Keine Dateien gefunden.",
+ "value": "No files found."
+ },
+ {
+ "context": "ui",
+ "key": "Keine E-Mail",
+ "value": "No Email"
+ },
+ {
+ "context": "ui",
+ "key": "Keine Einträge",
+ "value": "No entries"
+ },
+ {
+ "context": "ui",
+ "key": "Keine Logs für diesen Workflow verfügbar",
+ "value": "No logs available for this workflow"
+ },
+ {
+ "context": "ui",
+ "key": "Keine Microsoft-Verbindungen gefunden. Bitte erstellen Sie zuerst eine Verbindung.",
+ "value": "No Microsoft connections found. Please create a connection first."
+ },
+ {
+ "context": "ui",
+ "key": "Keine Prompts verfügbar",
+ "value": "No prompts available"
+ },
+ {
+ "context": "ui",
+ "key": "Keine SharePoint-Sites gefunden",
+ "value": "No SharePoint sites found"
+ },
+ {
+ "context": "ui",
+ "key": "Keine Sprach-Integrations-Daten gefunden. Bitte melden Sie sich zuerst an, um auf die Einstellungen zuzugreifen.",
+ "value": "No speech integration data found. Please sign up first to access settings."
+ },
+ {
+ "context": "ui",
+ "key": "Keine Sprache",
+ "value": "No Language"
+ },
+ {
+ "context": "ui",
+ "key": "Keine Transkripte vorhanden",
+ "value": "No transcripts available"
+ },
+ {
+ "context": "ui",
+ "key": "Keine Vorschau verfügbar",
+ "value": "No preview available"
+ },
+ {
+ "context": "ui",
+ "key": "Keine Workflows gefunden",
+ "value": "No workflows found"
+ },
+ {
+ "context": "ui",
+ "key": "Keine Workflows verfügbar",
+ "value": "No workflows available"
+ },
+ {
+ "context": "ui",
+ "key": "Keine hochgeladenen Dateien gefunden.",
+ "value": "No uploaded files found."
+ },
+ {
+ "context": "ui",
+ "key": "Keine mit Ihnen geteilten Dateien gefunden.",
+ "value": "No shared files found."
+ },
+ {
+ "context": "ui",
+ "key": "Keine von der KI erstellten Dateien gefunden.",
+ "value": "No AI-created files found."
+ },
+ {
+ "context": "ui",
+ "key": "Klicken Sie erneut zum Bestätigen",
+ "value": "Click again to confirm"
+ },
+ {
+ "context": "ui",
+ "key": "Klicken Sie erneut zum Bestätigen der Löschung",
+ "value": "Click again to confirm deletion"
+ },
+ {
+ "context": "ui",
+ "key": "Klicken Sie, um zu öffnen",
+ "value": "Click to open"
+ },
+ {
+ "context": "ui",
+ "key": "Knowledge Agent (KA)",
+ "value": "Knowledge Agent (KA)"
+ },
+ {
+ "context": "ui",
+ "key": "Konfigurieren Sie administrative Einstellungen und Systempräferenzen.",
+ "value": "Configure administrative settings and system preferences."
+ },
+ {
+ "context": "ui",
+ "key": "Konfigurieren und verwalten Sie rollenbasierte Zugriffssteuerungsregeln.",
+ "value": "Configure and manage Role-Based Access Control rules."
+ },
+ {
+ "context": "ui",
+ "key": "Kontakte einrichten",
+ "value": "Setup Contacts"
+ },
+ {
+ "context": "ui",
+ "key": "Kontaktinformationen",
+ "value": "Contact Information"
+ },
+ {
+ "context": "ui",
+ "key": "Kontostatus",
+ "value": "Account Status"
+ },
+ {
+ "context": "ui",
+ "key": "Kopieren",
+ "value": "Copy"
+ },
+ {
+ "context": "ui",
+ "key": "Kosteneinsparungen & Effizienz:",
+ "value": "Cost Savings & Efficiency:"
+ },
+ {
+ "context": "ui",
+ "key": "Kundenverträge verwalten",
+ "value": "Manage customer contracts"
+ },
+ {
+ "context": "ui",
+ "key": "Lade Fortschritt...",
+ "value": "Loading progress..."
+ },
+ {
+ "context": "ui",
+ "key": "Laden...",
+ "value": "Downloading..."
+ },
+ {
+ "context": "ui",
+ "key": "Land",
+ "value": "Country"
+ },
+ {
+ "context": "ui",
+ "key": "Land ist erforderlich",
+ "value": "Country is required"
+ },
+ {
+ "context": "ui",
+ "key": "Leer = Zugriff auf alle Verträge",
+ "value": "Empty = Access to all contracts"
+ },
+ {
+ "context": "ui",
+ "key": "Letzte Aktivität",
+ "value": "Last Activity"
+ },
+ {
+ "context": "ui",
+ "key": "Letzte Aktivität:",
+ "value": "Last Activity:"
+ },
+ {
+ "context": "ui",
+ "key": "Letzte Aktivitäten - Sehen Sie Ihre neueste Arbeit",
+ "value": "Recent Activities - View your latest work"
+ },
+ {
+ "context": "ui",
+ "key": "Letzte Seite",
+ "value": "Last page"
+ },
+ {
+ "context": "ui",
+ "key": "Link konnte nicht gesendet werden",
+ "value": "Failed to send link"
+ },
+ {
+ "context": "ui",
+ "key": "Log",
+ "value": "Log"
+ },
+ {
+ "context": "ui",
+ "key": "Logs konnten nicht geladen werden",
+ "value": "Failed to fetch logs"
+ },
+ {
+ "context": "ui",
+ "key": "Logs werden geladen...",
+ "value": "Loading logs..."
+ },
+ {
+ "context": "ui",
+ "key": "Lokal",
+ "value": "Local"
+ },
+ {
+ "context": "ui",
+ "key": "LÄUFT",
+ "value": "RUNNING"
+ },
+ {
+ "context": "ui",
+ "key": "Lädt hoch...",
+ "value": "Uploading..."
+ },
+ {
+ "context": "ui",
+ "key": "Läuft",
+ "value": "Running"
+ },
+ {
+ "context": "ui",
+ "key": "Läuft ab am",
+ "value": "Expires At"
+ },
+ {
+ "context": "ui",
+ "key": "Löschen",
+ "value": "Delete"
+ },
+ {
+ "context": "ui",
+ "key": "Löschen ({count})",
+ "value": "Delete ({count})"
+ },
+ {
+ "context": "ui",
+ "key": "Löschen...",
+ "value": "Deleting..."
+ },
+ {
+ "context": "ui",
+ "key": "MIME-Typ",
+ "value": "MIME Type"
+ },
+ {
+ "context": "ui",
+ "key": "Management-Tools umfassen:",
+ "value": "Management tools include:"
+ },
+ {
+ "context": "ui",
+ "key": "Mandanten können jederzeit auf die technische SIP-Nummer umstellen und dabei erhebliche Telefoniekosten sparen. Die Integration funktioniert wie ein weiterer Connector (Outlook, SharePoint) und wird nahtlos in Ihren bestehenden Workflow integriert.",
+ "value": "Clients can switch to the technical SIP number at any time and save significant telephony costs. The integration works like another connector (Outlook, SharePoint) and is seamlessly integrated into your existing workflow."
+ },
+ {
+ "context": "ui",
+ "key": "Mandat erfolgreich eingereicht!",
+ "value": "Mandate Submitted Successfully!"
+ },
+ {
+ "context": "ui",
+ "key": "Mandat erfolgreich erstellt",
+ "value": "Mandate created successfully"
+ },
+ {
+ "context": "ui",
+ "key": "Mandat erstellen",
+ "value": "Create Mandate"
+ },
+ {
+ "context": "ui",
+ "key": "Mandat hinzufügen",
+ "value": "Add Mandate"
+ },
+ {
+ "context": "ui",
+ "key": "Mandat-ID",
+ "value": "Mandate ID"
+ },
+ {
+ "context": "ui",
+ "key": "Mandate",
+ "value": "Mandates"
+ },
+ {
+ "context": "ui",
+ "key": "Mandate und Berechtigungen verwalten",
+ "value": "Manage mandates and permissions"
+ },
+ {
+ "context": "ui",
+ "key": "Mandatsverwaltung",
+ "value": "Mandate management"
+ },
+ {
+ "context": "ui",
+ "key": "Mehr erfahren",
+ "value": "Learn more"
+ },
+ {
+ "context": "ui",
+ "key": "Meine Uploads",
+ "value": "My Uploads"
+ },
+ {
+ "context": "ui",
+ "key": "Microsoft",
+ "value": "Microsoft"
+ },
+ {
+ "context": "ui",
+ "key": "Microsoft Verbindungen",
+ "value": "Microsoft Connections"
+ },
+ {
+ "context": "ui",
+ "key": "Microsoft verbinden",
+ "value": "Connect Microsoft"
+ },
+ {
+ "context": "ui",
+ "key": "Microsoft-Verbindung erstellen",
+ "value": "Create Microsoft Connection"
+ },
+ {
+ "context": "ui",
+ "key": "Microsoft-Verbindung hinzufügen",
+ "value": "Add Microsoft Connection"
+ },
+ {
+ "context": "ui",
+ "key": "Mitglied hinzufügen",
+ "value": "Add Member"
+ },
+ {
+ "context": "ui",
+ "key": "MwSt %",
+ "value": "VAT %"
+ },
+ {
+ "context": "ui",
+ "key": "MwSt Betrag",
+ "value": "VAT Amount"
+ },
+ {
+ "context": "ui",
+ "key": "Möchten Sie jetzt Kontakte für Ihr Mandat einrichten? Sie können dies auch später in den Einstellungen tun.",
+      "value": "Would you like to set up contacts for your mandate now? You can also do this later in settings."
+ },
+ {
+ "context": "ui",
+ "key": "Nach unten scrollen",
+ "value": "Scroll to bottom"
+ },
+ {
+ "context": "ui",
+ "key": "Nachricht (optional)",
+ "value": "Message (optional)"
+ },
+ {
+ "context": "ui",
+ "key": "Nachricht eingeben...",
+ "value": "Enter message..."
+ },
+ {
+ "context": "ui",
+ "key": "Nachricht wird gesendet...",
+ "value": "Sending message..."
+ },
+ {
+ "context": "ui",
+ "key": "Nachrichten",
+ "value": "Messages"
+ },
+ {
+ "context": "ui",
+ "key": "Nahtloser Mandanten-Workflow:",
+ "value": "Seamless Client Workflow:"
+ },
+ {
+ "context": "ui",
+ "key": "Name",
+ "value": "Name"
+ },
+ {
+ "context": "ui",
+ "key": "Name des Unternehmens",
+ "value": "Company name"
+ },
+ {
+ "context": "ui",
+ "key": "Name ist erforderlich",
+ "value": "Name is required"
+ },
+ {
+ "context": "ui",
+ "key": "Navigation - Erkunden Sie alle verfügbaren Tools",
+ "value": "Navigation - Explore all available tools"
+ },
+ {
+ "context": "ui",
+ "key": "Nein",
+ "value": "No"
+ },
+ {
+ "context": "ui",
+ "key": "Neu starten",
+ "value": "Start Over"
+ },
+ {
+ "context": "ui",
+ "key": "Neue Automatisierung",
+ "value": "New Automation"
+ },
+ {
+ "context": "ui",
+ "key": "Neue Automatisierung erstellen",
+ "value": "Create New Automation"
+ },
+ {
+ "context": "ui",
+ "key": "Neue Datei hochladen",
+ "value": "Upload new file"
+ },
+ {
+ "context": "ui",
+ "key": "Neue Organisation",
+ "value": "New Organisation"
+ },
+ {
+ "context": "ui",
+ "key": "Neue Organisation erstellen",
+ "value": "Create New Organisation"
+ },
+ {
+ "context": "ui",
+ "key": "Neue Position",
+ "value": "New Position"
+ },
+ {
+ "context": "ui",
+ "key": "Neue Position erstellen",
+ "value": "Create New Position"
+ },
+ {
+ "context": "ui",
+ "key": "Neue RBAC-Regel erstellen",
+ "value": "Create New RBAC Rule"
+ },
+ {
+ "context": "ui",
+ "key": "Neue Rolle",
+ "value": "New Role"
+ },
+ {
+ "context": "ui",
+ "key": "Neue Rolle erstellen",
+ "value": "Create New Role"
+ },
+ {
+ "context": "ui",
+ "key": "Neue Sprache",
+ "value": "New language"
+ },
+ {
+ "context": "ui",
+ "key": "Neuen Prompt erstellen",
+ "value": "Create New Prompt"
+ },
+ {
+ "context": "ui",
+ "key": "Neuen Vertrag erstellen",
+ "value": "Create New Contract"
+ },
+ {
+ "context": "ui",
+ "key": "Neuen Zugriff erstellen",
+ "value": "Create New Access"
+ },
+ {
+ "context": "ui",
+ "key": "Neuer Prompt",
+ "value": "New Prompt"
+ },
+ {
+ "context": "ui",
+ "key": "Neuer Vertrag",
+ "value": "New Contract"
+ },
+ {
+ "context": "ui",
+ "key": "Neuer Zugriff",
+ "value": "New Access"
+ },
+ {
+ "context": "ui",
+ "key": "Neues Dokument",
+ "value": "New Document"
+ },
+ {
+ "context": "ui",
+ "key": "Neues Dokument erstellen",
+ "value": "Create New Document"
+ },
+ {
+ "context": "ui",
+ "key": "Neues Mandat erstellen",
+ "value": "Create New Mandate"
+ },
+ {
+ "context": "ui",
+ "key": "Neues Team-Mitglied erstellen",
+ "value": "Create New Team Member"
+ },
+ {
+ "context": "ui",
+ "key": "Neues Transkript",
+ "value": "New Transcript"
+ },
+ {
+ "context": "ui",
+ "key": "Nicht verfügbar",
+ "value": "N/A"
+ },
+ {
+ "context": "ui",
+ "key": "Noch keine Befehle ausgeführt. Senden Sie einen Befehl, um Ergebnisse hier zu sehen.",
+ "value": "No commands executed yet. Send a command to see results here."
+ },
+ {
+ "context": "ui",
+ "key": "Noch keinen Workflow ausgewählt",
+ "value": "No workflow selected"
+ },
+ {
+ "context": "ui",
+ "key": "Nochmal versuchen",
+ "value": "Try Again"
+ },
+ {
+ "context": "ui",
+ "key": "Nächste Seite",
+ "value": "Next page"
+ },
+ {
+ "context": "ui",
+ "key": "Oder geben Sie Ihre Nachricht ein...",
+ "value": "Or enter your message..."
+ },
+ {
+ "context": "ui",
+ "key": "Ordnerpfade",
+ "value": "Folder Paths"
+ },
+ {
+ "context": "ui",
+ "key": "Organisation",
+ "value": "Organisation"
+ },
+ {
+ "context": "ui",
+ "key": "Organisation erfolgreich erstellt",
+ "value": "Organisation created successfully"
+ },
+ {
+ "context": "ui",
+ "key": "Organisationen",
+ "value": "Organisations"
+ },
+ {
+ "context": "ui",
+ "key": "Originalbetrag",
+ "value": "Original Amount"
+ },
+ {
+ "context": "ui",
+ "key": "Originalwährung",
+ "value": "Original Currency"
+ },
+ {
+ "context": "ui",
+ "key": "PDF",
+ "value": "PDF"
+ },
+ {
+ "context": "ui",
+ "key": "Passwort",
+ "value": "Password"
+ },
+ {
+ "context": "ui",
+ "key": "Passwort eingeben",
+ "value": "Enter password"
+ },
+ {
+ "context": "ui",
+ "key": "Passwort-Link gesendet!",
+ "value": "Password link sent!"
+ },
+ {
+ "context": "ui",
+ "key": "Passwort-Link senden",
+ "value": "Send password setup link"
+ },
+ {
+ "context": "ui",
+ "key": "Pfad",
+ "value": "Path"
+ },
+ {
+ "context": "ui",
+ "key": "Position erfolgreich erstellt",
+ "value": "Position created successfully"
+ },
+ {
+ "context": "ui",
+ "key": "Positionen",
+ "value": "Positions"
+ },
+ {
+ "context": "ui",
+ "key": "Postleitzahl",
+ "value": "Postal Code"
+ },
+ {
+ "context": "ui",
+ "key": "Postleitzahl ist erforderlich",
+ "value": "Postal code is required"
+ },
+ {
+ "context": "ui",
+ "key": "Projekte",
+ "value": "Projects"
+ },
+ {
+ "context": "ui",
+ "key": "Projektverwaltung",
+ "value": "Project Management"
+ },
+ {
+ "context": "ui",
+ "key": "Projektverwaltung und -organisation",
+      "value": "Project management and organisation"
+ },
+ {
+ "context": "ui",
+ "key": "Prompt",
+ "value": "Prompt"
+ },
+ {
+ "context": "ui",
+ "key": "Prompt Einstellungen",
+ "value": "Prompt Settings"
+ },
+ {
+ "context": "ui",
+ "key": "Prompt Vorlage",
+ "value": "Prompt Template"
+ },
+ {
+ "context": "ui",
+ "key": "Prompt ausführen",
+ "value": "Run prompt"
+ },
+ {
+ "context": "ui",
+ "key": "Prompt auswählen...",
+ "value": "Select a prompt..."
+ },
+ {
+ "context": "ui",
+ "key": "Prompt bearbeiten",
+ "value": "Edit Prompt"
+ },
+ {
+ "context": "ui",
+ "key": "Prompt erfolgreich erstellt",
+ "value": "Prompt created successfully"
+ },
+ {
+ "context": "ui",
+ "key": "Prompt erstellen",
+ "value": "Create Prompt"
+ },
+ {
+ "context": "ui",
+ "key": "Prompt hinzufügen",
+ "value": "Add Prompt"
+ },
+ {
+ "context": "ui",
+ "key": "Prompt löschen",
+ "value": "Clear prompt"
+ },
+ {
+ "context": "ui",
+ "key": "Prompt teilen",
+ "value": "Share Prompt"
+ },
+ {
+ "context": "ui",
+ "key": "Prompt wird gelöscht...",
+ "value": "Deleting prompt..."
+ },
+ {
+ "context": "ui",
+ "key": "Prompt-Inhalt",
+ "value": "Prompt Content"
+ },
+ {
+ "context": "ui",
+ "key": "Prompt-Inhalt darf 10.000 Zeichen nicht überschreiten",
+ "value": "Prompt content cannot exceed 10,000 characters"
+ },
+ {
+ "context": "ui",
+ "key": "Prompt-Inhalt darf nicht leer sein",
+ "value": "Prompt content cannot be empty"
+ },
+ {
+ "context": "ui",
+ "key": "Prompt-Name",
+ "value": "Prompt Name"
+ },
+ {
+ "context": "ui",
+ "key": "Prompt-Name darf 100 Zeichen nicht überschreiten",
+ "value": "Prompt name cannot exceed 100 characters"
+ },
+ {
+ "context": "ui",
+ "key": "Prompt-Name darf nicht leer sein",
+ "value": "Prompt name cannot be empty"
+ },
+ {
+ "context": "ui",
+ "key": "Prompts",
+ "value": "Prompts"
+ },
+ {
+ "context": "ui",
+ "key": "Prompts für Ihren KI-Assistenten erstellen und verwalten",
+ "value": "Create and manage prompts for your AI assistant"
+ },
+ {
+ "context": "ui",
+ "key": "Prompts verwalten",
+ "value": "Manage your prompts"
+ },
+ {
+ "context": "ui",
+ "key": "Prompts werden geladen...",
+ "value": "Loading prompts..."
+ },
+ {
+ "context": "ui",
+ "key": "Python",
+ "value": "Python"
+ },
+ {
+ "context": "ui",
+ "key": "Quelle",
+ "value": "Source"
+ },
+ {
+ "context": "ui",
+ "key": "RBAC-Regel erfolgreich erstellt",
+ "value": "RBAC rule created successfully"
+ },
+ {
+ "context": "ui",
+ "key": "RBAC-Regel hinzufügen",
+ "value": "Add RBAC Rule"
+ },
+ {
+ "context": "ui",
+ "key": "RBAC-Regeln",
+ "value": "RBAC Rules"
+ },
+ {
+ "context": "ui",
+ "key": "RBAC-Regelverwaltung",
+ "value": "RBAC rules management"
+ },
+ {
+ "context": "ui",
+ "key": "RBAC-Rollen",
+ "value": "RBAC Roles"
+ },
+ {
+ "context": "ui",
+ "key": "RBAC-Rollenverwaltung",
+ "value": "RBAC role management"
+ },
+ {
+ "context": "ui",
+ "key": "Registrieren",
+ "value": "Register"
+ },
+ {
+ "context": "ui",
+ "key": "Revolutionäre Telefonie-Integration mit Spitch.ai",
+ "value": "Revolutionary Telephony Integration with Spitch.ai"
+ },
+ {
+ "context": "ui",
+ "key": "Rolle",
+ "value": "Role"
+ },
+ {
+ "context": "ui",
+ "key": "Rolle erfolgreich erstellt",
+ "value": "Role created successfully"
+ },
+ {
+ "context": "ui",
+ "key": "Rolle hinzufügen",
+ "value": "Add Role"
+ },
+ {
+ "context": "ui",
+ "key": "Rollen",
+ "value": "Roles"
+ },
+ {
+ "context": "ui",
+ "key": "Rollen-ID",
+ "value": "Role ID"
+ },
+ {
+ "context": "ui",
+ "key": "Rollenbasierte Zugriffssteuerungsregeln",
+ "value": "Role-Based Access Control rules"
+ },
+ {
+ "context": "ui",
+ "key": "Rollenverwaltung",
+ "value": "Role management"
+ },
+ {
+ "context": "ui",
+ "key": "Rufname am Telefon",
+ "value": "Phone Name"
+ },
+ {
+ "context": "ui",
+ "key": "Runde",
+ "value": "Round"
+ },
+ {
+ "context": "ui",
+ "key": "Runden",
+ "value": "Rounds"
+ },
+ {
+ "context": "ui",
+ "key": "Schließen",
+ "value": "Close"
+ },
+ {
+ "context": "ui",
+ "key": "Schnellzugriff",
+ "value": "Quick Access"
+ },
+ {
+ "context": "ui",
+ "key": "Schnellzugriff - Springen Sie zu häufig verwendeten Features",
+ "value": "Quick Access - Jump to frequently used features"
+ },
+ {
+ "context": "ui",
+ "key": "Seite {page} von {total} ({count} Einträge)",
+ "value": "Page {page} of {total} ({count} items)"
+ },
+ {
+ "context": "ui",
+ "key": "Senden",
+ "value": "Send"
+ },
+ {
+ "context": "ui",
+ "key": "Service",
+ "value": "Service"
+ },
+ {
+ "context": "ui",
+ "key": "Service-Verbindungen",
+ "value": "Service Connections"
+ },
+ {
+ "context": "ui",
+ "key": "SharePoint Dokumente",
+ "value": "SharePoint Documents"
+ },
+ {
+ "context": "ui",
+ "key": "SharePoint Site URL",
+ "value": "SharePoint Site URL"
+ },
+ {
+ "context": "ui",
+ "key": "SharePoint Test",
+ "value": "SharePoint Test"
+ },
+ {
+ "context": "ui",
+ "key": "Sie erhalten in den nächsten Minuten eine Bestätigungs-E-Mail.",
+ "value": "You will receive a confirmation email within the next few minutes."
+ },
+ {
+ "context": "ui",
+ "key": "Sie können auch auf den Upload-Button klicken",
+ "value": "You can also click the upload button"
+ },
+ {
+ "context": "ui",
+ "key": "Sie müssen sich zuerst für die Sprach-Integration anmelden, um auf die Transkriptverwaltung zuzugreifen.",
+ "value": "You must first sign up for speech integration to access transcript management."
+ },
+ {
+ "context": "ui",
+ "key": "Sind Sie sicher, dass Sie \"{name}\" löschen möchten?",
+ "value": "Are you sure you want to delete \"{name}\"?"
+ },
+ {
+ "context": "ui",
+ "key": "Sind Sie sicher, dass Sie Workflow \"{id}...\" löschen möchten?",
+ "value": "Are you sure you want to delete workflow \"{id}...\"?"
+ },
+ {
+ "context": "ui",
+ "key": "Sind Sie sicher, dass Sie alle Sprach-Integrations-Einstellungen zurücksetzen möchten? Diese Aktion kann nicht rückgängig gemacht werden.",
+ "value": "Are you sure you want to reset all speech integration settings? This action cannot be undone."
+ },
+ {
+ "context": "ui",
+ "key": "Sind Sie sicher, dass Sie den Workflow \"{name}\" löschen möchten?",
+ "value": "Are you sure you want to delete workflow \"{name}\"?"
+ },
+ {
+ "context": "ui",
+ "key": "Sind Sie sicher, dass Sie die Datei \"{name}\" löschen möchten?",
+ "value": "Are you sure you want to delete the file \"{name}\"?"
+ },
+ {
+ "context": "ui",
+ "key": "Sind Sie sicher, dass Sie die {count} ausgewählten Elemente löschen möchten?",
+ "value": "Are you sure you want to delete the {count} selected items?"
+ },
+ {
+ "context": "ui",
+ "key": "Sind Sie sicher, dass Sie die {service} Verbindung löschen möchten?",
+ "value": "Are you sure you want to delete the {service} connection?"
+ },
+ {
+ "context": "ui",
+ "key": "Sind Sie sicher, dass Sie diesen Benutzer löschen möchten?",
+ "value": "Are you sure you want to delete this user?"
+ },
+ {
+ "context": "ui",
+ "key": "Sind Sie sicher, dass Sie {count} Benutzer löschen möchten?",
+ "value": "Are you sure you want to delete {count} users?"
+ },
+ {
+ "context": "ui",
+ "key": "Sind Sie sicher, dass Sie {count} Prompts löschen möchten?",
+ "value": "Are you sure you want to delete {count} prompts?"
+ },
+ {
+ "context": "ui",
+ "key": "Sind Sie sicher, dass Sie {count} Verbindungen löschen möchten?",
+ "value": "Are you sure you want to delete {count} connections?"
+ },
+ {
+ "context": "ui",
+ "key": "Sites entdecken",
+ "value": "Discover Sites"
+ },
+ {
+ "context": "ui",
+ "key": "Speech Analytics (SA)",
+ "value": "Speech Analytics (SA)"
+ },
+ {
+ "context": "ui",
+ "key": "Speichern",
+ "value": "Save"
+ },
+ {
+ "context": "ui",
+ "key": "Speichern...",
+ "value": "Saving..."
+ },
+ {
+ "context": "ui",
+ "key": "Spitch prüft vor jedem Anruf die Mandantenberechtigung bei PowerOn, während alle Datenänderungen zentral von PowerOn initiiert werden. Call-Transkripte werden in Echtzeit in Ihrer PowerOn-Datenbank gespeichert, mit vollständiger Mandantenisolation und Sicherheit. Bei Ausfällen werden Anrufe automatisch blockiert, um die Integrität zu gewährleisten.",
+ "value": "Spitch checks client authorization with PowerOn before each call, while all data changes are centrally initiated by PowerOn. Call transcripts are stored in real-time in your PowerOn database with complete client isolation and security. In case of failures, calls are automatically blocked to ensure integrity."
+ },
+ {
+ "context": "ui",
+ "key": "Sprach Integration",
+ "value": "Speech Integration"
+ },
+ {
+ "context": "ui",
+ "key": "Sprach-Einstellungen",
+ "value": "Speech Settings"
+ },
+ {
+ "context": "ui",
+ "key": "Sprach-Integration Einstellungen",
+ "value": "Speech Integration Settings"
+ },
+ {
+ "context": "ui",
+ "key": "Sprache",
+ "value": "Language"
+ },
+ {
+ "context": "ui",
+ "key": "Sprachset {code} wirklich löschen?",
+ "value": "Really delete language set {code}?"
+ },
+ {
+ "context": "ui",
+ "key": "Stadt",
+ "value": "City"
+ },
+ {
+ "context": "ui",
+ "key": "Stadt ist erforderlich",
+ "value": "City is required"
+ },
+ {
+ "context": "ui",
+ "key": "Start",
+ "value": "Start"
+ },
+ {
+ "context": "ui",
+ "key": "Startzeit",
+ "value": "Start Time"
+ },
+ {
+ "context": "ui",
+ "key": "Status",
+ "value": "Status"
+ },
+ {
+ "context": "ui",
+ "key": "Stellen Sie alles, was Ihre Agenten benötigen, in ihren Händen bereit, mit einem einheitlichen Agent-Desktop.",
+ "value": "Put everything your agents need at their fingertips, with a unified agent desktop."
+ },
+ {
+ "context": "ui",
+ "key": "Stoppen",
+ "value": "Stop"
+ },
+ {
+ "context": "ui",
+ "key": "Straße",
+ "value": "Street"
+ },
+ {
+ "context": "ui",
+ "key": "Straße ist erforderlich",
+ "value": "Street is required"
+ },
+ {
+ "context": "ui",
+ "key": "Suchen Sie nach Standorten über Adresse oder Koordinaten, oder verwenden Sie natürliche Sprache, um Projekte zu erstellen und zu verwalten.",
+ "value": "Search for locations by address or coordinates, or use natural language to create and manage projects."
+ },
+ {
+ "context": "ui",
+ "key": "Suchen...",
+ "value": "Search..."
+ },
+ {
+ "context": "ui",
+ "key": "Systemadministrator",
+ "value": "Sysadmin"
+ },
+ {
+ "context": "ui",
+ "key": "Systemeinstellungen - Arbeitsbereich-Einstellungen konfigurieren",
+ "value": "System Settings - Configure workspace settings"
+ },
+ {
+ "context": "ui",
+ "key": "Tabelle",
+ "value": "Spreadsheet"
+ },
+ {
+ "context": "ui",
+ "key": "Tags",
+ "value": "Tags"
+ },
+ {
+ "context": "ui",
+ "key": "Team-Bereich",
+ "value": "Team Area"
+ },
+ {
+ "context": "ui",
+ "key": "Team-Mitglied erfolgreich erstellt",
+ "value": "Team member created successfully"
+ },
+ {
+ "context": "ui",
+ "key": "Team-Mitglieder",
+ "value": "Team Members"
+ },
+ {
+ "context": "ui",
+ "key": "Team-Mitglieder verwalten",
+ "value": "Manage your team members"
+ },
+ {
+ "context": "ui",
+ "key": "Team-Mitglieder verwalten, Berechtigungen festlegen und Zusammenarbeitseinstellungen konfigurieren",
+ "value": "Manage team members, set permissions, and configure collaboration settings"
+ },
+ {
+ "context": "ui",
+ "key": "Teilen",
+ "value": "Share"
+ },
+ {
+ "context": "ui",
+ "key": "Telefon",
+ "value": "Phone"
+ },
+ {
+ "context": "ui",
+ "key": "Telefonnummer",
+ "value": "Phone Number"
+ },
+ {
+ "context": "ui",
+ "key": "Telefonnummer ist erforderlich",
+ "value": "Phone number is required"
+ },
+ {
+ "context": "ui",
+ "key": "Text",
+ "value": "Text"
+ },
+ {
+ "context": "ui",
+ "key": "Textvorschau",
+ "value": "Text Preview"
+ },
+ {
+ "context": "ui",
+ "key": "Theme",
+ "value": "Theme"
+ },
+ {
+ "context": "ui",
+ "key": "Token",
+ "value": "Tokens"
+ },
+ {
+ "context": "ui",
+ "key": "Transkript",
+ "value": "Transcript"
+ },
+ {
+ "context": "ui",
+ "key": "Transkript wird verarbeitet...",
+ "value": "Processing transcript..."
+ },
+ {
+ "context": "ui",
+ "key": "Transkriptverwaltung",
+ "value": "Transcript Management"
+ },
+ {
+ "context": "ui",
+ "key": "Trennungsfehler",
+ "value": "Disconnect Error"
+ },
+ {
+ "context": "ui",
+ "key": "Treuhand",
+ "value": "Trustee"
+ },
+ {
+ "context": "ui",
+ "key": "Treuhandverwaltung",
+ "value": "Trustee Management"
+ },
+ {
+ "context": "ui",
+ "key": "Trustee-Organisationen verwalten",
+ "value": "Manage trustee organisations"
+ },
+ {
+ "context": "ui",
+ "key": "Trustee-Rollen verwalten",
+ "value": "Manage trustee roles"
+ },
+ {
+ "context": "ui",
+ "key": "Typ",
+ "value": "Type"
+ },
+ {
+ "context": "ui",
+ "key": "UI-Sprachen",
+ "value": "UI languages"
+ },
+ {
+ "context": "ui",
+ "key": "Unbekannt",
+ "value": "Unknown"
+ },
+ {
+ "context": "ui",
+ "key": "Unbekannte Größe",
+ "value": "Unknown Size"
+ },
+ {
+ "context": "ui",
+ "key": "Unbekanntes Datum",
+ "value": "Unknown Date"
+ },
+ {
+ "context": "ui",
+ "key": "Unbenannt",
+ "value": "Unnamed"
+ },
+ {
+ "context": "ui",
+ "key": "Unbenannter Workflow",
+ "value": "Unnamed Workflow"
+ },
+ {
+ "context": "ui",
+ "key": "Ungültiges Datum",
+ "value": "Invalid date"
+ },
+ {
+ "context": "ui",
+ "key": "Unser Team wird Ihr Mandat innerhalb von 1-2 Werktagen überprüfen.",
+ "value": "Our team will review your mandate within 1-2 business days."
+ },
+ {
+ "context": "ui",
+ "key": "Unsere bereits aktive Dokumenten-Extraktions-Engine generiert automatisch personalisierte Dokumente für Spitch, basierend auf Mandantenspezifischen Daten. Die KI nutzt FAQ-Datenbanken, Mitarbeiterinformationen und Service-Details, um jeden Anruf kontextuell und hochpersonalisiert zu gestalten.",
+ "value": "Our already active document extraction engine automatically generates personalized documents for Spitch based on client-specific data. The AI uses FAQ databases, employee information, and service details to make every call contextual and highly personalized."
+ },
+ {
+ "context": "ui",
+ "key": "Unternehmensinformationen",
+ "value": "Company Information"
+ },
+ {
+ "context": "ui",
+ "key": "Unterstützt von",
+ "value": "Powered by"
+ },
+ {
+ "context": "ui",
+ "key": "Upload fehlgeschlagen. Bitte versuchen Sie es erneut.",
+ "value": "Upload failed. Please try again."
+ },
+ {
+ "context": "ui",
+ "key": "VERARBEITUNG",
+ "value": "PROCESSING"
+ },
+ {
+ "context": "ui",
+ "key": "Valutadatum",
+ "value": "Value Date"
+ },
+ {
+ "context": "ui",
+ "key": "Verarbeitung",
+ "value": "Processing"
+ },
+ {
+ "context": "ui",
+ "key": "Verbinden",
+ "value": "Connect"
+ },
+ {
+ "context": "ui",
+ "key": "Verbindung aktualisieren",
+ "value": "Update Connection"
+ },
+ {
+ "context": "ui",
+ "key": "Verbindung testen",
+ "value": "Test Connection"
+ },
+ {
+ "context": "ui",
+ "key": "Verbindungen",
+ "value": "Connections"
+ },
+ {
+ "context": "ui",
+ "key": "Verbindungen werden geladen...",
+ "value": "Loading connections..."
+ },
+ {
+ "context": "ui",
+ "key": "Verbindungsfehler",
+ "value": "Connection Error"
+ },
+ {
+ "context": "ui",
+ "key": "Verbunden am",
+ "value": "Connected At"
+ },
+ {
+ "context": "ui",
+ "key": "Vereinheitlichen und liefern Sie Informationen an Ihre Kunden und Mitarbeiter, wann und wo sie sie benötigen.",
+ "value": "Unify and deliver info to your customers and staff wherever and whenever they need it."
+ },
+ {
+ "context": "ui",
+ "key": "Verfügbare Tools",
+ "value": "Available Tools"
+ },
+ {
+ "context": "ui",
+ "key": "Verfügbare Workflows",
+ "value": "Available Workflows"
+ },
+ {
+ "context": "ui",
+ "key": "Version",
+ "value": "Version"
+ },
+ {
+ "context": "ui",
+ "key": "Versuchen Sie, Ihr Microsoft-Konto auf der Verbindungsseite erneut zu verbinden.",
+ "value": "Try reconnecting your Microsoft account in the Connections page."
+ },
+ {
+ "context": "ui",
+ "key": "Vertrag",
+ "value": "Contract"
+ },
+ {
+ "context": "ui",
+ "key": "Vertrag (optional)",
+ "value": "Contract (optional)"
+ },
+ {
+ "context": "ui",
+ "key": "Vertrag erfolgreich erstellt",
+ "value": "Contract created successfully"
+ },
+ {
+ "context": "ui",
+ "key": "Verträge",
+ "value": "Contracts"
+ },
+ {
+ "context": "ui",
+ "key": "Verwalten Sie Daten über Tabellen. Wählen Sie eine Tabelle aus oder verwenden Sie natürliche Sprache, um Befehle auszuführen.",
+ "value": "Manage data through tables. Select a table or use natural language to execute commands."
+ },
+ {
+ "context": "ui",
+ "key": "Verwalten Sie Ihre Kontoinformationen",
+ "value": "Manage your account information"
+ },
+ {
+ "context": "ui",
+ "key": "Verwalten Sie Ihre Service-Verbindungen",
+ "value": "Manage your service connections"
+ },
+ {
+ "context": "ui",
+ "key": "Verwalten Sie Ihre Sprach-Integrations-Konfiguration und Einstellungen.",
+ "value": "Manage your speech integration configuration and preferences."
+ },
+ {
+ "context": "ui",
+ "key": "Verwalten Sie Mandate und deren zugehörige Berechtigungen.",
+ "value": "Manage mandates and their associated permissions."
+ },
+ {
+ "context": "ui",
+ "key": "Verwaltet von {provider}",
+ "value": "Managed by {provider}"
+ },
+ {
+ "context": "ui",
+ "key": "Verwaltung der Benutzerzugriffe auf Organisationen",
+ "value": "Management of user access to organisations"
+ },
+ {
+ "context": "ui",
+ "key": "Verwaltung der Buchungspositionen (Speseneinträge)",
+ "value": "Management of booking positions (expense entries)"
+ },
+ {
+ "context": "ui",
+ "key": "Verwaltung der Dokumente und Belege",
+ "value": "Management of documents and receipts"
+ },
+ {
+ "context": "ui",
+ "key": "Verwaltung der Feature-spezifischen Rollen",
+ "value": "Management of feature-specific roles"
+ },
+ {
+ "context": "ui",
+ "key": "Verwaltung der Kundenverträge",
+ "value": "Management of customer contracts"
+ },
+ {
+ "context": "ui",
+ "key": "Verwaltung der Treuhand-Organisationen",
+ "value": "Management of trustee organisations"
+ },
+ {
+ "context": "ui",
+ "key": "Verwaltung von Treuhand-Organisationen, Verträgen und Buchungen",
+ "value": "Manage trustee organisations, contracts, and bookings"
+ },
+ {
+ "context": "ui",
+ "key": "Verwaltungs- und Management-Tools",
+ "value": "Administration and management tools"
+ },
+ {
+ "context": "ui",
+ "key": "Verwende Vorlage:",
+ "value": "Using prompt:"
+ },
+ {
+ "context": "ui",
+ "key": "Video",
+ "value": "Video"
+ },
+ {
+ "context": "ui",
+ "key": "Vielen Dank für Ihr Interesse an unserer Sprach Integration powered by Spitch.ai. Wir haben Ihr Mandat erhalten und werden es in Kürze überprüfen.",
+ "value": "Thank you for your interest in our Speech Integration powered by Spitch.ai. We have received your mandate and will review it shortly."
+ },
+ {
+ "context": "ui",
+ "key": "Virtual Assistant (VA)",
+ "value": "Virtual Assistant (VA)"
+ },
+ {
+ "context": "ui",
+ "key": "Voice Biometrics (VB)",
+ "value": "Voice Biometrics (VB)"
+ },
+ {
+ "context": "ui",
+ "key": "Vollständiger Name",
+ "value": "Full Name"
+ },
+ {
+ "context": "ui",
+ "key": "Von der Registrierung bis zur technischen Einrichtung - Ihr Mandant registriert sich bei PowerOn für Telefonie-Services, lädt Dokumente hoch und erhält automatisch eine technische SIP-Nummer von Spitch. Die Call-Weiterleitung kann jederzeit aktiviert oder deaktiviert werden, was maximale Flexibilität und BCM-Sicherheit gewährleistet.",
+ "value": "From registration to technical setup - your client registers with PowerOn for telephony services, uploads documents, and automatically receives a technical SIP number from Spitch. Call forwarding can be activated or deactivated at any time, ensuring maximum flexibility and BCM safety."
+ },
+ {
+ "context": "ui",
+ "key": "Vorherige Seite",
+ "value": "Previous page"
+ },
+ {
+ "context": "ui",
+ "key": "Vorschau",
+ "value": "Preview"
+ },
+ {
+ "context": "ui",
+ "key": "Vorschau für diesen Dateityp nicht verfügbar",
+ "value": "Preview not available for this file type"
+ },
+ {
+ "context": "ui",
+ "key": "Vorschau schließen",
+ "value": "Close preview"
+ },
+ {
+ "context": "ui",
+ "key": "Vorschau wird geladen...",
+ "value": "Loading preview..."
+ },
+ {
+ "context": "ui",
+ "key": "WARTEND",
+ "value": "PENDING"
+ },
+ {
+ "context": "ui",
+ "key": "Wartend",
+ "value": "Pending"
+ },
+ {
+ "context": "ui",
+ "key": "Was passiert als nächstes?",
+ "value": "What happens next?"
+ },
+ {
+ "context": "ui",
+ "key": "Wechseln Sie zwischen hellem und dunklem Modus",
+ "value": "Switch between light and dark mode"
+ },
+ {
+ "context": "ui",
+ "key": "Werkzeuge",
+ "value": "Utils"
+ },
+ {
+ "context": "ui",
+ "key": "Werkzeuge und Hilfsmittel",
+ "value": "Utilities and tools"
+ },
+ {
+ "context": "ui",
+ "key": "Wie möchten Sie am Telefon genannt werden?",
+ "value": "How would you like to be called on the phone?"
+ },
+ {
+ "context": "ui",
+ "key": "Wiederholen",
+ "value": "Retry"
+ },
+ {
+ "context": "ui",
+ "key": "Willkommen in Ihrem Arbeitsbereich",
+ "value": "Welcome to your workspace"
+ },
+ {
+ "context": "ui",
+ "key": "Wird gesendet...",
+ "value": "Sending..."
+ },
+ {
+ "context": "ui",
+ "key": "Wird gestoppt...",
+ "value": "Stopping..."
+ },
+ {
+ "context": "ui",
+ "key": "Wird geteilt...",
+ "value": "Sharing..."
+ },
+ {
+ "context": "ui",
+ "key": "Wird hochgeladen...",
+ "value": "Uploading..."
+ },
+ {
+ "context": "ui",
+ "key": "Wird verarbeitet...",
+ "value": "Processing..."
+ },
+ {
+ "context": "ui",
+ "key": "Workflow",
+ "value": "Workflow"
+ },
+ {
+ "context": "ui",
+ "key": "Workflow Fortschritt",
+ "value": "Workflow Progress"
+ },
+ {
+ "context": "ui",
+ "key": "Workflow auswählen",
+ "value": "Select Workflow"
+ },
+ {
+ "context": "ui",
+ "key": "Workflow fehlgeschlagen.",
+ "value": "Workflow failed."
+ },
+ {
+ "context": "ui",
+ "key": "Workflow fortsetzen",
+ "value": "Resume workflow"
+ },
+ {
+ "context": "ui",
+ "key": "Workflow läuft... Warte auf Logs...",
+ "value": "Workflow running... Waiting for logs..."
+ },
+ {
+ "context": "ui",
+ "key": "Workflow löschen",
+ "value": "Delete workflow"
+ },
+ {
+ "context": "ui",
+ "key": "Workflow stoppen",
+ "value": "Stop workflow"
+ },
+ {
+ "context": "ui",
+ "key": "Workflow wird fortgesetzt",
+ "value": "Continuing workflow"
+ },
+ {
+ "context": "ui",
+ "key": "Workflow wird gelöscht...",
+ "value": "Deleting workflow..."
+ },
+ {
+ "context": "ui",
+ "key": "Workflow-Automatisierungen verwalten",
+ "value": "Manage workflow automations"
+ },
+ {
+ "context": "ui",
+ "key": "Workflow-Nachrichten werden geladen...",
+ "value": "Loading workflow messages..."
+ },
+ {
+ "context": "ui",
+ "key": "Workflow-Verlauf",
+ "value": "Workflow History"
+ },
+ {
+ "context": "ui",
+ "key": "Workflows",
+ "value": "Workflows"
+ },
+ {
+ "context": "ui",
+ "key": "Workflows werden geladen...",
+ "value": "Loading workflows..."
+ },
+ {
+ "context": "ui",
+ "key": "Wähle einen Workflow aus der Liste aus oder starte einen neuen Workflow",
+ "value": "Select a workflow from the list or start a new workflow"
+ },
+ {
+ "context": "ui",
+ "key": "Wählen Sie Ihre bevorzugte Sprache",
+ "value": "Choose your preferred language"
+ },
+ {
+ "context": "ui",
+ "key": "You",
+ "value": "You"
+ },
+ {
+ "context": "ui",
+ "key": "Zeitzone",
+ "value": "Timezone"
+ },
+ {
+ "context": "ui",
+ "key": "Zentrale",
+ "value": "Dashboard"
+ },
+ {
+ "context": "ui",
+ "key": "Zu dunklem Modus wechseln",
+ "value": "Switch to dark mode"
+ },
+ {
+ "context": "ui",
+ "key": "Zu hellem Modus wechseln",
+ "value": "Switch to light mode"
+ },
+ {
+ "context": "ui",
+ "key": "Zugriff",
+ "value": "Access"
+ },
+ {
+ "context": "ui",
+ "key": "Zugriff erfolgreich erstellt",
+ "value": "Access created successfully"
+ },
+ {
+ "context": "ui",
+ "key": "Zugriff verweigert",
+ "value": "Access Denied"
+ },
+ {
+ "context": "ui",
+ "key": "Zuletzt geprüft",
+ "value": "Last Checked"
+ },
+ {
+ "context": "ui",
+ "key": "Zum Bestätigen klicken",
+ "value": "Click to confirm"
+ },
+ {
+ "context": "ui",
+ "key": "Zum Bestätigen klicken...",
+ "value": "Click to confirm..."
+ },
+ {
+ "context": "ui",
+ "key": "Zurück zur Sprach Integration",
+ "value": "Back to Speech Integration"
+ },
+ {
+ "context": "ui",
+ "key": "angehängt",
+ "value": "attached"
+ },
+ {
+ "context": "ui",
+ "key": "ausgewählt",
+ "value": "selected"
+ },
+ {
+ "context": "ui",
+ "key": "kontakt@firma.com",
+ "value": "contact@company.com"
+ },
+ {
+ "context": "ui",
+ "key": "oder",
+ "value": "or"
+ },
+ {
+ "context": "ui",
+ "key": "z.B. Beleg.pdf",
+ "value": "e.g. Receipt.pdf"
+ },
+ {
+ "context": "ui",
+ "key": "z.B. Finanzdienstleistungen, Technologie, etc.",
+ "value": "e.g. Financial Services, Technology, etc."
+ },
+ {
+ "context": "ui",
+ "key": "z.B. Muster AG 2026",
+ "value": "e.g. Muster AG 2026"
+ },
+ {
+ "context": "ui",
+ "key": "z.B. Treuhand AG Zürich",
+ "value": "e.g. Trustee AG Zurich"
+ },
+ {
+ "context": "ui",
+ "key": "z.B. admin, operate, userreport",
+ "value": "e.g. admin, operate, userreport"
+ },
+ {
+ "context": "ui",
+ "key": "z.B. treuhand-ag-zuerich",
+ "value": "e.g. trustee-ag-zurich"
+ },
+ {
+ "context": "ui",
+ "key": "{authority} Verbindung bearbeiten",
+ "value": "Edit {authority} Connection"
+ },
+ {
+ "context": "ui",
+ "key": "{column} filtern",
+ "value": "Filter {column}"
+ },
+ {
+ "context": "ui",
+ "key": "{count} Benutzer ausgewählt",
+ "value": "{count} users selected"
+ },
+ {
+ "context": "ui",
+ "key": "Änderungen speichern",
+ "value": "Save Changes"
+ },
+ {
+ "context": "ui",
+ "key": "Über",
+ "value": "About"
+ },
+ {
+ "context": "ui",
+ "key": "Überprüfungsprozess",
+ "value": "Review Process"
+ },
+ {
+ "context": "ui",
+ "key": "Übersicht - Sehen Sie den Arbeitsbereich-Status und Updates",
+ "value": "Overview - See workspace status and updates"
+ },
+ {
+ "context": "ui",
+ "key": "Überwachen Sie automatisch 100% der Gespräche, um wertvolle Einblicke für Ihr Unternehmen zu erhalten.",
+ "value": "Automatically monitor 100% of conversations to get valuable insights for your business."
+ },
+ {
+ "context": "ui",
+ "key": "(gefiltert nach {name})",
+ "value": "(filtered by {name})"
+ },
+ {
+ "context": "ui",
+ "key": "({count} gefiltert)",
+ "value": "({count} filtered)"
+ },
+ {
+ "context": "ui",
+ "key": "Abonnement, Einstellungen und Guthaben pro Mandant",
+ "value": "Subscription, settings, and credit per tenant"
+ },
+ {
+ "context": "ui",
+ "key": "Abrechnung",
+ "value": "Billing"
+ },
+ {
+ "context": "ui",
+ "key": "Aktion",
+ "value": "Action"
+ },
+ {
+ "context": "ui",
+ "key": "Benutzer-Billing",
+ "value": "User billing"
+ },
+ {
+ "context": "ui",
+ "key": "Benutzer-Guthaben",
+ "value": "User credits"
+ },
+ {
+ "context": "ui",
+ "key": "Benutzer:",
+ "value": "User:"
+ },
+ {
+ "context": "ui",
+ "key": "Deaktiviert",
+ "value": "Disabled"
+ },
+ {
+ "context": "ui",
+ "key": "Du hast Zugriff auf {instanceCount} {instanceWord} in {mandateCount} {mandateWord}.",
+ "value": "You have access to {instanceCount} {instanceWord} in {mandateCount} {mandateWord}."
+ },
+ {
+ "context": "ui",
+ "key": "Einstellungen gespeichert!",
+ "value": "Settings saved!"
+ },
+ {
+ "context": "ui",
+ "key": "Feature-Instanz",
+ "value": "Feature instance"
+ },
+ {
+ "context": "ui",
+ "key": "Feature-Instanzen",
+ "value": "Feature instances"
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Speichern",
+ "value": "Error saving"
+ },
+ {
+ "context": "ui",
+ "key": "Gesamtguthaben",
+ "value": "Total credit"
+ },
+ {
+ "context": "ui",
+ "key": "Mandant:",
+ "value": "Tenant:"
+ },
+ {
+ "context": "ui",
+ "key": "Mandanten",
+ "value": "Tenants"
+ },
+ {
+ "context": "ui",
+ "key": "Mandanten-Billing",
+ "value": "Tenant billing"
+ },
+ {
+ "context": "ui",
+ "key": "Mandanten-Guthaben",
+ "value": "Tenant credits"
+ },
+ {
+ "context": "ui",
+ "key": "Mandant",
+ "value": "Tenant"
+ },
+ {
+ "context": "ui",
+ "key": "Niedrig",
+ "value": "Low"
+ },
+ {
+ "context": "ui",
+ "key": "Transaktionen",
+ "value": "Transactions"
+ },
+ {
+ "context": "ui",
+ "key": "Warnschwelle",
+ "value": "Warning threshold"
+ },
+ {
+ "context": "ui",
+ "key": "✓ Mandat eingereicht",
+ "value": "✓ Mandate Submitted"
+ }
+ ],
+ "status": "complete",
+ "isDefault": false
+ },
+ {
+ "id": "fr",
+ "label": "Français",
+ "entries": [
+ {
+ "context": "ui",
+ "key": "+41 123 456 789",
+ "value": "+41 123 456 789"
+ },
+ {
+ "context": "ui",
+ "key": "1 Benutzer ausgewählt",
+ "value": "1 utilisateur sélectionné"
+ },
+ {
+ "context": "ui",
+ "key": "ABGEBROCHEN",
+ "value": "ANNULÉ"
+ },
+ {
+ "context": "ui",
+ "key": "ABGESCHLOSSEN",
+ "value": "TERMINÉ"
+ },
+ {
+ "context": "ui",
+ "key": "Abbrechen",
+ "value": "Annuler"
+ },
+ {
+ "context": "ui",
+ "key": "Abgeschlossen",
+ "value": "Terminé"
+ },
+ {
+ "context": "ui",
+ "key": "Abmelden",
+ "value": "Se déconnecter"
+ },
+ {
+ "context": "ui",
+ "key": "Admin-Einstellungen",
+ "value": "Paramètres Admin"
+ },
+ {
+ "context": "ui",
+ "key": "Administrative Einstellungen",
+ "value": "Paramètres administratifs"
+ },
+ {
+ "context": "ui",
+ "key": "Administrator",
+ "value": "Administrateur"
+ },
+ {
+ "context": "ui",
+ "key": "Adresse",
+ "value": "Adresse"
+ },
+ {
+ "context": "ui",
+ "key": "Agent Assist (AA)",
+ "value": "Assistance Agent (AA)"
+ },
+ {
+ "context": "ui",
+ "key": "Aktionen",
+ "value": "Actions"
+ },
+ {
+ "context": "ui",
+ "key": "Aktiv",
+ "value": "Actif"
+ },
+ {
+ "context": "ui",
+ "key": "Aktiviert",
+ "value": "Activé"
+ },
+ {
+ "context": "ui",
+ "key": "Aktualisieren",
+ "value": "Mettre à jour"
+ },
+ {
+ "context": "ui",
+ "key": "Aktuelle Transkripte",
+ "value": "Transcriptions Récentes"
+ },
+ {
+ "context": "ui",
+ "key": "Alle Dateien",
+ "value": "Tous les fichiers"
+ },
+ {
+ "context": "ui",
+ "key": "Alle Elemente auswählen",
+ "value": "Sélectionner tous les éléments"
+ },
+ {
+ "context": "ui",
+ "key": "Alle Nicht-Standard-Sprachsets jetzt mit dem deutschen Master synchronisieren?",
+ "value": "Synchroniser maintenant tous les jeux (sauf défaut) avec l’allemand ?"
+ },
+ {
+ "context": "ui",
+ "key": "Alle abwählen",
+ "value": "Tout désélectionner"
+ },
+ {
+ "context": "ui",
+ "key": "Alle aktualisieren",
+ "value": "Tout mettre à jour"
+ },
+ {
+ "context": "ui",
+ "key": "Alle auswählen",
+ "value": "Tout sélectionner"
+ },
+ {
+ "context": "ui",
+ "key": "Analysiere Workflow...",
+ "value": "Analyse du workflow..."
+ },
+ {
+ "context": "ui",
+ "key": "Anmelden",
+ "value": "Se connecter"
+ },
+ {
+ "context": "ui",
+ "key": "Anrufer",
+ "value": "Appelant"
+ },
+ {
+ "context": "ui",
+ "key": "Anzeigen",
+ "value": "Voir"
+ },
+ {
+ "context": "ui",
+ "key": "Anzeigename",
+ "value": "Nom d’affichage"
+ },
+ {
+ "context": "ui",
+ "key": "Audio",
+ "value": "Audio"
+ },
+ {
+ "context": "ui",
+ "key": "Auf Standard zurücksetzen",
+ "value": "Réinitialiser par Défaut"
+ },
+ {
+ "context": "ui",
+ "key": "Aufgaben",
+ "value": "Tâches"
+ },
+ {
+ "context": "ui",
+ "key": "Ausführen",
+ "value": "Exécuter"
+ },
+ {
+ "context": "ui",
+ "key": "Ausgewählte Datei:",
+ "value": "Fichier sélectionné:"
+ },
+ {
+ "context": "ui",
+ "key": "Auth-Anbieter",
+ "value": "Autorité d'authentification"
+ },
+ {
+ "context": "ui",
+ "key": "Authentifizierungsanbieter",
+ "value": "Fournisseur d'authentification"
+ },
+ {
+ "context": "ui",
+ "key": "Authentifizierungstoken abgelaufen oder ungültig. Bitte verbinden Sie Ihr Microsoft-Konto erneut.",
+ "value": "Token d'authentification expiré ou invalide. Veuillez reconnecter votre compte Microsoft."
+ },
+ {
+ "context": "ui",
+ "key": "Automatisierung erfolgreich erstellt",
+ "value": "Automatisation créée avec succès"
+ },
+ {
+ "context": "ui",
+ "key": "Automatisierungen",
+ "value": "Automatisations"
+ },
+ {
+ "context": "ui",
+ "key": "Basisdaten",
+ "value": "Données de Base"
+ },
+ {
+ "context": "ui",
+ "key": "Bearbeiten",
+ "value": "Modifier"
+ },
+ {
+ "context": "ui",
+ "key": "Befehl eingeben (z.B., \"Erstelle ein neues Projekt namens 'Hauptstrasse 42'\")",
+ "value": "Entrez une commande (par exemple, \"Créer un nouveau projet nommé 'Rue Principale 42'\")"
+ },
+ {
+ "context": "ui",
+ "key": "Beginne ein Gespräch, indem du eine Nachricht eingibst, eine Vorlage auswählst oder einen vorherigen Workflow fortsetzt …",
+ "value": "Commencez une conversation en entrant un message, en sélectionnant un modèle ou en continuant un workflow précédent..."
+ },
+ {
+ "context": "ui",
+ "key": "Beginnen Sie mit:",
+ "value": "Commencez avec :"
+ },
+ {
+ "context": "ui",
+ "key": "Bei Genehmigung planen wir einen Einrichtungsanruf zur Konfiguration Ihrer Integration.",
+ "value": "Si approuvé, nous planifierons un appel de configuration pour configurer votre intégration."
+ },
+ {
+ "context": "ui",
+ "key": "Beim Hochladen ist ein Fehler aufgetreten.",
+ "value": "Une erreur s'est produite lors du téléchargement."
+ },
+ {
+ "context": "ui",
+ "key": "Beim Hochladen ist ein unerwarteter Fehler aufgetreten.",
+ "value": "Une erreur inattendue s'est produite lors du téléchargement."
+ },
+ {
+ "context": "ui",
+ "key": "Belege verwalten",
+ "value": "Gérer les pièces justificatives"
+ },
+ {
+ "context": "ui",
+ "key": "Benutzer",
+ "value": "Utilisateur"
+ },
+ {
+ "context": "ui",
+ "key": "Benutzer auswählen",
+ "value": "Sélectionner les utilisateurs"
+ },
+ {
+ "context": "ui",
+ "key": "Benutzer bearbeiten",
+ "value": "Modifier l'utilisateur"
+ },
+ {
+ "context": "ui",
+ "key": "Benutzer erstellen",
+ "value": "Créer l'utilisateur"
+ },
+ {
+ "context": "ui",
+ "key": "Benutzer hinzufügen",
+ "value": "Ajouter un utilisateur"
+ },
+ {
+ "context": "ui",
+ "key": "Benutzer löschen",
+ "value": "Supprimer l'utilisateur"
+ },
+ {
+ "context": "ui",
+ "key": "Benutzer werden geladen...",
+ "value": "Chargement des utilisateurs..."
+ },
+ {
+ "context": "ui",
+ "key": "Benutzer-Zugriff verwalten",
+ "value": "Gérer les accès utilisateurs"
+ },
+ {
+ "context": "ui",
+ "key": "Benutzerdefinierter Titel (optional)",
+ "value": "Titre personnalisé (facultatif)"
+ },
+ {
+ "context": "ui",
+ "key": "Benutzerinformationen",
+ "value": "Informations utilisateur"
+ },
+ {
+ "context": "ui",
+ "key": "Benutzerinformationen erfolgreich aktualisiert",
+ "value": "Informations utilisateur mises à jour avec succès"
+ },
+ {
+ "context": "ui",
+ "key": "Benutzerinformationen werden geladen...",
+ "value": "Chargement des informations utilisateur..."
+ },
+ {
+ "context": "ui",
+ "key": "Benutzername",
+ "value": "Nom d'utilisateur"
+ },
+ {
+ "context": "ui",
+ "key": "Benutzerverwaltung - Teammitglieder und Berechtigungen verwalten",
+ "value": "Gestion des Utilisateurs - Gérer les membres de l'équipe et les permissions"
+ },
+ {
+ "context": "ui",
+ "key": "Berechtigung",
+ "value": "Privilège"
+ },
+ {
+ "context": "ui",
+ "key": "Berechtigungsstufe",
+ "value": "Niveau de privilège"
+ },
+ {
+ "context": "ui",
+ "key": "Beschreibung",
+ "value": "Description"
+ },
+ {
+ "context": "ui",
+ "key": "Beschreibung der Rolle",
+ "value": "Description du rôle"
+ },
+ {
+ "context": "ui",
+ "key": "Betrachter",
+ "value": "Observateur"
+ },
+ {
+ "context": "ui",
+ "key": "Betreff",
+ "value": "Sujet"
+ },
+ {
+ "context": "ui",
+ "key": "Bezeichnung",
+ "value": "Libellé"
+ },
+ {
+ "context": "ui",
+ "key": "Bieten Sie Unterstützung im Live-Chat und setzen Sie intelligente Chatbots in allen Kanälen ein.",
+ "value": "Offrez une assistance en chat en direct et déployez des chatbots intelligents sur tous les canaux."
+ },
+ {
+ "context": "ui",
+ "key": "Bild",
+ "value": "Image"
+ },
+ {
+ "context": "ui",
+ "key": "Bitte geben Sie eine gültige E-Mail-Adresse ein",
+ "value": "Veuillez entrer une adresse email valide"
+ },
+ {
+ "context": "ui",
+ "key": "Bitte wählen Sie mindestens einen Benutzer aus",
+ "value": "Veuillez sélectionner au moins un utilisateur"
+ },
+ {
+ "context": "ui",
+ "key": "Branche",
+ "value": "Secteur"
+ },
+ {
+ "context": "ui",
+ "key": "Branche ist erforderlich",
+ "value": "Le secteur d'activité est requis"
+ },
+ {
+ "context": "ui",
+ "key": "Buchungsbetrag",
+ "value": "Montant de comptabilisation"
+ },
+ {
+ "context": "ui",
+ "key": "Buchungspositionen verwalten",
+ "value": "Gérer les positions de réservation"
+ },
+ {
+ "context": "ui",
+ "key": "Buchungswährung",
+ "value": "Devise de comptabilisation"
+ },
+ {
+ "context": "ui",
+ "key": "Chat Platform (CP)",
+ "value": "Plateforme de Chat (CP)"
+ },
+ {
+ "context": "ui",
+ "key": "Chat leeren...",
+ "value": "Nouveau Chat"
+ },
+ {
+ "context": "ui",
+ "key": "Chatbereich",
+ "value": "Zone de chat"
+ },
+ {
+ "context": "ui",
+ "key": "Darstellung",
+ "value": "Apparence"
+ },
+ {
+ "context": "ui",
+ "key": "Datei",
+ "value": "Fichier"
+ },
+ {
+ "context": "ui",
+ "key": "Datei anhängen",
+ "value": "Joindre un fichier"
+ },
+ {
+ "context": "ui",
+ "key": "Datei bereits vorhanden",
+ "value": "Fichier Déjà Existant"
+ },
+ {
+ "context": "ui",
+ "key": "Datei entfernen",
+ "value": "Supprimer le fichier"
+ },
+ {
+ "context": "ui",
+ "key": "Datei erfolgreich hochgeladen!",
+ "value": "Fichier téléchargé avec succès !"
+ },
+ {
+ "context": "ui",
+ "key": "Datei herunterladen",
+ "value": "Télécharger le fichier"
+ },
+ {
+ "context": "ui",
+ "key": "Datei hier ablegen...",
+ "value": "Déposer le fichier ici..."
+ },
+ {
+ "context": "ui",
+ "key": "Datei hinzufügen",
+ "value": "Ajouter un fichier"
+ },
+ {
+ "context": "ui",
+ "key": "Datei hochladen",
+ "value": "Télécharger un fichier"
+ },
+ {
+ "context": "ui",
+ "key": "Datei löschen",
+ "value": "Supprimer le fichier"
+ },
+ {
+ "context": "ui",
+ "key": "Datei vorschauen",
+ "value": "Aperçu du fichier"
+ },
+ {
+ "context": "ui",
+ "key": "Datei-Ablage während Workflow deaktiviert",
+ "value": "Dépôt de fichiers désactivé pendant le workflow"
+ },
+ {
+ "context": "ui",
+ "key": "Dateien",
+ "value": "Fichiers"
+ },
+ {
+ "context": "ui",
+ "key": "Dateien anhängen",
+ "value": "Joindre des fichiers"
+ },
+ {
+ "context": "ui",
+ "key": "Dateien auswählen",
+ "value": "Sélectionner des fichiers"
+ },
+ {
+ "context": "ui",
+ "key": "Dateien hier ablegen",
+ "value": "Déposer les fichiers ici"
+ },
+ {
+ "context": "ui",
+ "key": "Dateien hier ablegen zum Anhängen",
+ "value": "Déposez les fichiers ici pour les joindre"
+ },
+ {
+ "context": "ui",
+ "key": "Dateien hierher ziehen",
+ "value": "Glisser les fichiers ici"
+ },
+ {
+ "context": "ui",
+ "key": "Dateien hochladen",
+ "value": "Télécharger des fichiers"
+ },
+ {
+ "context": "ui",
+ "key": "Dateien werden geladen...",
+ "value": "Chargement des fichiers..."
+ },
+ {
+ "context": "ui",
+ "key": "Dateien werden verarbeitet...",
+ "value": "Traitement des fichiers..."
+ },
+ {
+ "context": "ui",
+ "key": "Dateigröße",
+ "value": "Taille du fichier"
+ },
+ {
+ "context": "ui",
+ "key": "Dateiname",
+ "value": "Nom du fichier"
+ },
+ {
+ "context": "ui",
+ "key": "Dateityp",
+ "value": "Type de fichier"
+ },
+ {
+ "context": "ui",
+ "key": "Dateiverwaltung - Dokumente hochladen und organisieren",
+ "value": "Gestion des Fichiers - Télécharger et organiser les documents"
+ },
+ {
+ "context": "ui",
+ "key": "Dateivorschau",
+ "value": "Aperçu du fichier"
+ },
+ {
+ "context": "ui",
+ "key": "Daten aktualisieren",
+ "value": "Actualiser les données"
+ },
+ {
+ "context": "ui",
+ "key": "Daten empfangen",
+ "value": "Données reçues"
+ },
+ {
+ "context": "ui",
+ "key": "Daten gesendet",
+ "value": "Données envoyées"
+ },
+ {
+ "context": "ui",
+ "key": "Datenverwaltung",
+ "value": "Gestion des données"
+ },
+ {
+ "context": "ui",
+ "key": "Datenverwaltung - Datenimporte und -exporte verwalten",
+ "value": "Gestion des Données - Gérer les imports et exports de données"
+ },
+ {
+ "context": "ui",
+ "key": "Datenverwaltung mit Tabellen",
+ "value": "Gestion des données avec des tableaux"
+ },
+ {
+ "context": "ui",
+ "key": "Datum",
+ "value": "Date"
+ },
+ {
+ "context": "ui",
+ "key": "Dauer",
+ "value": "Durée"
+ },
+ {
+ "context": "ui",
+ "key": "Deutsch",
+ "value": "Deutsch"
+ },
+ {
+ "context": "ui",
+ "key": "Die Datei \"{fileName}\" existiert bereits mit identischem Inhalt. Die vorhandene Datei wird wiederverwendet.",
+ "value": "Le fichier \"{fileName}\" existe déjà avec un contenu identique. Le fichier existant sera réutilisé."
+ },
+ {
+ "context": "ui",
+ "key": "Die Erstellung einer neuen Sprache kann AI-Guthaben auf Ihrem Mandats-Pool belasten. Fortfahren?",
+ "value": "Créer une nouvelle langue peut consommer des crédits IA sur le pool du mandat. Continuer ?"
+ },
+ {
+ "context": "ui",
+ "key": "Dies ist Ihr Ausgangspunkt für den Zugriff auf alle Arbeitsbereich-Features und -Tools.",
+ "value": "Ceci est votre point de départ pour accéder à toutes les fonctionnalités et outils de votre espace de travail."
+ },
+ {
+ "context": "ui",
+ "key": "Diese Aktion kann nicht rückgängig gemacht werden.",
+ "value": "Cette action ne peut pas être annulée."
+ },
+ {
+ "context": "ui",
+ "key": "Diese Datei scheint beschädigt zu sein. Sie hat eine PDF-Erweiterung, enthält aber Textinhalte. Bitte laden Sie die Datei erneut hoch, falls möglich.",
+ "value": "Ce fichier semble être corrompu. Il a une extension PDF mais contient du contenu texte. Veuillez le télécharger à nouveau si possible."
+ },
+ {
+ "context": "ui",
+ "key": "Dieser Bereich enthält alle Verwaltungs- und Management-Tools für Ihren Arbeitsbereich.",
+ "value": "Cette section contient tous les outils d'administration et de gestion pour votre espace de travail."
+ },
+ {
+ "context": "ui",
+ "key": "Dieses Element auswählen",
+ "value": "Sélectionner cet élément"
+ },
+ {
+ "context": "ui",
+ "key": "Dieses Element kann nicht ausgewählt werden",
+ "value": "Cet élément ne peut pas être sélectionné"
+ },
+ {
+ "context": "ui",
+ "key": "Dieses Feld wird von {provider} verwaltet und kann nicht geändert werden",
+ "value": "Ce champ est géré par {provider} et ne peut pas être modifié"
+ },
+ {
+ "context": "ui",
+ "key": "Dokument",
+ "value": "Document"
+ },
+ {
+ "context": "ui",
+ "key": "Dokument erfolgreich erstellt",
+ "value": "Document créé avec succès"
+ },
+ {
+ "context": "ui",
+ "key": "Dokument herunterladen",
+ "value": "Télécharger le document"
+ },
+ {
+ "context": "ui",
+ "key": "Dokument vorschauen",
+ "value": "Aperçu du document"
+ },
+ {
+ "context": "ui",
+ "key": "Dokumente",
+ "value": "Documents"
+ },
+ {
+ "context": "ui",
+ "key": "Dokumente auflisten",
+ "value": "Lister les documents"
+ },
+ {
+ "context": "ui",
+ "key": "Dokumentname",
+ "value": "Nom du document"
+ },
+ {
+ "context": "ui",
+ "key": "Dunkel",
+ "value": "Sombre"
+ },
+ {
+ "context": "ui",
+ "key": "Durchsuchen",
+ "value": "Parcourir"
+ },
+ {
+ "context": "ui",
+ "key": "E-Mail",
+ "value": "Email"
+ },
+ {
+ "context": "ui",
+ "key": "E-Mail-Adresse",
+ "value": "Adresse Email"
+ },
+ {
+ "context": "ui",
+ "key": "E-Mail-Adresse ist erforderlich",
+ "value": "L'adresse email est requise"
+ },
+ {
+ "context": "ui",
+ "key": "E-Mail-Bestätigung",
+ "value": "Confirmation par Email"
+ },
+ {
+ "context": "ui",
+ "key": "Echtzeit-Datensynchronisation:",
+ "value": "Synchronisation de Données en Temps Réel:"
+ },
+ {
+ "context": "ui",
+ "key": "Eingereichte Daten:",
+ "value": "Données Soumises :"
+ },
+ {
+ "context": "ui",
+ "key": "Einrichtungsanruf",
+ "value": "Appel de Configuration"
+ },
+ {
+ "context": "ui",
+ "key": "Einstellungen",
+ "value": "Paramètres"
+ },
+ {
+ "context": "ui",
+ "key": "Einstellungen erfolgreich gespeichert!",
+ "value": "Paramètres sauvegardés avec succès !"
+ },
+ {
+ "context": "ui",
+ "key": "Einstellungen werden in zukünftigen Updates hinzugefügt.",
+ "value": "Le contenu des paramètres sera ajouté dans les futures mises à jour."
+ },
+ {
+ "context": "ui",
+ "key": "Einstellungen wurden erfolgreich zurückgesetzt.",
+ "value": "Les paramètres ont été réinitialisés avec succès."
+ },
+ {
+ "context": "ui",
+ "key": "Einträge pro Seite:",
+ "value": "Éléments par page:"
+ },
+ {
+ "context": "ui",
+ "key": "Empfänger",
+ "value": "Destinataire"
+ },
+ {
+ "context": "ui",
+ "key": "Endzeit",
+ "value": "Heure de Fin"
+ },
+ {
+ "context": "ui",
+ "key": "English",
+ "value": "English"
+ },
+ {
+ "context": "ui",
+ "key": "Entdeckte Sites",
+ "value": "Sites découverts"
+ },
+ {
+ "context": "ui",
+ "key": "Erfolgreich",
+ "value": "Succès"
+ },
+ {
+ "context": "ui",
+ "key": "Erfolgsrate",
+ "value": "Taux de succès"
+ },
+ {
+ "context": "ui",
+ "key": "Erleben Sie die Zukunft der Mandantenkommunikation durch unsere strategische Partnerschaft mit Spitch.ai. Diese bahnbrechende Integration verwandelt Ihre PowerOn-Plattform in ein intelligentes Telefonie-System, das externe Mandanten nahtlos mit Unternehmen verbindet.",
+ "value": "Découvrez l'avenir de la communication client grâce à notre partenariat stratégique avec Spitch.ai. Cette intégration révolutionnaire transforme votre plateforme PowerOn en un système téléphonique intelligent qui connecte de manière transparente les clients externes avec les entreprises."
+ },
+ {
+ "context": "ui",
+ "key": "Erneut versuchen",
+ "value": "Réessayer"
+ },
+ {
+ "context": "ui",
+ "key": "Erste Seite",
+ "value": "Première page"
+ },
+ {
+ "context": "ui",
+ "key": "Erstellen",
+ "value": "Créer"
+ },
+ {
+ "context": "ui",
+ "key": "Erstellen und verwalten Sie RBAC-Rollen und deren Berechtigungen.",
+ "value": "Créez et gérez les rôles RBAC et leurs permissions."
+ },
+ {
+ "context": "ui",
+ "key": "Erstellen...",
+ "value": "Création..."
+ },
+ {
+ "context": "ui",
+ "key": "Erstellt",
+ "value": "Créé"
+ },
+ {
+ "context": "ui",
+ "key": "Erstellte Dateien",
+ "value": "Fichiers créés"
+ },
+ {
+ "context": "ui",
+ "key": "Erstellungsdatum",
+ "value": "Date de création"
+ },
+ {
+ "context": "ui",
+ "key": "Externe E-Mail",
+ "value": "E-mail externe"
+ },
+ {
+ "context": "ui",
+ "key": "Externe E-Mail-Adresse eingeben",
+ "value": "Entrez l'adresse e-mail externe"
+ },
+ {
+ "context": "ui",
+ "key": "Externen Benutzernamen eingeben",
+ "value": "Entrez le nom d'utilisateur externe"
+ },
+ {
+ "context": "ui",
+ "key": "Externer Benutzername",
+ "value": "Nom d'utilisateur externe"
+ },
+ {
+ "context": "ui",
+ "key": "FEHLER",
+ "value": "ERREUR"
+ },
+ {
+ "context": "ui",
+ "key": "FEHLGESCHLAGEN",
+ "value": "ÉCHEC"
+ },
+ {
+ "context": "ui",
+ "key": "Falls Sie Fragen zu Ihrem Mandat oder dem Integrationsprozess haben, zögern Sie nicht, unser Support-Team zu kontaktieren.",
+ "value": "Si vous avez des questions sur votre mandat ou le processus d'intégration, n'hésitez pas à contacter notre équipe de support."
+ },
+ {
+ "context": "ui",
+ "key": "Fehler",
+ "value": "Erreur"
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Aktualisieren der Benutzerinformationen",
+ "value": "Erreur lors de la mise à jour des informations utilisateur"
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Erstellen der Automatisierung",
+ "value": "Erreur lors de la création de l'automatisation"
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Erstellen der Organisation",
+ "value": "Erreur lors de la création de l'organisation"
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Erstellen der Position",
+ "value": "Erreur lors de la création de la position"
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Erstellen der RBAC-Regel",
+ "value": "Erreur lors de la création de la règle RBAC"
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Erstellen der Rolle",
+ "value": "Erreur lors de la création du rôle"
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Erstellen des Dokuments",
+ "value": "Erreur lors de la création du document"
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Erstellen des Mandats",
+ "value": "Erreur lors de la création du mandat"
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Erstellen des Prompts",
+ "value": "Erreur lors de la création du prompt"
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Erstellen des Team-Mitglieds",
+ "value": "Erreur lors de la création du membre de l'équipe"
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Erstellen des Vertrags",
+ "value": "Erreur lors de la création du contrat"
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Erstellen des Zugriffs",
+ "value": "Erreur lors de la création de l'accès"
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Laden der Benutzer",
+ "value": "Erreur lors du chargement des utilisateurs"
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Laden der Benutzer:",
+ "value": "Erreur lors du chargement des utilisateurs:"
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Laden der Benutzerinformationen",
+ "value": "Erreur lors du chargement des informations utilisateur"
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Laden der Dateien:",
+ "value": "Erreur lors du chargement des fichiers:"
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Laden der Logs",
+ "value": "Erreur lors du chargement des logs"
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Laden der Nachrichten:",
+ "value": "Erreur lors du chargement des messages:"
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Laden der Prompts",
+ "value": "Erreur lors du chargement des prompts"
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Laden der Prompts:",
+ "value": "Erreur lors du chargement des prompts:"
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Laden der SharePoint Dokumente:",
+ "value": "Erreur lors du chargement des documents SharePoint:"
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Laden der Vorschau",
+ "value": "Erreur lors du chargement de l'aperçu"
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Laden der Workflows:",
+ "value": "Erreur lors du chargement des workflows:"
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Löschen",
+ "value": "Erreur lors de la suppression"
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Speichern der Einstellungen. Bitte versuchen Sie es erneut.",
+ "value": "Échec de la sauvegarde des paramètres. Veuillez réessayer."
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Teilen des Prompts",
+ "value": "Erreur lors du partage du prompt"
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Verarbeiten der Dateien",
+ "value": "Erreur lors du traitement des fichiers"
+ },
+ {
+ "context": "ui",
+ "key": "Fehler:",
+ "value": "Erreur:"
+ },
+ {
+ "context": "ui",
+ "key": "Fehlgeschlagen",
+ "value": "Échoué"
+ },
+ {
+ "context": "ui",
+ "key": "Filter löschen",
+ "value": "Effacer le filtre"
+ },
+ {
+ "context": "ui",
+ "key": "Firma",
+ "value": "Entreprise"
+ },
+ {
+ "context": "ui",
+ "key": "Firmenname",
+ "value": "Nom de l'Entreprise"
+ },
+ {
+ "context": "ui",
+ "key": "Firmenname ist erforderlich",
+ "value": "Le nom de l'entreprise est requis"
+ },
+ {
+ "context": "ui",
+ "key": "Folgenachricht wird gesendet...",
+ "value": "Envoi du message de suivi..."
+ },
+ {
+ "context": "ui",
+ "key": "Fortfahren",
+ "value": "Continuer"
+ },
+ {
+ "context": "ui",
+ "key": "Fortsetzen",
+ "value": "Continuer"
+ },
+ {
+ "context": "ui",
+ "key": "Fragen?",
+ "value": "Questions ?"
+ },
+ {
+ "context": "ui",
+ "key": "Français",
+ "value": "Français"
+ },
+ {
+ "context": "ui",
+ "key": "Fügen Sie eine Nachricht für die Empfänger hinzu",
+ "value": "Ajoutez un message pour les destinataires"
+ },
+ {
+ "context": "ui",
+ "key": "GESTOPPT",
+ "value": "ARRÊTÉ"
+ },
+ {
+ "context": "ui",
+ "key": "Geben Sie Ihren Firmennamen ein",
+ "value": "Entrez le nom de votre entreprise"
+ },
+ {
+ "context": "ui",
+ "key": "Geben Sie Kunden einen schnellen und effizienten Selbstservice für Sprach- und Textanfragen, der 24/7 verfügbar ist.",
+ "value": "Offrez aux clients un libre-service rapide et efficace pour les requêtes vocales et textuelles disponible 24h/24."
+ },
+ {
+ "context": "ui",
+ "key": "Geben Sie den Inhalt des Prompts ein",
+ "value": "Entrez le contenu du prompt"
+ },
+ {
+ "context": "ui",
+ "key": "Geben Sie einen Namen für den Prompt ein",
+ "value": "Entrez un nom pour le prompt"
+ },
+ {
+ "context": "ui",
+ "key": "Geben Sie einen benutzerdefinierten Titel ein",
+ "value": "Entrez un titre personnalisé"
+ },
+ {
+ "context": "ui",
+ "key": "Geplante und automatisierte Workflows",
+ "value": "Workflows planifiés et automatisés"
+ },
+ {
+ "context": "ui",
+ "key": "Geschäftszeiten",
+ "value": "Heures d'Ouverture"
+ },
+ {
+ "context": "ui",
+ "key": "Geschäftszeiten & Zeitzone",
+ "value": "Heures d'Ouverture et Fuseau Horaire"
+ },
+ {
+ "context": "ui",
+ "key": "Gespräch fortsetzen...",
+ "value": "Continuer la conversation..."
+ },
+ {
+ "context": "ui",
+ "key": "Gestartet",
+ "value": "Démarré"
+ },
+ {
+ "context": "ui",
+ "key": "Gestartet:",
+ "value": "Démarré:"
+ },
+ {
+ "context": "ui",
+ "key": "Gestoppt",
+ "value": "Arrêté"
+ },
+ {
+ "context": "ui",
+ "key": "Geteilt",
+ "value": "Partagés"
+ },
+ {
+ "context": "ui",
+ "key": "Geteilte Dateien",
+ "value": "Fichiers partagés"
+ },
+ {
+ "context": "ui",
+ "key": "Globale Sprachsets verwalten (SysAdmin).",
+ "value": "Gérer les jeux de langue globaux (SysAdmin)."
+ },
+ {
+ "context": "ui",
+ "key": "Google",
+ "value": "Google"
+ },
+ {
+ "context": "ui",
+ "key": "Google verbinden",
+ "value": "Connecter Google"
+ },
+ {
+ "context": "ui",
+ "key": "Google-Verbindung erstellen",
+ "value": "Créer une connexion Google"
+ },
+ {
+ "context": "ui",
+ "key": "Google-Verbindung hinzufügen",
+ "value": "Ajouter une connexion Google"
+ },
+ {
+ "context": "ui",
+ "key": "Grundlegende Daten und Ressourcen",
+ "value": "Données et ressources de base"
+ },
+ {
+ "context": "ui",
+ "key": "Größe",
+ "value": "Taille"
+ },
+ {
+ "context": "ui",
+ "key": "Hell",
+ "value": "Clair"
+ },
+ {
+ "context": "ui",
+ "key": "Herunterladen",
+ "value": "Télécharger"
+ },
+ {
+ "context": "ui",
+ "key": "Hinzufügen",
+ "value": "Ajouter"
+ },
+ {
+ "context": "ui",
+ "key": "Hochgeladen",
+ "value": "Téléchargés"
+ },
+ {
+ "context": "ui",
+ "key": "Hochladen",
+ "value": "Télécharger"
+ },
+ {
+ "context": "ui",
+ "key": "ID",
+ "value": "ID"
+ },
+ {
+ "context": "ui",
+ "key": "INFO",
+ "value": "INFO"
+ },
+ {
+ "context": "ui",
+ "key": "Identifizieren und authentifizieren Sie Anrufer in Sekunden mit kontinuierlicher Verifizierung und Sicherheit.",
+ "value": "Identifiez et authentifiez les appelants en quelques secondes avec une vérification et sécurité continues."
+ },
+ {
+ "context": "ui",
+ "key": "Ihre Anfrage wird verarbeitet...",
+ "value": "Traitement de votre demande..."
+ },
+ {
+ "context": "ui",
+ "key": "Inaktiv",
+ "value": "Inactif"
+ },
+ {
+ "context": "ui",
+ "key": "Information",
+ "value": "Information"
+ },
+ {
+ "context": "ui",
+ "key": "Inhalt",
+ "value": "Contenu"
+ },
+ {
+ "context": "ui",
+ "key": "Inhalt ist erforderlich",
+ "value": "Le contenu est requis"
+ },
+ {
+ "context": "ui",
+ "key": "Ja",
+ "value": "Oui"
+ },
+ {
+ "context": "ui",
+ "key": "Jetzt anmelden",
+ "value": "S'inscrire Maintenant"
+ },
+ {
+ "context": "ui",
+ "key": "Jetzt überspringen",
+ "value": "Ignorer pour l'Instant"
+ },
+ {
+ "context": "ui",
+ "key": "KI-erstellt",
+ "value": "Créés par IA"
+ },
+ {
+ "context": "ui",
+ "key": "KI-gestützte Dokumentengenerierung:",
+ "value": "Génération de Documents alimentée par l'IA:"
+ },
+ {
+ "context": "ui",
+ "key": "Kein Auth-Anbieter",
+ "value": "Aucune autorité d'authentification"
+ },
+ {
+ "context": "ui",
+ "key": "Kein Benutzername",
+ "value": "Aucun nom d'utilisateur"
+ },
+ {
+ "context": "ui",
+ "key": "Kein Nachrichteninhalt verfügbar",
+ "value": "Aucun contenu de message disponible"
+ },
+ {
+ "context": "ui",
+ "key": "Kein Name",
+ "value": "Aucun nom"
+ },
+ {
+ "context": "ui",
+ "key": "Kein Workflow ausgewählt",
+ "value": "Aucun workflow sélectionné"
+ },
+ {
+ "context": "ui",
+ "key": "Keine Benutzer verfügbar",
+ "value": "Aucun utilisateur disponible"
+ },
+ {
+ "context": "ui",
+ "key": "Keine Berechtigung",
+ "value": "Aucun privilège"
+ },
+ {
+ "context": "ui",
+ "key": "Keine Berechtigung zum Löschen des Prompts",
+ "value": "Aucune permission de supprimer l'invite"
+ },
+ {
+ "context": "ui",
+ "key": "Keine Dateien gefunden.",
+ "value": "Aucun fichier trouvé."
+ },
+ {
+ "context": "ui",
+ "key": "Keine E-Mail",
+ "value": "Aucun e-mail"
+ },
+ {
+ "context": "ui",
+ "key": "Keine Einträge",
+ "value": "Aucune entrée"
+ },
+ {
+ "context": "ui",
+ "key": "Keine Logs für diesen Workflow verfügbar",
+ "value": "Aucun log disponible pour ce workflow"
+ },
+ {
+ "context": "ui",
+ "key": "Keine Microsoft-Verbindungen gefunden. Bitte erstellen Sie zuerst eine Verbindung.",
+ "value": "Aucune connexion Microsoft trouvée. Veuillez d'abord créer une connexion."
+ },
+ {
+ "context": "ui",
+ "key": "Keine Prompts verfügbar",
+ "value": "Aucun prompt disponible"
+ },
+ {
+ "context": "ui",
+ "key": "Keine SharePoint-Sites gefunden",
+ "value": "Aucun site SharePoint trouvé"
+ },
+ {
+ "context": "ui",
+ "key": "Keine Sprach-Integrations-Daten gefunden. Bitte melden Sie sich zuerst an, um auf die Einstellungen zuzugreifen.",
+ "value": "Aucune donnée d'intégration vocale trouvée. Veuillez d'abord vous inscrire pour accéder aux paramètres."
+ },
+ {
+ "context": "ui",
+ "key": "Keine Sprache",
+ "value": "Aucune langue"
+ },
+ {
+ "context": "ui",
+ "key": "Keine Transkripte vorhanden",
+ "value": "Aucune transcription disponible"
+ },
+ {
+ "context": "ui",
+ "key": "Keine Vorschau verfügbar",
+ "value": "Aucun aperçu disponible"
+ },
+ {
+ "context": "ui",
+ "key": "Keine Workflows gefunden",
+ "value": "Aucun workflow trouvé"
+ },
+ {
+ "context": "ui",
+ "key": "Keine Workflows verfügbar",
+ "value": "Aucun workflow disponible"
+ },
+ {
+ "context": "ui",
+ "key": "Keine hochgeladenen Dateien gefunden.",
+ "value": "Aucun fichier téléchargé trouvé."
+ },
+ {
+ "context": "ui",
+ "key": "Keine mit Ihnen geteilten Dateien gefunden.",
+ "value": "Aucun fichier partagé trouvé."
+ },
+ {
+ "context": "ui",
+ "key": "Keine von der KI erstellten Dateien gefunden.",
+ "value": "Aucun fichier créé par IA trouvé."
+ },
+ {
+ "context": "ui",
+ "key": "Klicken Sie erneut zum Bestätigen",
+ "value": "Cliquez à nouveau pour confirmer"
+ },
+ {
+ "context": "ui",
+ "key": "Klicken Sie erneut zum Bestätigen der Löschung",
+ "value": "Cliquez à nouveau pour confirmer la suppression"
+ },
+ {
+ "context": "ui",
+ "key": "Klicken Sie, um zu öffnen",
+ "value": "Cliquez pour ouvrir"
+ },
+ {
+ "context": "ui",
+ "key": "Knowledge Agent (KA)",
+ "value": "Agent de Connaissance (KA)"
+ },
+ {
+ "context": "ui",
+ "key": "Konfigurieren Sie administrative Einstellungen und Systempräferenzen.",
+ "value": "Configurez les paramètres administratifs et les préférences système."
+ },
+ {
+ "context": "ui",
+ "key": "Konfigurieren und verwalten Sie rollenbasierte Zugriffssteuerungsregeln.",
+ "value": "Configurez et gérez les règles de contrôle d'accès basé sur les rôles."
+ },
+ {
+ "context": "ui",
+ "key": "Kontakte einrichten",
+ "value": "Configurer les Contacts"
+ },
+ {
+ "context": "ui",
+ "key": "Kontaktinformationen",
+ "value": "Informations de Contact"
+ },
+ {
+ "context": "ui",
+ "key": "Kontostatus",
+ "value": "Statut du compte"
+ },
+ {
+ "context": "ui",
+ "key": "Kopieren",
+ "value": "Copier"
+ },
+ {
+ "context": "ui",
+ "key": "Kosteneinsparungen & Effizienz:",
+ "value": "Économies de Coûts & Efficacité:"
+ },
+ {
+ "context": "ui",
+ "key": "Kundenverträge verwalten",
+ "value": "Gérer les contrats clients"
+ },
+ {
+ "context": "ui",
+ "key": "Lade Fortschritt...",
+ "value": "Chargement du progrès..."
+ },
+ {
+ "context": "ui",
+ "key": "Laden...",
+ "value": "Chargement..."
+ },
+ {
+ "context": "ui",
+ "key": "Land",
+ "value": "Pays"
+ },
+ {
+ "context": "ui",
+ "key": "Land ist erforderlich",
+ "value": "Le pays est requis"
+ },
+ {
+ "context": "ui",
+ "key": "Leer = Zugriff auf alle Verträge",
+ "value": "Vide = Accès à tous les contrats"
+ },
+ {
+ "context": "ui",
+ "key": "Letzte Aktivität",
+ "value": "Dernière activité"
+ },
+ {
+ "context": "ui",
+ "key": "Letzte Aktivität:",
+ "value": "Dernière activité:"
+ },
+ {
+ "context": "ui",
+ "key": "Letzte Aktivitäten - Sehen Sie Ihre neueste Arbeit",
+ "value": "Activités Récentes - Consultez votre travail le plus récent"
+ },
+ {
+ "context": "ui",
+ "key": "Letzte Seite",
+ "value": "Dernière page"
+ },
+ {
+ "context": "ui",
+ "key": "Link konnte nicht gesendet werden",
+ "value": "Échec de l'envoi du lien"
+ },
+ {
+ "context": "ui",
+ "key": "Log",
+ "value": "Journal"
+ },
+ {
+ "context": "ui",
+ "key": "Logs konnten nicht geladen werden",
+ "value": "Échec du chargement des logs"
+ },
+ {
+ "context": "ui",
+ "key": "Logs werden geladen...",
+ "value": "Chargement des logs..."
+ },
+ {
+ "context": "ui",
+ "key": "Lokal",
+ "value": "Local"
+ },
+ {
+ "context": "ui",
+ "key": "LÄUFT",
+ "value": "EN COURS"
+ },
+ {
+ "context": "ui",
+ "key": "Lädt hoch...",
+ "value": "Téléchargement..."
+ },
+ {
+ "context": "ui",
+ "key": "Läuft",
+ "value": "En cours"
+ },
+ {
+ "context": "ui",
+ "key": "Läuft ab am",
+ "value": "Expire le"
+ },
+ {
+ "context": "ui",
+ "key": "Löschen",
+ "value": "Supprimer"
+ },
+ {
+ "context": "ui",
+ "key": "Löschen ({count})",
+ "value": "Supprimer ({count})"
+ },
+ {
+ "context": "ui",
+ "key": "Löschen...",
+ "value": "Suppression..."
+ },
+ {
+ "context": "ui",
+ "key": "MIME-Typ",
+ "value": "Type MIME"
+ },
+ {
+ "context": "ui",
+ "key": "Management-Tools umfassen:",
+ "value": "Les outils de gestion incluent:"
+ },
+ {
+ "context": "ui",
+ "key": "Mandanten können jederzeit auf die technische SIP-Nummer umstellen und dabei erhebliche Telefoniekosten sparen. Die Integration funktioniert wie ein weiterer Connector (Outlook, SharePoint) und wird nahtlos in Ihren bestehenden Workflow integriert.",
+ "value": "Les clients peuvent basculer sur le numéro SIP technique à tout moment et économiser des coûts téléphoniques significatifs. L'intégration fonctionne comme un autre connecteur (Outlook, SharePoint) et est intégrée de manière transparente dans votre workflow existant."
+ },
+ {
+ "context": "ui",
+ "key": "Mandat erfolgreich eingereicht!",
+ "value": "Mandat Soumis avec Succès !"
+ },
+ {
+ "context": "ui",
+ "key": "Mandat erfolgreich erstellt",
+ "value": "Mandat créé avec succès"
+ },
+ {
+ "context": "ui",
+ "key": "Mandat erstellen",
+ "value": "Créer le Mandat"
+ },
+ {
+ "context": "ui",
+ "key": "Mandat hinzufügen",
+ "value": "Ajouter un mandat"
+ },
+ {
+ "context": "ui",
+ "key": "Mandat-ID",
+ "value": "ID Mandat"
+ },
+ {
+ "context": "ui",
+ "key": "Mandate",
+ "value": "Mandats"
+ },
+ {
+ "context": "ui",
+ "key": "Mandate und Berechtigungen verwalten",
+ "value": "Gérer les mandats et les permissions"
+ },
+ {
+ "context": "ui",
+ "key": "Mandatsverwaltung",
+ "value": "Gestion des mandats"
+ },
+ {
+ "context": "ui",
+ "key": "Mehr erfahren",
+ "value": "En savoir plus"
+ },
+ {
+ "context": "ui",
+ "key": "Meine Uploads",
+ "value": "Mes téléchargements"
+ },
+ {
+ "context": "ui",
+ "key": "Microsoft",
+ "value": "Microsoft"
+ },
+ {
+ "context": "ui",
+ "key": "Microsoft Verbindungen",
+ "value": "Connexions Microsoft"
+ },
+ {
+ "context": "ui",
+ "key": "Microsoft verbinden",
+ "value": "Connecter Microsoft"
+ },
+ {
+ "context": "ui",
+ "key": "Microsoft-Verbindung erstellen",
+ "value": "Créer une connexion Microsoft"
+ },
+ {
+ "context": "ui",
+ "key": "Microsoft-Verbindung hinzufügen",
+ "value": "Ajouter une connexion Microsoft"
+ },
+ {
+ "context": "ui",
+ "key": "Mitglied hinzufügen",
+ "value": "Ajouter un membre"
+ },
+ {
+ "context": "ui",
+ "key": "MwSt %",
+ "value": "TVA %"
+ },
+ {
+ "context": "ui",
+ "key": "MwSt Betrag",
+ "value": "Montant TVA"
+ },
+ {
+ "context": "ui",
+ "key": "Möchten Sie jetzt Kontakte für Ihr Mandat einrichten? Sie können dies auch später in den Einstellungen tun.",
+ "value": "Souhaitez-vous configurer les contacts pour votre mandat maintenant ? Vous pouvez également le faire plus tard dans les paramètres."
+ },
+ {
+ "context": "ui",
+ "key": "Nach unten scrollen",
+ "value": "Faire défiler vers le bas"
+ },
+ {
+ "context": "ui",
+ "key": "Nachricht (optional)",
+ "value": "Message (facultatif)"
+ },
+ {
+ "context": "ui",
+ "key": "Nachricht eingeben...",
+ "value": "Entrez votre message..."
+ },
+ {
+ "context": "ui",
+ "key": "Nachricht wird gesendet...",
+ "value": "Envoi du message..."
+ },
+ {
+ "context": "ui",
+ "key": "Nachrichten",
+ "value": "Messages"
+ },
+ {
+ "context": "ui",
+ "key": "Nahtloser Mandanten-Workflow:",
+ "value": "Workflow Client Transparent:"
+ },
+ {
+ "context": "ui",
+ "key": "Name",
+ "value": "Nom"
+ },
+ {
+ "context": "ui",
+ "key": "Name des Unternehmens",
+ "value": "Nom de l'entreprise"
+ },
+ {
+ "context": "ui",
+ "key": "Name ist erforderlich",
+ "value": "Le nom est requis"
+ },
+ {
+ "context": "ui",
+ "key": "Navigation - Erkunden Sie alle verfügbaren Tools",
+ "value": "Navigation - Explorez tous les outils disponibles"
+ },
+ {
+ "context": "ui",
+ "key": "Nein",
+ "value": "Non"
+ },
+ {
+ "context": "ui",
+ "key": "Neu starten",
+ "value": "Recommencer"
+ },
+ {
+ "context": "ui",
+ "key": "Neue Automatisierung",
+ "value": "Nouvelle Automatisation"
+ },
+ {
+ "context": "ui",
+ "key": "Neue Automatisierung erstellen",
+ "value": "Créer une Nouvelle Automatisation"
+ },
+ {
+ "context": "ui",
+ "key": "Neue Datei hochladen",
+ "value": "Télécharger un nouveau fichier"
+ },
+ {
+ "context": "ui",
+ "key": "Neue Organisation",
+ "value": "Nouvelle Organisation"
+ },
+ {
+ "context": "ui",
+ "key": "Neue Organisation erstellen",
+ "value": "Créer une nouvelle organisation"
+ },
+ {
+ "context": "ui",
+ "key": "Neue Position",
+ "value": "Nouvelle Position"
+ },
+ {
+ "context": "ui",
+ "key": "Neue Position erstellen",
+ "value": "Créer une nouvelle position"
+ },
+ {
+ "context": "ui",
+ "key": "Neue RBAC-Regel erstellen",
+ "value": "Créer une nouvelle règle RBAC"
+ },
+ {
+ "context": "ui",
+ "key": "Neue Rolle",
+ "value": "Nouveau Rôle"
+ },
+ {
+ "context": "ui",
+ "key": "Neue Rolle erstellen",
+ "value": "Créer un nouveau rôle"
+ },
+ {
+ "context": "ui",
+ "key": "Neue Sprache",
+ "value": "Nouvelle langue"
+ },
+ {
+ "context": "ui",
+ "key": "Neuen Prompt erstellen",
+ "value": "Créer un nouveau prompt"
+ },
+ {
+ "context": "ui",
+ "key": "Neuen Vertrag erstellen",
+ "value": "Créer un nouveau contrat"
+ },
+ {
+ "context": "ui",
+ "key": "Neuen Zugriff erstellen",
+ "value": "Créer un nouvel accès"
+ },
+ {
+ "context": "ui",
+ "key": "Neuer Prompt",
+ "value": "Nouveau prompt"
+ },
+ {
+ "context": "ui",
+ "key": "Neuer Vertrag",
+ "value": "Nouveau Contrat"
+ },
+ {
+ "context": "ui",
+ "key": "Neuer Zugriff",
+ "value": "Nouvel Accès"
+ },
+ {
+ "context": "ui",
+ "key": "Neues Dokument",
+ "value": "Nouveau Document"
+ },
+ {
+ "context": "ui",
+ "key": "Neues Dokument erstellen",
+ "value": "Créer un nouveau document"
+ },
+ {
+ "context": "ui",
+ "key": "Neues Mandat erstellen",
+ "value": "Créer un nouveau mandat"
+ },
+ {
+ "context": "ui",
+ "key": "Neues Team-Mitglied erstellen",
+ "value": "Créer un nouveau membre de l'équipe"
+ },
+ {
+ "context": "ui",
+ "key": "Neues Transkript",
+ "value": "Nouvelle Transcription"
+ },
+ {
+ "context": "ui",
+ "key": "Nicht verfügbar",
+ "value": "Non disponible"
+ },
+ {
+ "context": "ui",
+ "key": "Noch keine Befehle ausgeführt. Senden Sie einen Befehl, um Ergebnisse hier zu sehen.",
+ "value": "Aucune commande exécutée pour le moment. Envoyez une commande pour voir les résultats ici."
+ },
+ {
+ "context": "ui",
+ "key": "Noch keinen Workflow ausgewählt",
+ "value": "Aucun workflow sélectionné"
+ },
+ {
+ "context": "ui",
+ "key": "Nochmal versuchen",
+ "value": "Réessayer"
+ },
+ {
+ "context": "ui",
+ "key": "Nächste Seite",
+ "value": "Page suivante"
+ },
+ {
+ "context": "ui",
+ "key": "Oder geben Sie Ihre Nachricht ein...",
+ "value": "Ou entrez votre message..."
+ },
+ {
+ "context": "ui",
+ "key": "Ordnerpfade",
+ "value": "Chemins des dossiers"
+ },
+ {
+ "context": "ui",
+ "key": "Organisation",
+ "value": "Organisation"
+ },
+ {
+ "context": "ui",
+ "key": "Organisation erfolgreich erstellt",
+ "value": "Organisation créée avec succès"
+ },
+ {
+ "context": "ui",
+ "key": "Organisationen",
+ "value": "Organisations"
+ },
+ {
+ "context": "ui",
+ "key": "Originalbetrag",
+ "value": "Montant d'origine"
+ },
+ {
+ "context": "ui",
+ "key": "Originalwährung",
+ "value": "Devise d'origine"
+ },
+ {
+ "context": "ui",
+ "key": "PDF",
+ "value": "PDF"
+ },
+ {
+ "context": "ui",
+ "key": "Passwort",
+ "value": "Mot de passe"
+ },
+ {
+ "context": "ui",
+ "key": "Passwort eingeben",
+ "value": "Entrez le mot de passe"
+ },
+ {
+ "context": "ui",
+ "key": "Passwort-Link gesendet!",
+ "value": "Lien de mot de passe envoyé!"
+ },
+ {
+ "context": "ui",
+ "key": "Passwort-Link senden",
+ "value": "Envoyer le lien de mot de passe"
+ },
+ {
+ "context": "ui",
+ "key": "Pfad",
+ "value": "Chemin"
+ },
+ {
+ "context": "ui",
+ "key": "Position erfolgreich erstellt",
+ "value": "Position créée avec succès"
+ },
+ {
+ "context": "ui",
+ "key": "Positionen",
+ "value": "Positions"
+ },
+ {
+ "context": "ui",
+ "key": "Postleitzahl",
+ "value": "Code Postal"
+ },
+ {
+ "context": "ui",
+ "key": "Postleitzahl ist erforderlich",
+ "value": "Le code postal est requis"
+ },
+ {
+ "context": "ui",
+ "key": "Projekte",
+ "value": "Projets"
+ },
+ {
+ "context": "ui",
+ "key": "Projektverwaltung",
+ "value": "Gestion de projets"
+ },
+ {
+ "context": "ui",
+ "key": "Projektverwaltung und -organisation",
+ "value": "Gestion et organisation de projets"
+ },
+ {
+ "context": "ui",
+ "key": "Prompt",
+ "value": "Prompt"
+ },
+ {
+ "context": "ui",
+ "key": "Prompt Einstellungen",
+ "value": "Paramètres de prompt"
+ },
+ {
+ "context": "ui",
+ "key": "Prompt Vorlage",
+ "value": "Modèle de prompt"
+ },
+ {
+ "context": "ui",
+ "key": "Prompt ausführen",
+ "value": "Exécuter le prompt"
+ },
+ {
+ "context": "ui",
+ "key": "Prompt auswählen...",
+ "value": "Sélectionner un prompt..."
+ },
+ {
+ "context": "ui",
+ "key": "Prompt bearbeiten",
+ "value": "Modifier le prompt"
+ },
+ {
+ "context": "ui",
+ "key": "Prompt erfolgreich erstellt",
+ "value": "Prompt créé avec succès"
+ },
+ {
+ "context": "ui",
+ "key": "Prompt erstellen",
+ "value": "Créer le prompt"
+ },
+ {
+ "context": "ui",
+ "key": "Prompt hinzufügen",
+ "value": "Ajouter un prompt"
+ },
+ {
+ "context": "ui",
+ "key": "Prompt löschen",
+ "value": "Supprimer le prompt"
+ },
+ {
+ "context": "ui",
+ "key": "Prompt teilen",
+ "value": "Partager le prompt"
+ },
+ {
+ "context": "ui",
+ "key": "Prompt wird gelöscht...",
+ "value": "Suppression du prompt..."
+ },
+ {
+ "context": "ui",
+ "key": "Prompt-Inhalt",
+ "value": "Contenu du prompt"
+ },
+ {
+ "context": "ui",
+ "key": "Prompt-Inhalt darf 10.000 Zeichen nicht überschreiten",
+ "value": "Le contenu du prompt ne peut pas dépasser 10 000 caractères"
+ },
+ {
+ "context": "ui",
+ "key": "Prompt-Inhalt darf nicht leer sein",
+ "value": "Le contenu du prompt ne peut pas être vide"
+ },
+ {
+ "context": "ui",
+ "key": "Prompt-Name",
+ "value": "Nom du prompt"
+ },
+ {
+ "context": "ui",
+ "key": "Prompt-Name darf 100 Zeichen nicht überschreiten",
+ "value": "Le nom du prompt ne peut pas dépasser 100 caractères"
+ },
+ {
+ "context": "ui",
+ "key": "Prompt-Name darf nicht leer sein",
+ "value": "Le nom du prompt ne peut pas être vide"
+ },
+ {
+ "context": "ui",
+ "key": "Prompts",
+ "value": "Prompts"
+ },
+ {
+ "context": "ui",
+ "key": "Prompts für Ihren KI-Assistenten erstellen und verwalten",
+ "value": "Créer et gérer des prompts pour votre assistant IA"
+ },
+ {
+ "context": "ui",
+ "key": "Prompts verwalten",
+ "value": "Gérer vos prompts"
+ },
+ {
+ "context": "ui",
+ "key": "Prompts werden geladen...",
+ "value": "Chargement des prompts..."
+ },
+ {
+ "context": "ui",
+ "key": "Python",
+ "value": "Python"
+ },
+ {
+ "context": "ui",
+ "key": "Quelle",
+ "value": "Source"
+ },
+ {
+ "context": "ui",
+ "key": "RBAC-Regel erfolgreich erstellt",
+ "value": "Règle RBAC créée avec succès"
+ },
+ {
+ "context": "ui",
+ "key": "RBAC-Regel hinzufügen",
+ "value": "Ajouter une règle RBAC"
+ },
+ {
+ "context": "ui",
+ "key": "RBAC-Regeln",
+ "value": "Règles RBAC"
+ },
+ {
+ "context": "ui",
+ "key": "RBAC-Regelverwaltung",
+ "value": "Gestion des règles RBAC"
+ },
+ {
+ "context": "ui",
+ "key": "RBAC-Rollen",
+ "value": "Rôles RBAC"
+ },
+ {
+ "context": "ui",
+ "key": "RBAC-Rollenverwaltung",
+ "value": "Gestion des rôles RBAC"
+ },
+ {
+ "context": "ui",
+ "key": "Registrieren",
+ "value": "S'inscrire"
+ },
+ {
+ "context": "ui",
+ "key": "Revolutionäre Telefonie-Integration mit Spitch.ai",
+ "value": "Intégration Téléphonique Révolutionnaire avec Spitch.ai"
+ },
+ {
+ "context": "ui",
+ "key": "Rolle",
+ "value": "Rôle"
+ },
+ {
+ "context": "ui",
+ "key": "Rolle erfolgreich erstellt",
+ "value": "Rôle créé avec succès"
+ },
+ {
+ "context": "ui",
+ "key": "Rolle hinzufügen",
+ "value": "Ajouter un rôle"
+ },
+ {
+ "context": "ui",
+ "key": "Rollen",
+ "value": "Rôles"
+ },
+ {
+ "context": "ui",
+ "key": "Rollen-ID",
+ "value": "ID du rôle"
+ },
+ {
+ "context": "ui",
+ "key": "Rollenbasierte Zugriffssteuerungsregeln",
+ "value": "Règles de contrôle d'accès basé sur les rôles"
+ },
+ {
+ "context": "ui",
+ "key": "Rollenverwaltung",
+ "value": "Gestion des rôles"
+ },
+ {
+ "context": "ui",
+ "key": "Rufname am Telefon",
+ "value": "Nom au téléphone"
+ },
+ {
+ "context": "ui",
+ "key": "Runde",
+ "value": "Tour"
+ },
+ {
+ "context": "ui",
+ "key": "Runden",
+ "value": "Tours"
+ },
+ {
+ "context": "ui",
+ "key": "Schließen",
+ "value": "Fermer"
+ },
+ {
+ "context": "ui",
+ "key": "Schnellzugriff",
+ "value": "Accès Rapide"
+ },
+ {
+ "context": "ui",
+ "key": "Schnellzugriff - Springen Sie zu häufig verwendeten Features",
+ "value": "Accès Rapide - Accédez rapidement aux fonctionnalités fréquemment utilisées"
+ },
+ {
+ "context": "ui",
+ "key": "Seite {page} von {total} ({count} Einträge)",
+ "value": "Page {page} sur {total} ({count} éléments)"
+ },
+ {
+ "context": "ui",
+ "key": "Senden",
+ "value": "Envoyer"
+ },
+ {
+ "context": "ui",
+ "key": "Service",
+ "value": "Service"
+ },
+ {
+ "context": "ui",
+ "key": "Service-Verbindungen",
+ "value": "Connexions de service"
+ },
+ {
+ "context": "ui",
+ "key": "SharePoint Dokumente",
+ "value": "Documents SharePoint"
+ },
+ {
+ "context": "ui",
+ "key": "SharePoint Site URL",
+ "value": "URL du site SharePoint"
+ },
+ {
+ "context": "ui",
+ "key": "SharePoint Test",
+ "value": "Test SharePoint"
+ },
+ {
+ "context": "ui",
+ "key": "Sie erhalten in den nächsten Minuten eine Bestätigungs-E-Mail.",
+ "value": "Vous recevrez un email de confirmation dans les prochaines minutes."
+ },
+ {
+ "context": "ui",
+ "key": "Sie können auch auf den Upload-Button klicken",
+ "value": "Vous pouvez aussi cliquer sur le bouton de téléchargement"
+ },
+ {
+ "context": "ui",
+ "key": "Sie müssen sich zuerst für die Sprach-Integration anmelden, um auf die Transkriptverwaltung zuzugreifen.",
+ "value": "Vous devez d'abord vous inscrire à l'intégration vocale pour accéder à la gestion des transcriptions."
+ },
+ {
+ "context": "ui",
+ "key": "Sind Sie sicher, dass Sie \"{name}\" löschen möchten?",
+ "value": "Êtes-vous sûr de vouloir supprimer \"{name}\" ?"
+ },
+ {
+ "context": "ui",
+ "key": "Sind Sie sicher, dass Sie Workflow \"{id}...\" löschen möchten?",
+ "value": "Êtes-vous sûr de vouloir supprimer le workflow \"{id}...\"?"
+ },
+ {
+ "context": "ui",
+ "key": "Sind Sie sicher, dass Sie alle Sprach-Integrations-Einstellungen zurücksetzen möchten? Diese Aktion kann nicht rückgängig gemacht werden.",
+ "value": "Êtes-vous sûr de vouloir réinitialiser tous les paramètres d'intégration vocale ? Cette action ne peut pas être annulée."
+ },
+ {
+ "context": "ui",
+ "key": "Sind Sie sicher, dass Sie den Workflow \"{name}\" löschen möchten?",
+ "value": "Êtes-vous sûr de vouloir supprimer le workflow \"{name}\"?"
+ },
+ {
+ "context": "ui",
+ "key": "Sind Sie sicher, dass Sie die Datei \"{name}\" löschen möchten?",
+ "value": "Êtes-vous sûr de vouloir supprimer le fichier \"{name}\"?"
+ },
+ {
+ "context": "ui",
+ "key": "Sind Sie sicher, dass Sie die {count} ausgewählten Elemente löschen möchten?",
+ "value": "Êtes-vous sûr de vouloir supprimer les {count} éléments sélectionnés ?"
+ },
+ {
+ "context": "ui",
+ "key": "Sind Sie sicher, dass Sie die {service} Verbindung löschen möchten?",
+ "value": "Êtes-vous sûr de vouloir supprimer la connexion {service} ?"
+ },
+ {
+ "context": "ui",
+ "key": "Sind Sie sicher, dass Sie diesen Benutzer löschen möchten?",
+ "value": "Êtes-vous sûr de vouloir supprimer cet utilisateur ?"
+ },
+ {
+ "context": "ui",
+ "key": "Sind Sie sicher, dass Sie {count} Benutzer löschen möchten?",
+ "value": "Êtes-vous sûr de vouloir supprimer {count} utilisateurs ?"
+ },
+ {
+ "context": "ui",
+ "key": "Sind Sie sicher, dass Sie {count} Prompts löschen möchten?",
+ "value": "Êtes-vous sûr de vouloir supprimer {count} prompts ?"
+ },
+ {
+ "context": "ui",
+ "key": "Sind Sie sicher, dass Sie {count} Verbindungen löschen möchten?",
+ "value": "Êtes-vous sûr de vouloir supprimer {count} connexions ?"
+ },
+ {
+ "context": "ui",
+ "key": "Sites entdecken",
+ "value": "Découvrir les sites"
+ },
+ {
+ "context": "ui",
+ "key": "Speech Analytics (SA)",
+ "value": "Analyse Vocale (SA)"
+ },
+ {
+ "context": "ui",
+ "key": "Speichern",
+ "value": "Enregistrer"
+ },
+ {
+ "context": "ui",
+ "key": "Speichern...",
+ "value": "Sauvegarde..."
+ },
+ {
+ "context": "ui",
+ "key": "Spitch prüft vor jedem Anruf die Mandantenberechtigung bei PowerOn, während alle Datenänderungen zentral von PowerOn initiiert werden. Call-Transkripte werden in Echtzeit in Ihrer PowerOn-Datenbank gespeichert, mit vollständiger Mandantenisolation und Sicherheit. Bei Ausfällen werden Anrufe automatisch blockiert, um die Integrität zu gewährleisten.",
+ "value": "Spitch vérifie l'autorisation client avec PowerOn avant chaque appel, tandis que tous les changements de données sont initiés centralement par PowerOn. Les transcriptions d'appels sont stockées en temps réel dans votre base de données PowerOn avec une isolation complète du client et la sécurité. En cas de panne, les appels sont automatiquement bloqués pour assurer l'intégrité."
+ },
+ {
+ "context": "ui",
+ "key": "Sprach Integration",
+ "value": "Intégration Vocale"
+ },
+ {
+ "context": "ui",
+ "key": "Sprach-Einstellungen",
+ "value": "Paramètres Vocaux"
+ },
+ {
+ "context": "ui",
+ "key": "Sprach-Integration Einstellungen",
+ "value": "Paramètres d'Intégration Vocale"
+ },
+ {
+ "context": "ui",
+ "key": "Sprache",
+ "value": "Langue"
+ },
+ {
+ "context": "ui",
+ "key": "Sprachset {code} wirklich löschen?",
+ "value": "Supprimer vraiment le jeu de langue {code} ?"
+ },
+ {
+ "context": "ui",
+ "key": "Stadt",
+ "value": "Ville"
+ },
+ {
+ "context": "ui",
+ "key": "Stadt ist erforderlich",
+ "value": "La ville est requise"
+ },
+ {
+ "context": "ui",
+ "key": "Start",
+ "value": "Démarrage"
+ },
+ {
+ "context": "ui",
+ "key": "Startzeit",
+ "value": "Heure de Début"
+ },
+ {
+ "context": "ui",
+ "key": "Status",
+ "value": "Statut"
+ },
+ {
+ "context": "ui",
+ "key": "Stellen Sie alles, was Ihre Agenten benötigen, in ihren Händen bereit, mit einem einheitlichen Agent-Desktop.",
+ "value": "Mettez tout ce dont vos agents ont besoin à portée de main, avec un bureau d'agent unifié."
+ },
+ {
+ "context": "ui",
+ "key": "Stoppen",
+ "value": "Arrêter"
+ },
+ {
+ "context": "ui",
+ "key": "Straße",
+ "value": "Rue"
+ },
+ {
+ "context": "ui",
+ "key": "Straße ist erforderlich",
+ "value": "La rue est requise"
+ },
+ {
+ "context": "ui",
+ "key": "Suchen Sie nach Standorten über Adresse oder Koordinaten, oder verwenden Sie natürliche Sprache, um Projekte zu erstellen und zu verwalten.",
+ "value": "Recherchez des emplacements par adresse ou coordonnées, ou utilisez le langage naturel pour créer et gérer des projets."
+ },
+ {
+ "context": "ui",
+ "key": "Suchen...",
+ "value": "Rechercher..."
+ },
+ {
+ "context": "ui",
+ "key": "Systemadministrator",
+ "value": "Administrateur système"
+ },
+ {
+ "context": "ui",
+ "key": "Systemeinstellungen - Arbeitsbereich-Einstellungen konfigurieren",
+ "value": "Paramètres Système - Configurer les paramètres de l'espace de travail"
+ },
+ {
+ "context": "ui",
+ "key": "Tabelle",
+ "value": "Feuille de calcul"
+ },
+ {
+ "context": "ui",
+ "key": "Tags",
+ "value": "Étiquettes"
+ },
+ {
+ "context": "ui",
+ "key": "Team-Bereich",
+ "value": "Espace équipe"
+ },
+ {
+ "context": "ui",
+ "key": "Team-Mitglied erfolgreich erstellt",
+ "value": "Membre de l'équipe créé avec succès"
+ },
+ {
+ "context": "ui",
+ "key": "Team-Mitglieder",
+ "value": "Membres de l'équipe"
+ },
+ {
+ "context": "ui",
+ "key": "Team-Mitglieder verwalten",
+ "value": "Gérer les membres de votre équipe"
+ },
+ {
+ "context": "ui",
+ "key": "Team-Mitglieder verwalten, Berechtigungen festlegen und Zusammenarbeitseinstellungen konfigurieren",
+ "value": "Gérer les membres de l'équipe, définir les permissions et configurer les paramètres de collaboration"
+ },
+ {
+ "context": "ui",
+ "key": "Teilen",
+ "value": "Partager"
+ },
+ {
+ "context": "ui",
+ "key": "Telefon",
+ "value": "Téléphone"
+ },
+ {
+ "context": "ui",
+ "key": "Telefonnummer",
+ "value": "Numéro de Téléphone"
+ },
+ {
+ "context": "ui",
+ "key": "Telefonnummer ist erforderlich",
+ "value": "Le numéro de téléphone est requis"
+ },
+ {
+ "context": "ui",
+ "key": "Text",
+ "value": "Texte"
+ },
+ {
+ "context": "ui",
+ "key": "Textvorschau",
+ "value": "Aperçu du texte"
+ },
+ {
+ "context": "ui",
+ "key": "Theme",
+ "value": "Thème"
+ },
+ {
+ "context": "ui",
+ "key": "Token",
+ "value": "Jetons"
+ },
+ {
+ "context": "ui",
+ "key": "Transkript",
+ "value": "Transcription"
+ },
+ {
+ "context": "ui",
+ "key": "Transkript wird verarbeitet...",
+ "value": "Traitement de la transcription..."
+ },
+ {
+ "context": "ui",
+ "key": "Transkriptverwaltung",
+ "value": "Gestion des Transcriptions"
+ },
+ {
+ "context": "ui",
+ "key": "Trennungsfehler",
+ "value": "Erreur de déconnexion"
+ },
+ {
+ "context": "ui",
+ "key": "Treuhand",
+ "value": "Fiduciaire"
+ },
+ {
+ "context": "ui",
+ "key": "Treuhandverwaltung",
+ "value": "Gestion Fiduciaire"
+ },
+ {
+ "context": "ui",
+ "key": "Trustee-Organisationen verwalten",
+ "value": "Gérer les organisations fiduciaires"
+ },
+ {
+ "context": "ui",
+ "key": "Trustee-Rollen verwalten",
+ "value": "Gérer les rôles fiduciaires"
+ },
+ {
+ "context": "ui",
+ "key": "Typ",
+ "value": "Type"
+ },
+ {
+ "context": "ui",
+ "key": "UI-Sprachen",
+ "value": "Langues de l’UI"
+ },
+ {
+ "context": "ui",
+ "key": "Unbekannt",
+ "value": "Inconnu"
+ },
+ {
+ "context": "ui",
+ "key": "Unbekannte Größe",
+ "value": "Taille inconnue"
+ },
+ {
+ "context": "ui",
+ "key": "Unbekanntes Datum",
+ "value": "Date inconnue"
+ },
+ {
+ "context": "ui",
+ "key": "Unbenannt",
+ "value": "Sans nom"
+ },
+ {
+ "context": "ui",
+ "key": "Unbenannter Workflow",
+ "value": "Workflow sans nom"
+ },
+ {
+ "context": "ui",
+ "key": "Ungültiges Datum",
+ "value": "Date invalide"
+ },
+ {
+ "context": "ui",
+ "key": "Unser Team wird Ihr Mandat innerhalb von 1-2 Werktagen überprüfen.",
+ "value": "Notre équipe examinera votre mandat dans les 1-2 jours ouvrables."
+ },
+ {
+ "context": "ui",
+ "key": "Unsere bereits aktive Dokumenten-Extraktions-Engine generiert automatisch personalisierte Dokumente für Spitch, basierend auf Mandantenspezifischen Daten. Die KI nutzt FAQ-Datenbanken, Mitarbeiterinformationen und Service-Details, um jeden Anruf kontextuell und hochpersonalisiert zu gestalten.",
+ "value": "Notre moteur d'extraction de documents déjà actif génère automatiquement des documents personnalisés pour Spitch basés sur les données spécifiques au client. L'IA utilise les bases de données FAQ, les informations employés et les détails de service pour rendre chaque appel contextuel et hautement personnalisé."
+ },
+ {
+ "context": "ui",
+ "key": "Unternehmensinformationen",
+ "value": "Informations de l'Entreprise"
+ },
+ {
+ "context": "ui",
+ "key": "Unterstützt von",
+ "value": "Alimenté par"
+ },
+ {
+ "context": "ui",
+ "key": "Upload fehlgeschlagen. Bitte versuchen Sie es erneut.",
+ "value": "Échec du téléchargement. Veuillez réessayer."
+ },
+ {
+ "context": "ui",
+ "key": "VERARBEITUNG",
+ "value": "TRAITEMENT"
+ },
+ {
+ "context": "ui",
+ "key": "Valutadatum",
+ "value": "Date de valeur"
+ },
+ {
+ "context": "ui",
+ "key": "Verarbeitung",
+ "value": "Traitement"
+ },
+ {
+ "context": "ui",
+ "key": "Verbinden",
+ "value": "Connecter"
+ },
+ {
+ "context": "ui",
+ "key": "Verbindung aktualisieren",
+ "value": "Mettre à jour la connexion"
+ },
+ {
+ "context": "ui",
+ "key": "Verbindung testen",
+ "value": "Tester la connexion"
+ },
+ {
+ "context": "ui",
+ "key": "Verbindungen",
+ "value": "Connexions"
+ },
+ {
+ "context": "ui",
+ "key": "Verbindungen werden geladen...",
+ "value": "Chargement des connexions..."
+ },
+ {
+ "context": "ui",
+ "key": "Verbindungsfehler",
+ "value": "Erreur de connexion"
+ },
+ {
+ "context": "ui",
+ "key": "Verbunden am",
+ "value": "Connecté le"
+ },
+ {
+ "context": "ui",
+ "key": "Vereinheitlichen und liefern Sie Informationen an Ihre Kunden und Mitarbeiter, wann und wo sie sie benötigen.",
+ "value": "Unifiez et livrez des informations à vos clients et employés où et quand ils en ont besoin."
+ },
+ {
+ "context": "ui",
+ "key": "Verfügbare Tools",
+ "value": "Outils Disponibles"
+ },
+ {
+ "context": "ui",
+ "key": "Verfügbare Workflows",
+ "value": "Workflows disponibles"
+ },
+ {
+ "context": "ui",
+ "key": "Version",
+ "value": "Version"
+ },
+ {
+ "context": "ui",
+ "key": "Versuchen Sie, Ihr Microsoft-Konto auf der Verbindungsseite erneut zu verbinden.",
+ "value": "Essayez de reconnecter votre compte Microsoft dans la page Connexions."
+ },
+ {
+ "context": "ui",
+ "key": "Vertrag",
+ "value": "Contrat"
+ },
+ {
+ "context": "ui",
+ "key": "Vertrag (optional)",
+ "value": "Contrat (optionnel)"
+ },
+ {
+ "context": "ui",
+ "key": "Vertrag erfolgreich erstellt",
+ "value": "Contrat créé avec succès"
+ },
+ {
+ "context": "ui",
+ "key": "Verträge",
+ "value": "Contrats"
+ },
+ {
+ "context": "ui",
+ "key": "Verwalten Sie Daten über Tabellen. Wählen Sie eine Tabelle aus oder verwenden Sie natürliche Sprache, um Befehle auszuführen.",
+ "value": "Gérez les données via des tableaux. Sélectionnez un tableau ou utilisez le langage naturel pour exécuter des commandes."
+ },
+ {
+ "context": "ui",
+ "key": "Verwalten Sie Ihre Kontoinformationen",
+ "value": "Gérez vos informations de compte"
+ },
+ {
+ "context": "ui",
+ "key": "Verwalten Sie Ihre Service-Verbindungen",
+ "value": "Gérez vos connexions de service"
+ },
+ {
+ "context": "ui",
+ "key": "Verwalten Sie Ihre Sprach-Integrations-Konfiguration und Einstellungen.",
+ "value": "Gérez votre configuration et vos préférences d'intégration vocale."
+ },
+ {
+ "context": "ui",
+ "key": "Verwalten Sie Mandate und deren zugehörige Berechtigungen.",
+ "value": "Gérez les mandats et leurs permissions associées."
+ },
+ {
+ "context": "ui",
+ "key": "Verwaltet von {provider}",
+ "value": "Géré par {provider}"
+ },
+ {
+ "context": "ui",
+ "key": "Verwaltung der Benutzerzugriffe auf Organisationen",
+ "value": "Gestion des accès utilisateurs aux organisations"
+ },
+ {
+ "context": "ui",
+ "key": "Verwaltung der Buchungspositionen (Speseneinträge)",
+ "value": "Gestion des positions de réservation (entrées de dépenses)"
+ },
+ {
+ "context": "ui",
+ "key": "Verwaltung der Dokumente und Belege",
+ "value": "Gestion des documents et pièces justificatives"
+ },
+ {
+ "context": "ui",
+ "key": "Verwaltung der Feature-spezifischen Rollen",
+ "value": "Gestion des rôles spécifiques à la fonctionnalité"
+ },
+ {
+ "context": "ui",
+ "key": "Verwaltung der Kundenverträge",
+ "value": "Gestion des contrats clients"
+ },
+ {
+ "context": "ui",
+ "key": "Verwaltung der Treuhand-Organisationen",
+ "value": "Gestion des organisations fiduciaires"
+ },
+ {
+ "context": "ui",
+ "key": "Verwaltung von Treuhand-Organisationen, Verträgen und Buchungen",
+ "value": "Gestion des organisations fiduciaires, contrats et réservations"
+ },
+ {
+ "context": "ui",
+ "key": "Verwaltungs- und Management-Tools",
+ "value": "Outils d'administration et de gestion"
+ },
+ {
+ "context": "ui",
+ "key": "Verwende Vorlage:",
+ "value": "Utilisation du modèle:"
+ },
+ {
+ "context": "ui",
+ "key": "Video",
+ "value": "Vidéo"
+ },
+ {
+ "context": "ui",
+ "key": "Vielen Dank für Ihr Interesse an unserer Sprach Integration powered by Spitch.ai. Wir haben Ihr Mandat erhalten und werden es in Kürze überprüfen.",
+ "value": "Merci pour votre intérêt pour notre Intégration Vocale powered by Spitch.ai. Nous avons reçu votre mandat et l'examinerons sous peu."
+ },
+ {
+ "context": "ui",
+ "key": "Virtual Assistant (VA)",
+ "value": "Assistant Virtuel (VA)"
+ },
+ {
+ "context": "ui",
+ "key": "Voice Biometrics (VB)",
+ "value": "Biométrie Vocale (VB)"
+ },
+ {
+ "context": "ui",
+ "key": "Vollständiger Name",
+ "value": "Nom complet"
+ },
+ {
+ "context": "ui",
+ "key": "Von der Registrierung bis zur technischen Einrichtung - Ihr Mandant registriert sich bei PowerOn für Telefonie-Services, lädt Dokumente hoch und erhält automatisch eine technische SIP-Nummer von Spitch. Die Call-Weiterleitung kann jederzeit aktiviert oder deaktiviert werden, was maximale Flexibilität und BCM-Sicherheit gewährleistet.",
+ "value": "De l'inscription à la configuration technique - votre client s'inscrit auprès de PowerOn pour les services téléphoniques, télécharge des documents et reçoit automatiquement un numéro SIP technique de Spitch. Le transfert d'appel peut être activé ou désactivé à tout moment, garantissant une flexibilité maximale et la sécurité BCM."
+ },
+ {
+ "context": "ui",
+ "key": "Vorherige Seite",
+ "value": "Page précédente"
+ },
+ {
+ "context": "ui",
+ "key": "Vorschau",
+ "value": "Aperçu"
+ },
+ {
+ "context": "ui",
+ "key": "Vorschau für diesen Dateityp nicht verfügbar",
+ "value": "Aperçu non disponible pour ce type de fichier"
+ },
+ {
+ "context": "ui",
+ "key": "Vorschau schließen",
+ "value": "Fermer l'aperçu"
+ },
+ {
+ "context": "ui",
+ "key": "Vorschau wird geladen...",
+ "value": "Chargement de l'aperçu..."
+ },
+ {
+ "context": "ui",
+ "key": "WARTEND",
+ "value": "EN ATTENTE"
+ },
+ {
+ "context": "ui",
+ "key": "Wartend",
+ "value": "En attente"
+ },
+ {
+ "context": "ui",
+ "key": "Was passiert als nächstes?",
+ "value": "Que se passe-t-il ensuite ?"
+ },
+ {
+ "context": "ui",
+ "key": "Wechseln Sie zwischen hellem und dunklem Modus",
+ "value": "Basculer entre le mode clair et sombre"
+ },
+ {
+ "context": "ui",
+ "key": "Werkzeuge",
+ "value": "Outils"
+ },
+ {
+ "context": "ui",
+ "key": "Werkzeuge und Hilfsmittel",
+ "value": "Outils et utilitaires"
+ },
+ {
+ "context": "ui",
+ "key": "Wie möchten Sie am Telefon genannt werden?",
+ "value": "Comment souhaitez-vous être appelé au téléphone ?"
+ },
+ {
+ "context": "ui",
+ "key": "Wiederholen",
+ "value": "Réessayer"
+ },
+ {
+ "context": "ui",
+ "key": "Willkommen in Ihrem Arbeitsbereich",
+ "value": "Bienvenue dans votre espace de travail"
+ },
+ {
+ "context": "ui",
+ "key": "Wird gesendet...",
+ "value": "Envoi..."
+ },
+ {
+ "context": "ui",
+ "key": "Wird gestoppt...",
+ "value": "Arrêt..."
+ },
+ {
+ "context": "ui",
+ "key": "Wird geteilt...",
+ "value": "Partage en cours..."
+ },
+ {
+ "context": "ui",
+ "key": "Wird hochgeladen...",
+ "value": "Téléversement..."
+ },
+ {
+ "context": "ui",
+ "key": "Wird verarbeitet...",
+ "value": "Traitement..."
+ },
+ {
+ "context": "ui",
+ "key": "Workflow",
+ "value": "Workflow"
+ },
+ {
+ "context": "ui",
+ "key": "Workflow Fortschritt",
+ "value": "Progression du workflow"
+ },
+ {
+ "context": "ui",
+ "key": "Workflow auswählen",
+ "value": "Sélectionner un workflow"
+ },
+ {
+ "context": "ui",
+ "key": "Workflow fehlgeschlagen.",
+ "value": "Échec du workflow."
+ },
+ {
+ "context": "ui",
+ "key": "Workflow fortsetzen",
+ "value": "Reprendre le workflow"
+ },
+ {
+ "context": "ui",
+ "key": "Workflow läuft... Warte auf Logs...",
+ "value": "Workflow en cours... En attente des logs..."
+ },
+ {
+ "context": "ui",
+ "key": "Workflow löschen",
+ "value": "Supprimer le workflow"
+ },
+ {
+ "context": "ui",
+ "key": "Workflow stoppen",
+ "value": "Arrêter le workflow"
+ },
+ {
+ "context": "ui",
+ "key": "Workflow wird fortgesetzt",
+ "value": "Reprise du workflow"
+ },
+ {
+ "context": "ui",
+ "key": "Workflow wird gelöscht...",
+ "value": "Suppression du workflow..."
+ },
+ {
+ "context": "ui",
+ "key": "Workflow-Automatisierungen verwalten",
+ "value": "Gérer les automatisations de workflow"
+ },
+ {
+ "context": "ui",
+ "key": "Workflow-Nachrichten werden geladen...",
+ "value": "Chargement des messages de workflow..."
+ },
+ {
+ "context": "ui",
+ "key": "Workflow-Verlauf",
+ "value": "Historique des workflows"
+ },
+ {
+ "context": "ui",
+ "key": "Workflows",
+ "value": "Workflows"
+ },
+ {
+ "context": "ui",
+ "key": "Workflows werden geladen...",
+ "value": "Chargement des workflows..."
+ },
+ {
+ "context": "ui",
+ "key": "Wähle einen Workflow aus der Liste aus oder starte einen neuen Workflow",
+ "value": "Sélectionnez un workflow dans la liste ou démarrez un nouveau workflow"
+ },
+ {
+ "context": "ui",
+ "key": "Wählen Sie Ihre bevorzugte Sprache",
+ "value": "Choisissez votre langue préférée"
+ },
+ {
+ "context": "ui",
+ "key": "You",
+ "value": "Vous"
+ },
+ {
+ "context": "ui",
+ "key": "Zeitzone",
+ "value": "Fuseau Horaire"
+ },
+ {
+ "context": "ui",
+ "key": "Zentrale",
+ "value": "Centre d'activité"
+ },
+ {
+ "context": "ui",
+ "key": "Zu dunklem Modus wechseln",
+ "value": "Passer en mode sombre"
+ },
+ {
+ "context": "ui",
+ "key": "Zu hellem Modus wechseln",
+ "value": "Passer en mode clair"
+ },
+ {
+ "context": "ui",
+ "key": "Zugriff",
+ "value": "Accès"
+ },
+ {
+ "context": "ui",
+ "key": "Zugriff erfolgreich erstellt",
+ "value": "Accès créé avec succès"
+ },
+ {
+ "context": "ui",
+ "key": "Zugriff verweigert",
+ "value": "Accès Refusé"
+ },
+ {
+ "context": "ui",
+ "key": "Zuletzt geprüft",
+ "value": "Dernière vérification"
+ },
+ {
+ "context": "ui",
+ "key": "Zum Bestätigen klicken",
+ "value": "Cliquez pour confirmer"
+ },
+ {
+ "context": "ui",
+ "key": "Zum Bestätigen klicken...",
+ "value": "Cliquez pour confirmer..."
+ },
+ {
+ "context": "ui",
+ "key": "Zurück zur Sprach Integration",
+ "value": "Retour à l'Intégration Vocale"
+ },
+ {
+ "context": "ui",
+ "key": "angehängt",
+ "value": "attaché"
+ },
+ {
+ "context": "ui",
+ "key": "ausgewählt",
+ "value": "sélectionné(s)"
+ },
+ {
+ "context": "ui",
+ "key": "kontakt@firma.com",
+ "value": "contact@entreprise.com"
+ },
+ {
+ "context": "ui",
+ "key": "oder",
+ "value": "ou"
+ },
+ {
+ "context": "ui",
+ "key": "z.B. Beleg.pdf",
+ "value": "ex. Justificatif.pdf"
+ },
+ {
+ "context": "ui",
+ "key": "z.B. Finanzdienstleistungen, Technologie, etc.",
+ "value": "ex. Services Financiers, Technologie, etc."
+ },
+ {
+ "context": "ui",
+ "key": "z.B. Muster AG 2026",
+ "value": "ex. Muster AG 2026"
+ },
+ {
+ "context": "ui",
+ "key": "z.B. Treuhand AG Zürich",
+ "value": "ex. Fiduciaire AG Zurich"
+ },
+ {
+ "context": "ui",
+ "key": "z.B. admin, operate, userreport",
+ "value": "ex. admin, operate, userreport"
+ },
+ {
+ "context": "ui",
+ "key": "z.B. treuhand-ag-zuerich",
+ "value": "ex. fiduciaire-ag-zurich"
+ },
+ {
+ "context": "ui",
+ "key": "{authority} Verbindung bearbeiten",
+ "value": "Modifier la connexion {authority}"
+ },
+ {
+ "context": "ui",
+ "key": "{column} filtern",
+ "value": "Filtrer {column}"
+ },
+ {
+ "context": "ui",
+ "key": "{count} Benutzer ausgewählt",
+ "value": "{count} utilisateurs sélectionnés"
+ },
+ {
+ "context": "ui",
+ "key": "Änderungen speichern",
+ "value": "Sauvegarder les Modifications"
+ },
+ {
+ "context": "ui",
+ "key": "Über",
+ "value": "À propos"
+ },
+ {
+ "context": "ui",
+ "key": "Überprüfungsprozess",
+ "value": "Processus de Révision"
+ },
+ {
+ "context": "ui",
+ "key": "Übersicht - Sehen Sie den Arbeitsbereich-Status und Updates",
+ "value": "Aperçu - Consultez le statut et les mises à jour de l'espace de travail"
+ },
+ {
+ "context": "ui",
+ "key": "Überwachen Sie automatisch 100% der Gespräche, um wertvolle Einblicke für Ihr Unternehmen zu erhalten.",
+ "value": "Surveillez automatiquement 100% des conversations pour obtenir des insights précieux pour votre entreprise."
+ },
+ {
+ "context": "ui",
+ "key": "(gefiltert nach {name})",
+ "value": "(filtré par {name})"
+ },
+ {
+ "context": "ui",
+ "key": "({count} gefiltert)",
+ "value": "({count} filtrés)"
+ },
+ {
+ "context": "ui",
+ "key": "Abonnement, Einstellungen und Guthaben pro Mandant",
+ "value": "Abonnement, paramètres et crédits par client"
+ },
+ {
+ "context": "ui",
+ "key": "Abrechnung",
+ "value": "Facturation"
+ },
+ {
+ "context": "ui",
+ "key": "Aktion",
+ "value": "Action"
+ },
+ {
+ "context": "ui",
+ "key": "Benutzer-Billing",
+ "value": "Facturation utilisateurs"
+ },
+ {
+ "context": "ui",
+ "key": "Benutzer-Guthaben",
+ "value": "Crédits utilisateur"
+ },
+ {
+ "context": "ui",
+ "key": "Benutzer:",
+ "value": "Utilisateur :"
+ },
+ {
+ "context": "ui",
+ "key": "Deaktiviert",
+ "value": "Désactivé"
+ },
+ {
+ "context": "ui",
+ "key": "Du hast Zugriff auf {instanceCount} {instanceWord} in {mandateCount} {mandateWord}.",
+ "value": "Vous avez accès à {instanceCount} {instanceWord} sur {mandateCount} {mandateWord}."
+ },
+ {
+ "context": "ui",
+ "key": "Einstellungen gespeichert!",
+ "value": "Paramètres enregistrés !"
+ },
+ {
+ "context": "ui",
+ "key": "Feature-Instanz",
+ "value": "instance de fonctionnalité"
+ },
+ {
+ "context": "ui",
+ "key": "Feature-Instanzen",
+ "value": "instances de fonctionnalité"
+ },
+ {
+ "context": "ui",
+ "key": "Fehler beim Speichern",
+ "value": "Erreur lors de l'enregistrement"
+ },
+ {
+ "context": "ui",
+ "key": "Gesamtguthaben",
+ "value": "Crédit total"
+ },
+ {
+ "context": "ui",
+ "key": "Mandant:",
+ "value": "Client :"
+ },
+ {
+ "context": "ui",
+ "key": "Mandanten",
+ "value": "Clients"
+ },
+ {
+ "context": "ui",
+ "key": "Mandanten-Billing",
+ "value": "Facturation clients"
+ },
+ {
+ "context": "ui",
+ "key": "Mandanten-Guthaben",
+ "value": "Crédits clients"
+ },
+ {
+ "context": "ui",
+ "key": "Mandant",
+ "value": "Client"
+ },
+ {
+ "context": "ui",
+ "key": "Niedrig",
+ "value": "Faible"
+ },
+ {
+ "context": "ui",
+ "key": "Transaktionen",
+ "value": "Transactions"
+ },
+ {
+ "context": "ui",
+ "key": "Warnschwelle",
+ "value": "Seuil d'alerte"
+ },
+ {
+ "context": "ui",
+ "key": "✓ Mandat eingereicht",
+ "value": "✓ Mandat Soumis"
+ }
+ ],
+ "status": "complete",
+ "isDefault": false
+ }
+]
diff --git a/modules/routes/routeAdmin.py b/modules/routes/routeAdmin.py
index ed5bf42c..0f671f0a 100644
--- a/modules/routes/routeAdmin.py
+++ b/modules/routes/routeAdmin.py
@@ -13,6 +13,8 @@ from modules.shared.configuration import APP_CONFIG
from modules.auth import limiter, getCurrentUser
from modules.datamodels.datamodelUam import User
from modules.interfaces.interfaceDbApp import getRootInterface
+from modules.shared.i18nRegistry import apiRouteContext
+routeApiMsg = apiRouteContext("routeAdmin")
# Static folder setup - using absolute path from app root
baseDir = FilePath(__file__).parent.parent.parent # Go up to gateway root
@@ -39,7 +41,7 @@ def root(request: Request) -> Dict[str, str]:
allowedOrigins = APP_CONFIG.get("APP_ALLOWED_ORIGINS")
if not allowedOrigins:
raise HTTPException(
- status_code=500, detail="APP_ALLOWED_ORIGINS configuration is required"
+ status_code=500, detail=routeApiMsg("APP_ALLOWED_ORIGINS configuration is required")
)
return {
@@ -59,17 +61,17 @@ def get_environment(
apiBaseUrl = APP_CONFIG.get("APP_API_URL")
if not apiBaseUrl:
raise HTTPException(
- status_code=500, detail="APP_API_URL configuration is required"
+ status_code=500, detail=routeApiMsg("APP_API_URL configuration is required")
)
environment = APP_CONFIG.get("APP_ENV")
if not environment:
- raise HTTPException(status_code=500, detail="APP_ENV configuration is required")
+ raise HTTPException(status_code=500, detail=routeApiMsg("APP_ENV configuration is required"))
instanceLabel = APP_CONFIG.get("APP_ENV_LABEL")
if not instanceLabel:
raise HTTPException(
- status_code=500, detail="APP_ENV_LABEL configuration is required"
+ status_code=500, detail=routeApiMsg("APP_ENV_LABEL configuration is required")
)
return {
@@ -91,5 +93,5 @@ def options_route(request: Request, fullPath: str) -> Response:
def favicon(request: Request) -> FileResponse:
favicon_path = staticFolder / "favicon.ico"
if not favicon_path.exists():
- raise HTTPException(status_code=404, detail="Favicon not found")
+ raise HTTPException(status_code=404, detail=routeApiMsg("Favicon not found"))
return FileResponse(str(favicon_path), media_type="image/x-icon")
diff --git a/modules/routes/routeAdminAutomationEvents.py b/modules/routes/routeAdminAutomationEvents.py
deleted file mode 100644
index 553c66d3..00000000
--- a/modules/routes/routeAdminAutomationEvents.py
+++ /dev/null
@@ -1,285 +0,0 @@
-# Copyright (c) 2025 Patrick Motsch
-# All rights reserved.
-"""
-Admin automation events routes for the backend API.
-Sysadmin-only endpoints for viewing and controlling scheduler events.
-"""
-
-from fastapi import APIRouter, HTTPException, Depends, Path, Request, Response, Query
-from typing import List, Dict, Any, Optional
-from fastapi import status
-import logging
-import json
-import math
-
-# Import interfaces and models from feature containers
-import modules.features.automation.interfaceFeatureAutomation as interfaceAutomation
-from modules.auth import limiter, getRequestContext, requireSysAdminRole, RequestContext
-from modules.datamodels.datamodelUam import User
-from modules.datamodels.datamodelPagination import PaginationParams, PaginationMetadata, normalize_pagination_dict
-from modules.routes.routeDataUsers import _applyFiltersAndSort, _extractDistinctValues
-
-# Configure logger
-logger = logging.getLogger(__name__)
-
-# Create router for admin automation events endpoints
-router = APIRouter(
- prefix="/api/admin/automation-events",
- tags=["Admin Automation Events"],
- responses={
- 404: {"description": "Not found"},
- 400: {"description": "Bad request"},
- 401: {"description": "Unauthorized"},
- 403: {"description": "Forbidden - Sysadmin only"},
- 500: {"description": "Internal server error"}
- }
-)
-
-def _buildEnrichedAutomationEvents(currentUser: User) -> List[Dict[str, Any]]:
- """Build the full enriched automation events list."""
- from modules.shared.eventManagement import eventManager
- from modules.interfaces.interfaceDbApp import getRootInterface
- from modules.features.automation.mainAutomation import getAutomationServices
-
- if not eventManager.scheduler:
- return []
-
- jobs = []
- for job in eventManager.scheduler.get_jobs():
- if job.id.startswith("automation."):
- automationId = job.id.replace("automation.", "")
- jobs.append({
- "eventId": job.id,
- "id": job.id,
- "automationId": automationId,
- "nextRunTime": str(job.next_run_time) if job.next_run_time else None,
- "trigger": str(job.trigger) if job.trigger else None,
- "name": "",
- "createdBy": "",
- "mandate": "",
- "featureInstance": ""
- })
-
- if jobs:
- try:
- rootInterface = getRootInterface()
- eventUser = rootInterface.getUserByUsername("event")
- if eventUser:
- services = getAutomationServices(currentUser, mandateId=None, featureInstanceId=None)
- allAutomations = services.interfaceDbAutomation.getAllAutomationDefinitionsWithRBAC(eventUser)
-
- automationLookup = {}
- for a in allAutomations:
- aId = a.get("id", "") if isinstance(a, dict) else getattr(a, "id", "")
- automationLookup[aId] = a
-
- _userCache: Dict[str, str] = {}
- _mandateCache: Dict[str, str] = {}
- _featureCache: Dict[str, str] = {}
-
- def _resolveUsername(userId):
- if not userId: return ""
- if userId not in _userCache:
- try:
- user = rootInterface.getUser(userId)
- _userCache[userId] = user.username if user else userId[:8]
- except Exception:
- _userCache[userId] = userId[:8]
- return _userCache[userId]
-
- def _resolveMandateLabel(mandateId):
- if not mandateId: return ""
- if mandateId not in _mandateCache:
- try:
- mandate = rootInterface.getMandate(mandateId)
- _mandateCache[mandateId] = getattr(mandate, "label", None) or mandateId[:8]
- except Exception:
- _mandateCache[mandateId] = mandateId[:8]
- return _mandateCache[mandateId]
-
- def _resolveFeatureLabel(featureInstanceId):
- if not featureInstanceId: return ""
- if featureInstanceId not in _featureCache:
- try:
- instance = rootInterface.getFeatureInstance(featureInstanceId)
- _featureCache[featureInstanceId] = getattr(instance, "label", None) or getattr(instance, "featureCode", None) or featureInstanceId[:8]
- except Exception:
- _featureCache[featureInstanceId] = featureInstanceId[:8]
- return _featureCache[featureInstanceId]
-
- for job in jobs:
- automation = automationLookup.get(job["automationId"])
- if automation:
- if isinstance(automation, dict):
- job["name"] = automation.get("label", "")
- job["createdBy"] = _resolveUsername(automation.get("sysCreatedBy", ""))
- job["mandate"] = _resolveMandateLabel(automation.get("mandateId", ""))
- job["featureInstance"] = _resolveFeatureLabel(automation.get("featureInstanceId", ""))
- else:
- job["name"] = getattr(automation, "label", "")
- job["createdBy"] = _resolveUsername(getattr(automation, "sysCreatedBy", ""))
- job["mandate"] = _resolveMandateLabel(getattr(automation, "mandateId", ""))
- job["featureInstance"] = _resolveFeatureLabel(getattr(automation, "featureInstanceId", ""))
- else:
- job["name"] = "(orphaned)"
- except Exception as e:
- logger.warning(f"Could not enrich automation events with context: {e}")
-
- return jobs
-
-
-@router.get("")
-@limiter.limit("30/minute")
-def get_all_automation_events(
- request: Request,
- pagination: Optional[str] = Query(None, description="JSON-encoded PaginationParams"),
- currentUser: User = Depends(requireSysAdminRole),
-):
- """Get all active scheduler jobs with pagination support (sysadmin only)."""
- try:
- paginationParams: Optional[PaginationParams] = None
- if pagination:
- try:
- paginationDict = json.loads(pagination)
- if paginationDict:
- paginationDict = normalize_pagination_dict(paginationDict)
- paginationParams = PaginationParams(**paginationDict)
- except (json.JSONDecodeError, ValueError) as e:
- raise HTTPException(status_code=400, detail=f"Invalid pagination parameter: {str(e)}")
-
- enriched = _buildEnrichedAutomationEvents(currentUser)
- filtered = _applyFiltersAndSort(enriched, paginationParams)
-
- if paginationParams:
- totalItems = len(filtered)
- totalPages = math.ceil(totalItems / paginationParams.pageSize) if totalItems > 0 else 0
- startIdx = (paginationParams.page - 1) * paginationParams.pageSize
- endIdx = startIdx + paginationParams.pageSize
- return {
- "items": filtered[startIdx:endIdx],
- "pagination": PaginationMetadata(
- currentPage=paginationParams.page,
- pageSize=paginationParams.pageSize,
- totalItems=totalItems,
- totalPages=totalPages,
- sort=paginationParams.sort,
- filters=paginationParams.filters,
- ).model_dump(),
- }
-
- return {"items": enriched, "pagination": None}
- except HTTPException:
- raise
- except Exception as e:
- logger.error(f"Error getting automation events: {str(e)}")
- raise HTTPException(status_code=500, detail=f"Error getting automation events: {str(e)}")
-
-
-@router.get("/filter-values")
-@limiter.limit("60/minute")
-def get_automation_event_filter_values(
- request: Request,
- column: str = Query(..., description="Column key"),
- pagination: Optional[str] = Query(None, description="JSON-encoded current filters"),
- currentUser: User = Depends(requireSysAdminRole),
-):
- """Return distinct filter values for a column in automation events."""
- try:
- crossFilterParams: Optional[PaginationParams] = None
- if pagination:
- try:
- paginationDict = json.loads(pagination)
- if paginationDict:
- paginationDict = normalize_pagination_dict(paginationDict)
- filters = paginationDict.get("filters", {})
- filters.pop(column, None)
- paginationDict["filters"] = filters
- paginationDict.pop("sort", None)
- crossFilterParams = PaginationParams(**paginationDict)
- except (json.JSONDecodeError, ValueError):
- pass
-
- enriched = _buildEnrichedAutomationEvents(currentUser)
- crossFiltered = _applyFiltersAndSort(enriched, crossFilterParams)
- return _extractDistinctValues(crossFiltered, column)
- except Exception as e:
- logger.error(f"Error getting filter values: {str(e)}")
- raise HTTPException(status_code=500, detail=str(e))
-
-@router.post("/sync")
-@limiter.limit("5/minute")
-async def sync_all_automation_events(
- request: Request,
- currentUser: User = Depends(requireSysAdminRole)
-) -> Dict[str, Any]:
- """
- Manually trigger sync for all automations (sysadmin only).
- This will register/remove events based on active flags.
- """
- try:
- from modules.interfaces.interfaceDbApp import getRootInterface
- from modules.workflows.automation import syncAutomationEvents
-
- # Get event user for sync operation (routes can import from interfaces)
- rootInterface = getRootInterface()
- eventUser = rootInterface.getUserByUsername("event")
- if not eventUser:
- raise HTTPException(
- status_code=500,
- detail="Event user not available"
- )
-
- from modules.features.automation.mainAutomation import getAutomationServices
- services = getAutomationServices(currentUser, mandateId=None, featureInstanceId=None)
- result = syncAutomationEvents(services, eventUser)
- return {
- "success": True,
- "synced": result.get("synced", 0),
- "events": result.get("events", {})
- }
- except HTTPException:
- raise
- except Exception as e:
- logger.error(f"Error syncing automation events: {str(e)}")
- raise HTTPException(
- status_code=500,
- detail=f"Error syncing automation events: {str(e)}"
- )
-
-@router.post("/{eventId}/remove")
-@limiter.limit("10/minute")
-def remove_event(
- request: Request,
- eventId: str = Path(..., description="Event ID to remove"),
- currentUser: User = Depends(requireSysAdminRole)
-) -> Dict[str, Any]:
- """
- Remove a scheduler job (sysadmin only).
- Removes the job from the scheduler and clears the eventId on the automation definition.
- Does NOT delete the automation definition itself.
- """
- try:
- from modules.shared.eventManagement import eventManager
-
- # Remove scheduler job
- eventManager.remove(eventId)
-
- # Clear eventId on the automation definition (so it can be re-synced later)
- if eventId.startswith("automation."):
- automationId = eventId.replace("automation.", "")
- automationInterface = interfaceAutomation.getInterface(currentUser)
- automation = automationInterface.getAutomationDefinition(automationId)
- if automation and getattr(automation, "eventId", None) == eventId:
- automationInterface.updateAutomationDefinition(automationId, {"eventId": None})
-
- return {
- "success": True,
- "eventId": eventId,
- "message": f"Event {eventId} removed successfully"
- }
- except Exception as e:
- logger.error(f"Error removing event: {str(e)}")
- raise HTTPException(
- status_code=500,
- detail=f"Error removing event: {str(e)}"
- )
diff --git a/modules/routes/routeAdminAutomationLogs.py b/modules/routes/routeAdminAutomationLogs.py
deleted file mode 100644
index 479d0df3..00000000
--- a/modules/routes/routeAdminAutomationLogs.py
+++ /dev/null
@@ -1,207 +0,0 @@
-# Copyright (c) 2025 Patrick Motsch
-# All rights reserved.
-"""
-Admin automation execution logs routes.
-SysAdmin-only endpoints for viewing consolidated automation execution history
-across all mandates and feature instances.
-"""
-
-from fastapi import APIRouter, HTTPException, Depends, Request, Query
-from typing import List, Dict, Any, Optional
-import logging
-import json
-import math
-import uuid
-
-from modules.auth import limiter, requireSysAdminRole
-from modules.datamodels.datamodelUam import User
-from modules.datamodels.datamodelPagination import PaginationParams, PaginationMetadata, normalize_pagination_dict
-from modules.routes.routeDataUsers import _applyFiltersAndSort, _extractDistinctValues
-
-logger = logging.getLogger(__name__)
-
-router = APIRouter(
- prefix="/api/admin/automation-logs",
- tags=["Admin Automation Logs"],
- responses={
- 401: {"description": "Unauthorized"},
- 403: {"description": "Forbidden - Sysadmin only"},
- 500: {"description": "Internal server error"},
- },
-)
-
-
-def _buildFlattenedExecutionLogs(currentUser: User) -> List[Dict[str, Any]]:
- """Flatten executionLogs from all AutomationDefinitions across all mandates.
- Called from a SysAdmin-only endpoint — bypasses RBAC, reads directly from DB."""
- from modules.interfaces.interfaceDbApp import getRootInterface
- from modules.features.automation.mainAutomation import getAutomationServices
- from modules.features.automation.datamodelFeatureAutomation import AutomationDefinition
-
- rootInterface = getRootInterface()
- services = getAutomationServices(currentUser, mandateId=None, featureInstanceId=None)
- allAutomations = services.interfaceDbAutomation.db.getRecordset(AutomationDefinition)
-
- userCache: Dict[str, str] = {}
- mandateCache: Dict[str, str] = {}
- featureCache: Dict[str, str] = {}
-
- def _resolveUsername(userId: str) -> str:
- if not userId:
- return ""
- if userId not in userCache:
- try:
- user = rootInterface.getUser(userId)
- userCache[userId] = user.username if user else userId[:8]
- except Exception:
- userCache[userId] = userId[:8]
- return userCache[userId]
-
- def _resolveMandateLabel(mandateId: str) -> str:
- if not mandateId:
- return ""
- if mandateId not in mandateCache:
- try:
- mandate = rootInterface.getMandate(mandateId)
- mandateCache[mandateId] = getattr(mandate, "label", None) or mandateId[:8]
- except Exception:
- mandateCache[mandateId] = mandateId[:8]
- return mandateCache[mandateId]
-
- def _resolveFeatureLabel(featureInstanceId: str) -> str:
- if not featureInstanceId:
- return ""
- if featureInstanceId not in featureCache:
- try:
- instance = rootInterface.getFeatureInstance(featureInstanceId)
- featureCache[featureInstanceId] = (
- getattr(instance, "label", None)
- or getattr(instance, "featureCode", None)
- or featureInstanceId[:8]
- )
- except Exception:
- featureCache[featureInstanceId] = featureInstanceId[:8]
- return featureCache[featureInstanceId]
-
- flatLogs: List[Dict[str, Any]] = []
-
- for automation in allAutomations:
- if isinstance(automation, dict):
- automationId = automation.get("id", "")
- automationLabel = automation.get("label", "")
- mandateId = automation.get("mandateId", "")
- featureInstanceId = automation.get("featureInstanceId", "")
- createdBy = automation.get("sysCreatedBy", "")
- logs = automation.get("executionLogs") or []
- else:
- automationId = getattr(automation, "id", "")
- automationLabel = getattr(automation, "label", "")
- mandateId = getattr(automation, "mandateId", "")
- featureInstanceId = getattr(automation, "featureInstanceId", "")
- createdBy = getattr(automation, "sysCreatedBy", "")
- logs = getattr(automation, "executionLogs", None) or []
-
- mandateName = _resolveMandateLabel(mandateId)
- featureInstanceName = _resolveFeatureLabel(featureInstanceId)
- executedByName = _resolveUsername(createdBy)
-
- for log in logs:
- timestamp = log.get("timestamp", 0) if isinstance(log, dict) else 0
- status = log.get("status", "") if isinstance(log, dict) else ""
- workflowId = log.get("workflowId", "") if isinstance(log, dict) else ""
- messages = log.get("messages", []) if isinstance(log, dict) else []
-
- flatLogs.append({
- "id": str(uuid.uuid4()),
- "timestamp": timestamp,
- "automationId": automationId,
- "automationLabel": automationLabel,
- "mandateName": mandateName,
- "featureInstanceName": featureInstanceName,
- "executedBy": executedByName,
- "status": status,
- "workflowId": workflowId,
- "messages": "; ".join(messages) if messages else "",
- })
-
- flatLogs.sort(key=lambda x: x.get("timestamp", 0), reverse=True)
- return flatLogs
-
-
-@router.get("")
-@limiter.limit("30/minute")
-def get_all_automation_logs(
- request: Request,
- pagination: Optional[str] = Query(None, description="JSON-encoded PaginationParams"),
- currentUser: User = Depends(requireSysAdminRole),
-):
- """Get consolidated execution logs from all automations (sysadmin only)."""
- try:
- paginationParams: Optional[PaginationParams] = None
- if pagination:
- try:
- paginationDict = json.loads(pagination)
- if paginationDict:
- paginationDict = normalize_pagination_dict(paginationDict)
- paginationParams = PaginationParams(**paginationDict)
- except (json.JSONDecodeError, ValueError) as e:
- raise HTTPException(status_code=400, detail=f"Invalid pagination parameter: {str(e)}")
-
- logs = _buildFlattenedExecutionLogs(currentUser)
- filtered = _applyFiltersAndSort(logs, paginationParams)
-
- if paginationParams:
- totalItems = len(filtered)
- totalPages = math.ceil(totalItems / paginationParams.pageSize) if totalItems > 0 else 0
- startIdx = (paginationParams.page - 1) * paginationParams.pageSize
- endIdx = startIdx + paginationParams.pageSize
- return {
- "items": filtered[startIdx:endIdx],
- "pagination": PaginationMetadata(
- currentPage=paginationParams.page,
- pageSize=paginationParams.pageSize,
- totalItems=totalItems,
- totalPages=totalPages,
- sort=paginationParams.sort,
- filters=paginationParams.filters,
- ).model_dump(),
- }
-
- return {"items": logs, "pagination": None}
- except HTTPException:
- raise
- except Exception as e:
- logger.error(f"Error getting automation logs: {str(e)}")
- raise HTTPException(status_code=500, detail=f"Error getting automation logs: {str(e)}")
-
-
-@router.get("/filter-values")
-@limiter.limit("60/minute")
-def get_automation_log_filter_values(
- request: Request,
- column: str = Query(..., description="Column key"),
- pagination: Optional[str] = Query(None, description="JSON-encoded current filters"),
- currentUser: User = Depends(requireSysAdminRole),
-):
- """Return distinct filter values for a column in automation logs."""
- try:
- crossFilterParams: Optional[PaginationParams] = None
- if pagination:
- try:
- paginationDict = json.loads(pagination)
- if paginationDict:
- paginationDict = normalize_pagination_dict(paginationDict)
- filters = paginationDict.get("filters", {})
- filters.pop(column, None)
- paginationDict["filters"] = filters
- paginationDict.pop("sort", None)
- crossFilterParams = PaginationParams(**paginationDict)
- except (json.JSONDecodeError, ValueError):
- pass
-
- logs = _buildFlattenedExecutionLogs(currentUser)
- crossFiltered = _applyFiltersAndSort(logs, crossFilterParams)
- return _extractDistinctValues(crossFiltered, column)
- except Exception as e:
- logger.error(f"Error getting filter values: {str(e)}")
- raise HTTPException(status_code=500, detail=str(e))
diff --git a/modules/routes/routeAdminDemoConfig.py b/modules/routes/routeAdminDemoConfig.py
new file mode 100644
index 00000000..b85cc38c
--- /dev/null
+++ b/modules/routes/routeAdminDemoConfig.py
@@ -0,0 +1,86 @@
+"""
+Admin Demo Config API
+
+Provides endpoints to list, load, and remove demo configurations.
+SysAdmin-only access.
+"""
+
+import logging
+from fastapi import APIRouter, Depends, HTTPException, Request, status
+
+from modules.auth import limiter
+from modules.auth.authentication import requireSysAdminRole
+from modules.datamodels.datamodelUam import User
+from modules.security.rootAccess import getRootDbAppConnector
+
+logger = logging.getLogger(__name__)
+
+router = APIRouter(
+ prefix="/api/admin/demo-config",
+ tags=["Admin Demo Config"],
+)
+
+
+@router.get("")
+@limiter.limit("30/minute")
+def listDemoConfigs(
+ request: Request,
+ currentUser: User = Depends(requireSysAdminRole),
+) -> dict:
+ """List all available demo configurations."""
+ from modules.demoConfigs import _getAvailableDemoConfigs
+
+ configs = _getAvailableDemoConfigs()
+ return {
+ "configs": [cfg.toDict() for cfg in configs.values()],
+ }
+
+
+@router.post("/{code}/load")
+@limiter.limit("5/minute")
+def loadDemoConfig(
+ code: str,
+ request: Request,
+ currentUser: User = Depends(requireSysAdminRole),
+) -> dict:
+ """Load (create) a demo configuration. Idempotent."""
+ from modules.demoConfigs import _getDemoConfigByCode
+
+ config = _getDemoConfigByCode(code)
+ if not config:
+ raise HTTPException(
+ status_code=status.HTTP_404_NOT_FOUND,
+ detail=f"Demo config '{code}' not found",
+ )
+
+ db = getRootDbAppConnector()
+ logger.info(f"Loading demo config '{code}' (user: {currentUser.username})")
+ summary = config.load(db)
+ logger.info(f"Demo config '{code}' loaded: {summary}")
+
+ return {"status": "ok", "code": code, "summary": summary}
+
+
+@router.post("/{code}/remove")
+@limiter.limit("5/minute")
+def removeDemoConfig(
+ code: str,
+ request: Request,
+ currentUser: User = Depends(requireSysAdminRole),
+) -> dict:
+ """Remove all data created by a demo configuration."""
+ from modules.demoConfigs import _getDemoConfigByCode
+
+ config = _getDemoConfigByCode(code)
+ if not config:
+ raise HTTPException(
+ status_code=status.HTTP_404_NOT_FOUND,
+ detail=f"Demo config '{code}' not found",
+ )
+
+ db = getRootDbAppConnector()
+ logger.info(f"Removing demo config '{code}' (user: {currentUser.username})")
+ summary = config.remove(db)
+ logger.info(f"Demo config '{code}' removed: {summary}")
+
+ return {"status": "ok", "code": code, "summary": summary}
diff --git a/modules/routes/routeAdminFeatures.py b/modules/routes/routeAdminFeatures.py
index 9d05daf6..e052a9a2 100644
--- a/modules/routes/routeAdminFeatures.py
+++ b/modules/routes/routeAdminFeatures.py
@@ -11,7 +11,7 @@ Multi-Tenant Design:
"""
from fastapi import APIRouter, HTTPException, Depends, Request, Query
-from typing import List, Dict, Any, Optional
+from typing import List, Dict, Any, Optional, Union
from fastapi import status
import logging
import json
@@ -27,6 +27,8 @@ from modules.interfaces.interfaceDbApp import getRootInterface
from modules.interfaces.interfaceFeatures import getFeatureInterface
from modules.security.rbacCatalog import getCatalogService
from modules.routes.routeNotifications import create_access_change_notification
+from modules.shared.i18nRegistry import apiRouteContext, resolveText
+routeApiMsg = apiRouteContext("routeAdminFeatures")
logger = logging.getLogger(__name__)
@@ -183,7 +185,7 @@ def get_my_feature_instances(
featureDef = catalogService.getFeatureDefinition(instance.featureCode)
featuresMap[featureKey] = {
"code": instance.featureCode,
- "label": featureDef.get("label", {"de": instance.featureCode, "en": instance.featureCode}) if featureDef else {"de": instance.featureCode, "en": instance.featureCode},
+ "label": resolveText(featureDef.get("label") if featureDef else None),
"icon": featureDef.get("icon", "folder") if featureDef else "folder",
"instances": [],
"_mandateId": mandateId # Temporary for grouping
@@ -397,37 +399,80 @@ def create_feature(
# Feature Instance Endpoints (Mandate-scoped)
# =============================================================================
-@router.get("/instances", response_model=List[Dict[str, Any]])
+@router.get("/instances")
@limiter.limit("60/minute")
def list_feature_instances(
request: Request,
featureCode: Optional[str] = Query(None, description="Filter by feature code"),
+ pagination: Optional[str] = Query(None, description="JSON-encoded PaginationParams object"),
context: RequestContext = Depends(getRequestContext)
-) -> List[Dict[str, Any]]:
+):
"""
- List feature instances for the current mandate.
+ List feature instances.
- Returns instances the user has access to within the selected mandate.
+ With X-Mandate-Id: returns instances for that mandate.
+ Without X-Mandate-Id: returns all instances the user has access to
+ (via FeatureAccess records). Used for FK resolution in tables.
Args:
featureCode: Optional filter by feature code
+ pagination: JSON-encoded PaginationParams (page, pageSize, sort, filters)
"""
- if not context.mandateId:
- raise HTTPException(
- status_code=status.HTTP_400_BAD_REQUEST,
- detail="X-Mandate-Id header is required"
- )
-
try:
+ paginationParams = None
+ if pagination:
+ try:
+ paginationDict = json.loads(pagination)
+ if paginationDict:
+ paginationDict = normalize_pagination_dict(paginationDict)
+ paginationParams = PaginationParams(**paginationDict)
+ except (json.JSONDecodeError, ValueError) as e:
+ raise HTTPException(status_code=400, detail=f"Invalid pagination parameter: {str(e)}")
+
rootInterface = getRootInterface()
featureInterface = getFeatureInterface(rootInterface.db)
- instances = featureInterface.getFeatureInstancesForMandate(
- mandateId=str(context.mandateId),
- featureCode=featureCode
- )
+ if context.mandateId:
+ instances = featureInterface.getFeatureInstancesForMandate(
+ mandateId=str(context.mandateId),
+ featureCode=featureCode
+ )
+ else:
+ featureAccesses = rootInterface.getFeatureAccessesForUser(str(context.user.id))
+ seen = set()
+ instances = []
+ for fa in featureAccesses:
+ instId = str(fa.featureInstanceId)
+ if instId in seen:
+ continue
+ seen.add(instId)
+ inst = featureInterface.getFeatureInstance(instId)
+ if inst and inst.enabled:
+ if featureCode and inst.featureCode != featureCode:
+ continue
+ instances.append(inst)
- return [inst.model_dump() for inst in instances]
+ items = [inst.model_dump() for inst in instances]
+
+ if paginationParams:
+ filtered = _applyFiltersAndSort(items, paginationParams)
+ totalItems = len(filtered)
+ totalPages = math.ceil(totalItems / paginationParams.pageSize) if totalItems > 0 else 0
+ startIdx = (paginationParams.page - 1) * paginationParams.pageSize
+ endIdx = startIdx + paginationParams.pageSize
+ return {
+ "items": filtered[startIdx:endIdx],
+ "pagination": PaginationMetadata(
+ currentPage=paginationParams.page,
+ pageSize=paginationParams.pageSize,
+ totalItems=totalItems,
+ totalPages=totalPages,
+ sort=paginationParams.sort,
+ filters=paginationParams.filters,
+ ).model_dump(),
+ }
+ else:
+ return items
except HTTPException:
raise
@@ -450,7 +495,7 @@ def get_feature_instance_filter_values(
) -> list:
"""Return distinct filter values for a column in feature instances."""
if not context.mandateId:
- raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="X-Mandate-Id header is required")
+ raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=routeApiMsg("X-Mandate-Id header is required"))
try:
from modules.routes.routeDataUsers import _handleFilterValuesRequest
rootInterface = getRootInterface()
@@ -497,7 +542,7 @@ def get_feature_instance(
if not context.hasSysAdminRole:
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
- detail="Access denied to this feature instance"
+ detail=routeApiMsg("Access denied to this feature instance")
)
return instance.model_dump()
@@ -530,14 +575,14 @@ def create_feature_instance(
if not context.mandateId:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
- detail="X-Mandate-Id header is required"
+ detail=routeApiMsg("X-Mandate-Id header is required")
)
# Check mandate admin permission
if not _hasMandateAdminRole(context):
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
- detail="Mandate-Admin role required to create feature instances"
+ detail=routeApiMsg("Mandate-Admin role required to create feature instances")
)
try:
@@ -637,14 +682,14 @@ def delete_feature_instance(
if not context.hasSysAdminRole:
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
- detail="Access denied to this feature instance"
+ detail=routeApiMsg("Access denied to this feature instance")
)
# Check mandate admin permission
if not _hasMandateAdminRole(context) and not context.hasSysAdminRole:
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
- detail="Mandate-Admin role required to delete feature instances"
+ detail=routeApiMsg("Mandate-Admin role required to delete feature instances")
)
featureInterface.deleteFeatureInstance(instanceId)
@@ -704,14 +749,14 @@ def updateFeatureInstance(
if not context.hasSysAdminRole:
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
- detail="Access denied to this feature instance"
+ detail=routeApiMsg("Access denied to this feature instance")
)
# Check mandate admin permission
if not _hasMandateAdminRole(context) and not context.hasSysAdminRole:
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
- detail="Mandate-Admin role required to update feature instances"
+ detail=routeApiMsg("Mandate-Admin role required to update feature instances")
)
# Build update data (only non-None values)
@@ -730,7 +775,7 @@ def updateFeatureInstance(
if not updated:
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
- detail="Failed to update feature instance"
+ detail=routeApiMsg("Failed to update feature instance")
)
# Clear chatbot config cache when config was updated for chatbot instances
@@ -787,14 +832,14 @@ def sync_instance_roles(
if not context.hasSysAdminRole:
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
- detail="Access denied to this feature instance"
+ detail=routeApiMsg("Access denied to this feature instance")
)
# Check admin permission (Mandate-Admin or Feature-Admin)
if not _hasMandateAdminRole(context) and not context.hasSysAdminRole:
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
- detail="Admin role required to sync roles"
+ detail=routeApiMsg("Admin role required to sync roles")
)
result = featureInterface.syncRolesFromTemplate(instanceId, addOnly)
@@ -824,7 +869,12 @@ def _buildTemplateRolesList(featureCode: Optional[str] = None) -> List[Dict[str,
rootInterface = getRootInterface()
featureInterface = getFeatureInterface(rootInterface.db)
roles = featureInterface.getTemplateRoles(featureCode)
- return [r.model_dump() for r in roles]
+ result = []
+ for r in roles:
+ d = r.model_dump()
+ d["description"] = resolveText(r.description)
+ result.append(d)
+ return result
@router.get("/templates/roles")
@@ -995,13 +1045,14 @@ class FeatureInstanceUserUpdate(BaseModel):
enabled: Optional[bool] = Field(None, description="Whether this user's access is active (omit to leave unchanged)")
-@router.get("/instances/{instanceId}/users", response_model=List[FeatureInstanceUserResponse])
+@router.get("/instances/{instanceId}/users")
@limiter.limit("60/minute")
def list_feature_instance_users(
request: Request,
instanceId: str,
+ pagination: Optional[str] = Query(None, description="JSON-encoded PaginationParams object"),
context: RequestContext = Depends(getRequestContext)
-) -> List[FeatureInstanceUserResponse]:
+):
"""
List all users with access to a specific feature instance.
@@ -1027,7 +1078,7 @@ def list_feature_instance_users(
if not context.hasSysAdminRole:
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
- detail="Access denied to this feature instance"
+ detail=routeApiMsg("Access denied to this feature instance")
)
# Get all FeatureAccess records for this instance (Pydantic models)
@@ -1061,7 +1112,33 @@ def list_feature_instance_users(
enabled=fa.enabled
))
- return result
+ items = [r.model_dump() for r in result]
+
+ paginationParams = None
+ if pagination:
+ try:
+ paginationDict = json.loads(pagination)
+ if paginationDict:
+ paginationDict = normalize_pagination_dict(paginationDict)
+ paginationParams = PaginationParams(**paginationDict)
+ except (json.JSONDecodeError, ValueError) as e:
+ raise HTTPException(status_code=400, detail=f"Invalid pagination parameter: {str(e)}")
+
+ if paginationParams:
+ filtered = _applyFiltersAndSort(items, paginationParams)
+ totalItems = len(filtered)
+ totalPages = math.ceil(totalItems / paginationParams.pageSize) if totalItems > 0 else 0
+ startIdx = (paginationParams.page - 1) * paginationParams.pageSize
+ endIdx = startIdx + paginationParams.pageSize
+ return {
+ "items": filtered[startIdx:endIdx],
+ "pagination": PaginationMetadata(
+ currentPage=paginationParams.page, pageSize=paginationParams.pageSize,
+ totalItems=totalItems, totalPages=totalPages,
+ sort=paginationParams.sort, filters=paginationParams.filters,
+ ).model_dump(),
+ }
+ return items
except HTTPException:
raise
@@ -1092,7 +1169,7 @@ def get_feature_instance_users_filter_values(
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f"Feature instance '{instanceId}' not found")
if context.mandateId and str(instance.mandateId) != str(context.mandateId):
if not context.hasSysAdminRole:
- raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Access denied to this feature instance")
+ raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail=routeApiMsg("Access denied to this feature instance"))
featureAccesses = rootInterface.getFeatureAccessesByInstance(instanceId)
result = []
for fa in featureAccesses:
@@ -1157,14 +1234,14 @@ def add_user_to_feature_instance(
if not context.hasSysAdminRole:
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
- detail="Access denied to this feature instance"
+ detail=routeApiMsg("Access denied to this feature instance")
)
# Check admin permission
if not _hasMandateAdminRole(context) and not context.hasSysAdminRole:
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
- detail="Admin role required to add users to feature instances"
+ detail=routeApiMsg("Admin role required to add users to feature instances")
)
# Verify user exists
@@ -1178,7 +1255,7 @@ def add_user_to_feature_instance(
if not data.roleIds:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
- detail="At least one role is required to grant feature access"
+ detail=routeApiMsg("At least one role is required to grant feature access")
)
from modules.datamodels.datamodelRbac import Role
@@ -1265,14 +1342,14 @@ def remove_user_from_feature_instance(
if not context.hasSysAdminRole:
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
- detail="Access denied to this feature instance"
+ detail=routeApiMsg("Access denied to this feature instance")
)
# Check admin permission
if not _hasMandateAdminRole(context) and not context.hasSysAdminRole:
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
- detail="Admin role required to remove users from feature instances"
+ detail=routeApiMsg("Admin role required to remove users from feature instances")
)
# Find FeatureAccess record
@@ -1281,7 +1358,7 @@ def remove_user_from_feature_instance(
if not existingAccess:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
- detail="User does not have access to this feature instance"
+ detail=routeApiMsg("User does not have access to this feature instance")
)
featureAccessId = str(existingAccess.id)
@@ -1355,14 +1432,14 @@ def update_feature_instance_user_roles(
if not context.hasSysAdminRole:
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
- detail="Access denied to this feature instance"
+ detail=routeApiMsg("Access denied to this feature instance")
)
# Check admin permission
if not _hasMandateAdminRole(context) and not context.hasSysAdminRole:
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
- detail="Admin role required to update user roles"
+ detail=routeApiMsg("Admin role required to update user roles")
)
# Find FeatureAccess record
@@ -1371,7 +1448,7 @@ def update_feature_instance_user_roles(
if not existingAccess:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
- detail="User does not have access to this feature instance"
+ detail=routeApiMsg("User does not have access to this feature instance")
)
featureAccessId = str(existingAccess.id)
@@ -1463,7 +1540,7 @@ def get_feature_instance_available_roles(
if not context.hasSysAdminRole:
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
- detail="Access denied to this feature instance"
+ detail=routeApiMsg("Access denied to this feature instance")
)
# Get roles for this instance using interface method
@@ -1474,7 +1551,7 @@ def get_feature_instance_available_roles(
result.append({
"id": role.id,
"roleLabel": role.roleLabel,
- "description": role.description or {},
+ "description": resolveText(role.description),
"featureCode": role.featureCode,
"isSystemRole": role.isSystemRole
})
@@ -1559,7 +1636,7 @@ def _renameFeatureInstance(
instance = featureInterface.getFeatureInstance(instanceId)
if not instance:
- raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Feature instance not found")
+ raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=routeApiMsg("Feature instance not found"))
userId = str(context.user.id)
isInstanceAdmin = False
@@ -1577,11 +1654,11 @@ def _renameFeatureInstance(
break
if not isInstanceAdmin:
- raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Instance admin role required to rename")
+ raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail=routeApiMsg("Instance admin role required to rename"))
updated = featureInterface.updateFeatureInstance(instanceId, {"label": data.label.strip()})
if not updated:
- raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Failed to update instance")
+ raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=routeApiMsg("Failed to update instance"))
return {"id": instanceId, "label": updated.label}
diff --git a/modules/routes/routeAdminRbacExport.py b/modules/routes/routeAdminRbacExport.py
deleted file mode 100644
index c499a147..00000000
--- a/modules/routes/routeAdminRbacExport.py
+++ /dev/null
@@ -1,599 +0,0 @@
-# Copyright (c) 2025 Patrick Motsch
-# All rights reserved.
-"""
-RBAC export/import routes for the backend API.
-Implements endpoints for exporting and importing RBAC configurations.
-
-Multi-Tenant Design:
-- Global templates: SysAdmin can export/import
-- Mandate-scoped RBAC: Mandate Admin can export/import
-- Feature instance roles: Included in mandate export
-"""
-
-from fastapi import APIRouter, HTTPException, Depends, Request, UploadFile, File
-from fastapi.responses import JSONResponse
-from typing import List, Dict, Any, Optional
-from fastapi import status
-import logging
-import json
-from pydantic import BaseModel, Field
-
-from modules.auth import limiter, getRequestContext, RequestContext, requireSysAdminRole
-from modules.datamodels.datamodelUam import User
-from modules.datamodels.datamodelRbac import Role, AccessRule
-from modules.interfaces.interfaceDbApp import getRootInterface
-from modules.shared.timeUtils import getUtcTimestamp
-
-logger = logging.getLogger(__name__)
-
-router = APIRouter(
- prefix="/api/rbac",
- tags=["RBAC Export/Import"],
- responses={404: {"description": "Not found"}}
-)
-
-
-# =============================================================================
-# Request/Response Models
-# =============================================================================
-
-class RoleExport(BaseModel):
- """Export model for a role with its access rules"""
- roleLabel: str
- description: Dict[str, str]
- featureCode: Optional[str]
- isSystemRole: bool
- accessRules: List[Dict[str, Any]]
-
-
-class RbacExportData(BaseModel):
- """Complete RBAC export data"""
- exportVersion: str = "1.0"
- exportedAt: float
- exportedBy: str
- scope: str # "global" or "mandate"
- mandateId: Optional[str]
- roles: List[RoleExport]
-
-
-class RbacImportResult(BaseModel):
- """Result of RBAC import operation"""
- rolesCreated: int
- rolesUpdated: int
- rolesSkipped: int
- rulesCreated: int
- rulesUpdated: int
- errors: List[str]
-
-
-# =============================================================================
-# Global RBAC Export/Import (SysAdmin only)
-# =============================================================================
-
-@router.get("/export/global", response_model=RbacExportData)
-@limiter.limit("10/minute")
-def export_global_rbac(
- request: Request,
- sysAdmin: User = Depends(requireSysAdminRole)
-) -> RbacExportData:
- """
- Export global (template) RBAC rules.
-
- SysAdmin only - exports template roles that are copied to new feature instances.
- These are roles with mandateId=NULL.
- """
- try:
- rootInterface = getRootInterface()
-
- # Get all global template roles (mandateId is NULL) using interface method
- allRoles = rootInterface.getAllRoles()
- globalRoles = [r for r in allRoles if r.mandateId is None]
-
- exportRoles = []
- for role in globalRoles:
- roleId = role.id
-
- # Get access rules for this role using interface method
- accessRules = rootInterface.getAccessRulesByRole(roleId)
-
- exportRoles.append(RoleExport(
- roleLabel=role.roleLabel,
- description=role.description or {},
- featureCode=role.featureCode,
- isSystemRole=role.isSystemRole,
- accessRules=[
- {
- "context": r.context,
- "item": r.item,
- "view": r.view if r.view is not None else False,
- "read": r.read,
- "create": r.create,
- "update": r.update,
- "delete": r.delete
- }
- for r in accessRules
- ]
- ))
-
- logger.info(f"SysAdmin {sysAdmin.id} exported global RBAC ({len(exportRoles)} roles)")
-
- return RbacExportData(
- exportedAt=getUtcTimestamp(),
- exportedBy=str(sysAdmin.id),
- scope="global",
- mandateId=None,
- roles=exportRoles
- )
-
- except Exception as e:
- logger.error(f"Error exporting global RBAC: {e}")
- raise HTTPException(
- status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
- detail=f"Failed to export RBAC: {str(e)}"
- )
-
-
-@router.post("/import/global", response_model=RbacImportResult)
-@limiter.limit("5/minute")
-async def import_global_rbac(
- request: Request,
- file: UploadFile = File(..., description="JSON file with RBAC export data"),
- updateExisting: bool = False,
- sysAdmin: User = Depends(requireSysAdminRole)
-) -> RbacImportResult:
- """
- Import global (template) RBAC rules.
-
- SysAdmin only - imports template roles and their access rules.
-
- Args:
- file: JSON file containing RbacExportData
- updateExisting: If True, update existing roles. If False, skip them.
- """
- try:
- # Read and parse file
- content = await file.read()
- try:
- data = json.loads(content.decode("utf-8"))
- except json.JSONDecodeError as e:
- raise HTTPException(
- status_code=status.HTTP_400_BAD_REQUEST,
- detail=f"Invalid JSON: {str(e)}"
- )
-
- # Validate structure
- if "roles" not in data:
- raise HTTPException(
- status_code=status.HTTP_400_BAD_REQUEST,
- detail="Missing 'roles' field in import data"
- )
-
- rootInterface = getRootInterface()
- result = RbacImportResult(
- rolesCreated=0,
- rolesUpdated=0,
- rolesSkipped=0,
- rulesCreated=0,
- rulesUpdated=0,
- errors=[]
- )
-
- for roleData in data.get("roles", []):
- try:
- roleLabel = roleData.get("roleLabel")
- featureCode = roleData.get("featureCode")
-
- if not roleLabel:
- result.errors.append(f"Role without label skipped")
- result.rolesSkipped += 1
- continue
-
- # Check if role exists (global role with same label and featureCode) using interface method
- allRoles = rootInterface.getAllRoles()
- existingRoles = [
- r for r in allRoles
- if r.roleLabel == roleLabel
- and r.mandateId is None
- and r.featureCode == featureCode
- ]
-
- if existingRoles:
- if updateExisting:
- # Update existing role
- existingRole = existingRoles[0]
- roleId = existingRole.id
-
- rootInterface.db.recordModify(
- Role,
- roleId,
- {
- "description": roleData.get("description", {}),
- "isSystemRole": roleData.get("isSystemRole", False)
- }
- )
-
- # Update access rules
- result.rulesUpdated += _updateAccessRules(
- rootInterface,
- roleId,
- roleData.get("accessRules", [])
- )
-
- result.rolesUpdated += 1
- else:
- result.rolesSkipped += 1
- continue
- else:
- # Create new role
- newRole = Role(
- roleLabel=roleLabel,
- description=roleData.get("description", {}),
- featureCode=featureCode,
- mandateId=None,
- featureInstanceId=None,
- isSystemRole=roleData.get("isSystemRole", False)
- )
-
- createdRole = rootInterface.db.recordCreate(Role, newRole.model_dump())
- roleId = createdRole.get("id")
-
- # Create access rules
- for ruleData in roleData.get("accessRules", []):
- newRule = AccessRule(
- roleId=roleId,
- context=ruleData.get("context"),
- item=ruleData.get("item"),
- view=ruleData.get("view", False),
- read=ruleData.get("read"),
- create=ruleData.get("create"),
- update=ruleData.get("update"),
- delete=ruleData.get("delete")
- )
- rootInterface.db.recordCreate(AccessRule, newRule.model_dump())
- result.rulesCreated += 1
-
- result.rolesCreated += 1
-
- except Exception as e:
- result.errors.append(f"Error processing role '{roleData.get('roleLabel', 'unknown')}': {str(e)}")
-
- logger.info(
- f"SysAdmin {sysAdmin.id} imported global RBAC: "
- f"{result.rolesCreated} created, {result.rolesUpdated} updated, "
- f"{result.rolesSkipped} skipped"
- )
-
- return result
-
- except HTTPException:
- raise
- except Exception as e:
- logger.error(f"Error importing global RBAC: {e}")
- raise HTTPException(
- status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
- detail=f"Failed to import RBAC: {str(e)}"
- )
-
-
-# =============================================================================
-# Mandate RBAC Export/Import (Mandate Admin)
-# =============================================================================
-
-@router.get("/export/mandate", response_model=RbacExportData)
-@limiter.limit("10/minute")
-def export_mandate_rbac(
- request: Request,
- includeFeatureInstances: bool = True,
- context: RequestContext = Depends(getRequestContext)
-) -> RbacExportData:
- """
- Export RBAC rules for the current mandate.
-
- Requires Mandate-Admin role. Exports mandate-level roles and optionally
- feature instance roles.
-
- Args:
- includeFeatureInstances: Include feature instance roles in export
- """
- if not context.mandateId:
- raise HTTPException(
- status_code=status.HTTP_400_BAD_REQUEST,
- detail="X-Mandate-Id header is required"
- )
-
- # Check mandate admin permission
- if not _hasMandateAdminRole(context):
- raise HTTPException(
- status_code=status.HTTP_403_FORBIDDEN,
- detail="Mandate-Admin role required to export RBAC"
- )
-
- try:
- rootInterface = getRootInterface()
-
- # Get mandate-level roles using interface method
- allRoles = rootInterface.getAllRoles()
- mandateRoles = [
- r for r in allRoles
- if str(r.mandateId) == str(context.mandateId)
- ]
-
- # Filter by feature instance if not including them
- if not includeFeatureInstances:
- mandateRoles = [r for r in mandateRoles if not r.featureInstanceId]
-
- exportRoles = []
- for role in mandateRoles:
- roleId = role.id
-
- # Get access rules for this role using interface method
- accessRules = rootInterface.getAccessRulesByRole(roleId)
-
- exportRoles.append(RoleExport(
- roleLabel=role.roleLabel,
- description=role.description or {},
- featureCode=role.featureCode,
- isSystemRole=role.isSystemRole,
- accessRules=[
- {
- "context": r.context,
- "item": r.item,
- "view": r.view if r.view is not None else False,
- "read": r.read,
- "create": r.create,
- "update": r.update,
- "delete": r.delete
- }
- for r in accessRules
- ]
- ))
-
- logger.info(
- f"User {context.user.id} exported mandate {context.mandateId} RBAC "
- f"({len(exportRoles)} roles)"
- )
-
- return RbacExportData(
- exportedAt=getUtcTimestamp(),
- exportedBy=str(context.user.id),
- scope="mandate",
- mandateId=str(context.mandateId),
- roles=exportRoles
- )
-
- except HTTPException:
- raise
- except Exception as e:
- logger.error(f"Error exporting mandate RBAC: {e}")
- raise HTTPException(
- status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
- detail=f"Failed to export RBAC: {str(e)}"
- )
-
-
-@router.post("/import/mandate", response_model=RbacImportResult)
-@limiter.limit("5/minute")
-async def import_mandate_rbac(
- request: Request,
- file: UploadFile = File(..., description="JSON file with RBAC export data"),
- updateExisting: bool = False,
- context: RequestContext = Depends(getRequestContext)
-) -> RbacImportResult:
- """
- Import RBAC rules for the current mandate.
-
- Requires Mandate-Admin role. Imports roles as mandate-level roles
- (not feature instance roles - those are created via template copying).
-
- Args:
- file: JSON file containing RbacExportData
- updateExisting: If True, update existing roles. If False, skip them.
- """
- if not context.mandateId:
- raise HTTPException(
- status_code=status.HTTP_400_BAD_REQUEST,
- detail="X-Mandate-Id header is required"
- )
-
- # Check mandate admin permission
- if not _hasMandateAdminRole(context):
- raise HTTPException(
- status_code=status.HTTP_403_FORBIDDEN,
- detail="Mandate-Admin role required to import RBAC"
- )
-
- try:
- # Read and parse file
- content = await file.read()
- try:
- data = json.loads(content.decode("utf-8"))
- except json.JSONDecodeError as e:
- raise HTTPException(
- status_code=status.HTTP_400_BAD_REQUEST,
- detail=f"Invalid JSON: {str(e)}"
- )
-
- # Validate structure
- if "roles" not in data:
- raise HTTPException(
- status_code=status.HTTP_400_BAD_REQUEST,
- detail="Missing 'roles' field in import data"
- )
-
- rootInterface = getRootInterface()
- result = RbacImportResult(
- rolesCreated=0,
- rolesUpdated=0,
- rolesSkipped=0,
- rulesCreated=0,
- rulesUpdated=0,
- errors=[]
- )
-
- for roleData in data.get("roles", []):
- try:
- roleLabel = roleData.get("roleLabel")
- featureCode = roleData.get("featureCode")
-
- if not roleLabel:
- result.errors.append(f"Role without label skipped")
- result.rolesSkipped += 1
- continue
-
- # System roles cannot be imported at mandate level
- if roleData.get("isSystemRole", False):
- result.errors.append(f"System role '{roleLabel}' skipped (SysAdmin only)")
- result.rolesSkipped += 1
- continue
-
- # Check if role exists (mandate role with same label) using interface method
- allRoles = rootInterface.getAllRoles()
- existingRoles = [
- r for r in allRoles
- if r.roleLabel == roleLabel
- and str(r.mandateId) == str(context.mandateId)
- and r.featureInstanceId is None # Only mandate-level roles
- ]
-
- if existingRoles:
- if updateExisting:
- # Update existing role
- existingRole = existingRoles[0]
- roleId = existingRole.id
-
- rootInterface.db.recordModify(
- Role,
- roleId,
- {"description": roleData.get("description", {})}
- )
-
- # Update access rules
- result.rulesUpdated += _updateAccessRules(
- rootInterface,
- roleId,
- roleData.get("accessRules", [])
- )
-
- result.rolesUpdated += 1
- else:
- result.rolesSkipped += 1
- continue
- else:
- # Create new role at mandate level
- newRole = Role(
- roleLabel=roleLabel,
- description=roleData.get("description", {}),
- featureCode=featureCode,
- mandateId=str(context.mandateId),
- featureInstanceId=None,
- isSystemRole=False # Never create system roles via import
- )
-
- createdRole = rootInterface.db.recordCreate(Role, newRole.model_dump())
- roleId = createdRole.get("id")
-
- # Create access rules
- for ruleData in roleData.get("accessRules", []):
- newRule = AccessRule(
- roleId=roleId,
- context=ruleData.get("context"),
- item=ruleData.get("item"),
- view=ruleData.get("view", False),
- read=ruleData.get("read"),
- create=ruleData.get("create"),
- update=ruleData.get("update"),
- delete=ruleData.get("delete")
- )
- rootInterface.db.recordCreate(AccessRule, newRule.model_dump())
- result.rulesCreated += 1
-
- result.rolesCreated += 1
-
- except Exception as e:
- result.errors.append(f"Error processing role '{roleData.get('roleLabel', 'unknown')}': {str(e)}")
-
- logger.info(
- f"User {context.user.id} imported mandate {context.mandateId} RBAC: "
- f"{result.rolesCreated} created, {result.rolesUpdated} updated, "
- f"{result.rolesSkipped} skipped"
- )
-
- return result
-
- except HTTPException:
- raise
- except Exception as e:
- logger.error(f"Error importing mandate RBAC: {e}")
- raise HTTPException(
- status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
- detail=f"Failed to import RBAC: {str(e)}"
- )
-
-
-# =============================================================================
-# Helper Functions
-# =============================================================================
-
-def _hasMandateAdminRole(context: RequestContext) -> bool:
- """
- Check if the user has mandate admin role in the current context.
- """
- if context.hasSysAdminRole:
- return True
-
- if not context.roleIds:
- return False
-
- try:
- rootInterface = getRootInterface()
-
- for roleId in context.roleIds:
- role = rootInterface.getRole(roleId)
- if role:
- roleLabel = role.roleLabel
- # Admin role at mandate level
- if roleLabel == "admin" and not role.featureInstanceId:
- return True
-
- return False
-
- except Exception as e:
- logger.error(f"Error checking mandate admin role: {e}")
- return False
-
-
-def _updateAccessRules(interface, roleId: str, newRules: List[Dict[str, Any]]) -> int:
- """
- Update access rules for a role.
- Replaces existing rules with new ones.
-
- Returns:
- Number of rules created/updated
- """
- try:
- # Delete existing rules for this role using interface method
- existingRules = interface.getAccessRulesByRole(roleId)
- for rule in existingRules:
- interface.db.recordDelete(AccessRule, rule.id)
-
- # Create new rules
- count = 0
- for ruleData in newRules:
- newRule = AccessRule(
- roleId=roleId,
- context=ruleData.get("context"),
- item=ruleData.get("item"),
- view=ruleData.get("view", False),
- read=ruleData.get("read"),
- create=ruleData.get("create"),
- update=ruleData.get("update"),
- delete=ruleData.get("delete")
- )
- interface.db.recordCreate(AccessRule, newRule.model_dump())
- count += 1
-
- return count
-
- except Exception as e:
- logger.error(f"Error updating access rules: {e}")
- return 0
diff --git a/modules/routes/routeAdminRbacRules.py b/modules/routes/routeAdminRbacRules.py
index 16336fae..468bf21b 100644
--- a/modules/routes/routeAdminRbacRules.py
+++ b/modules/routes/routeAdminRbacRules.py
@@ -23,6 +23,8 @@ from modules.datamodels.datamodelRbac import AccessRuleContext, AccessRule, Role
from modules.datamodels.datamodelMembership import UserMandate
from modules.datamodels.datamodelPagination import PaginationParams, PaginatedResponse, PaginationMetadata, normalize_pagination_dict
from modules.interfaces.interfaceDbApp import getInterface, getRootInterface
+from modules.shared.i18nRegistry import apiRouteContext, t, resolveText
+routeApiMsg = apiRouteContext("routeAdminRbacRules")
# Configure logger
logger = logging.getLogger(__name__)
@@ -113,7 +115,7 @@ def get_permissions(
if not interface.rbac:
raise HTTPException(
status_code=500,
- detail="RBAC interface not available"
+ detail=routeApiMsg("RBAC interface not available")
)
# MULTI-TENANT: Get permissions using context (mandateId/featureInstanceId)
@@ -189,7 +191,7 @@ def get_all_permissions(
if not interface.rbac:
raise HTTPException(
status_code=500,
- detail="RBAC interface not available"
+ detail=routeApiMsg("RBAC interface not available")
)
# Determine which contexts to fetch
@@ -363,7 +365,7 @@ def get_access_rules(
isSysAdmin = reqContext.hasSysAdminRole
adminMandateIds = [] if isSysAdmin else _getAdminMandateIds(reqContext)
if not isSysAdmin and not adminMandateIds:
- raise HTTPException(status_code=403, detail="Admin role required")
+ raise HTTPException(status_code=403, detail=routeApiMsg("Admin role required"))
# Get interface - uses root interface for admin access
interface = getRootInterface()
@@ -488,11 +490,11 @@ def get_access_rules_by_role(
isSysAdmin = reqContext.hasSysAdminRole
adminMandateIds = [] if isSysAdmin else _getAdminMandateIds(reqContext)
if not isSysAdmin and not adminMandateIds:
- raise HTTPException(status_code=403, detail="Admin role required")
+ raise HTTPException(status_code=403, detail=routeApiMsg("Admin role required"))
# MandateAdmin: verify role belongs to their mandates
if not isSysAdmin and not _isRoleInAdminMandates(roleId, adminMandateIds):
- raise HTTPException(status_code=403, detail="Access denied: role not in your mandates")
+ raise HTTPException(status_code=403, detail=routeApiMsg("Access denied: role not in your mandates"))
interface = getRootInterface()
@@ -535,7 +537,7 @@ def get_access_rule(
isSysAdmin = reqContext.hasSysAdminRole
adminMandateIds = [] if isSysAdmin else _getAdminMandateIds(reqContext)
if not isSysAdmin and not adminMandateIds:
- raise HTTPException(status_code=403, detail="Admin role required")
+ raise HTTPException(status_code=403, detail=routeApiMsg("Admin role required"))
# Get interface - uses root interface for admin access
interface = getRootInterface()
@@ -550,7 +552,7 @@ def get_access_rule(
# MandateAdmin: verify rule's role belongs to their mandates
if not isSysAdmin and not _isRoleInAdminMandates(str(rule.roleId), adminMandateIds):
- raise HTTPException(status_code=403, detail="Access denied: rule's role not in your mandates")
+ raise HTTPException(status_code=403, detail=routeApiMsg("Access denied: rule's role not in your mandates"))
# Convert to dict for JSON serialization
return rule.model_dump()
@@ -586,7 +588,7 @@ def create_access_rule(
isSysAdmin = reqContext.hasSysAdminRole
adminMandateIds = [] if isSysAdmin else _getAdminMandateIds(reqContext)
if not isSysAdmin and not adminMandateIds:
- raise HTTPException(status_code=403, detail="Admin role required")
+ raise HTTPException(status_code=403, detail=routeApiMsg("Admin role required"))
# Get interface - uses root interface for admin access
interface = getRootInterface()
@@ -621,7 +623,7 @@ def create_access_rule(
# MandateAdmin: verify the rule's role belongs to their mandates
if not isSysAdmin and accessRule.roleId:
if not _isRoleInAdminMandates(str(accessRule.roleId), adminMandateIds):
- raise HTTPException(status_code=403, detail="Access denied: role not in your mandates")
+ raise HTTPException(status_code=403, detail=routeApiMsg("Access denied: role not in your mandates"))
# Create rule
createdRule = interface.createAccessRule(accessRule)
@@ -666,7 +668,7 @@ def update_access_rule(
isSysAdmin = reqContext.hasSysAdminRole
adminMandateIds = [] if isSysAdmin else _getAdminMandateIds(reqContext)
if not isSysAdmin and not adminMandateIds:
- raise HTTPException(status_code=403, detail="Admin role required")
+ raise HTTPException(status_code=403, detail=routeApiMsg("Admin role required"))
# Get interface - uses root interface for admin access
interface = getRootInterface()
@@ -681,7 +683,7 @@ def update_access_rule(
# MandateAdmin: verify existing rule's role belongs to their mandates
if not isSysAdmin and not _isRoleInAdminMandates(str(existingRule.roleId), adminMandateIds):
- raise HTTPException(status_code=403, detail="Access denied: rule's role not in your mandates")
+ raise HTTPException(status_code=403, detail=routeApiMsg("Access denied: rule's role not in your mandates"))
# Validate and parse access rule data
try:
@@ -754,7 +756,7 @@ def delete_access_rule(
isSysAdmin = reqContext.hasSysAdminRole
adminMandateIds = [] if isSysAdmin else _getAdminMandateIds(reqContext)
if not isSysAdmin and not adminMandateIds:
- raise HTTPException(status_code=403, detail="Admin role required")
+ raise HTTPException(status_code=403, detail=routeApiMsg("Admin role required"))
# Get interface - uses root interface for admin access
interface = getRootInterface()
@@ -769,7 +771,7 @@ def delete_access_rule(
# MandateAdmin: verify rule's role belongs to their mandates
if not isSysAdmin and not _isRoleInAdminMandates(str(existingRule.roleId), adminMandateIds):
- raise HTTPException(status_code=403, detail="Access denied: rule's role not in your mandates")
+ raise HTTPException(status_code=403, detail=routeApiMsg("Access denied: rule's role not in your mandates"))
# Delete rule
success = interface.deleteAccessRule(ruleId)
@@ -835,7 +837,7 @@ def list_roles(
isSysAdmin = reqContext.hasSysAdminRole
adminMandateIds = [] if isSysAdmin else _getAdminMandateIds(reqContext)
if not isSysAdmin and not adminMandateIds:
- raise HTTPException(status_code=403, detail="Admin role required")
+ raise HTTPException(status_code=403, detail=routeApiMsg("Admin role required"))
interface = getRootInterface()
@@ -909,13 +911,13 @@ def list_roles(
result.append({
"id": role.id,
"roleLabel": role.roleLabel,
- "description": role.description,
+ "description": resolveText(role.description),
"mandateId": role.mandateId,
"featureInstanceId": role.featureInstanceId,
"featureCode": role.featureCode,
"userCount": roleCounts.get(str(role.id), 0),
"isSystemRole": role.isSystemRole,
- "scopeType": scopeType # Computed field for frontend display
+ "scopeType": scopeType
})
# MandateAdmin: filter to only roles in admin's mandates
@@ -929,14 +931,8 @@ def list_roles(
if searchTerm:
searchedResult = []
for item in result:
- # Search in roleLabel and description
roleLabel = (item.get("roleLabel") or "").lower()
- description = item.get("description")
- descText = ""
- if isinstance(description, dict):
- descText = " ".join(str(v) for v in description.values()).lower()
- elif description:
- descText = str(description).lower()
+ descText = (item.get("description") or "").lower()
scopeType = (item.get("scopeType") or "").lower()
if searchTerm in roleLabel or searchTerm in descText or searchTerm in scopeType:
@@ -1008,7 +1004,7 @@ def get_roles_filter_values(
isSysAdmin = reqContext.hasSysAdminRole
adminMandateIds = [] if isSysAdmin else _getAdminMandateIds(reqContext)
if not isSysAdmin and not adminMandateIds:
- raise HTTPException(status_code=403, detail="Admin role required")
+ raise HTTPException(status_code=403, detail=routeApiMsg("Admin role required"))
interface = getRootInterface()
dbRoles = interface.getAllRoles(pagination=None)
@@ -1044,7 +1040,7 @@ def get_roles_filter_values(
result.append({
"id": role.id,
"roleLabel": role.roleLabel,
- "description": role.description,
+ "description": resolveText(role.description),
"mandateId": role.mandateId,
"featureInstanceId": role.featureInstanceId,
"featureCode": role.featureCode,
@@ -1083,12 +1079,12 @@ def create_role(
isSysAdmin = reqContext.hasSysAdminRole
adminMandateIds = [] if isSysAdmin else _getAdminMandateIds(reqContext)
if not isSysAdmin and not adminMandateIds:
- raise HTTPException(status_code=403, detail="Admin role required")
+ raise HTTPException(status_code=403, detail=routeApiMsg("Admin role required"))
# MandateAdmin: can only create roles in their own mandates
if not isSysAdmin:
if not role.mandateId or str(role.mandateId) not in adminMandateIds:
- raise HTTPException(status_code=403, detail="Access denied: can only create roles in your own mandates")
+ raise HTTPException(status_code=403, detail=routeApiMsg("Access denied: can only create roles in your own mandates"))
interface = getRootInterface()
@@ -1099,7 +1095,7 @@ def create_role(
return {
"id": createdRole.id,
"roleLabel": createdRole.roleLabel,
- "description": createdRole.description,
+ "description": createdRole.description.model_dump() if hasattr(createdRole.description, 'model_dump') else createdRole.description,
"mandateId": createdRole.mandateId,
"featureInstanceId": createdRole.featureInstanceId,
"featureCode": createdRole.featureCode,
@@ -1142,7 +1138,7 @@ def get_role(
isSysAdmin = reqContext.hasSysAdminRole
adminMandateIds = [] if isSysAdmin else _getAdminMandateIds(reqContext)
if not isSysAdmin and not adminMandateIds:
- raise HTTPException(status_code=403, detail="Admin role required")
+ raise HTTPException(status_code=403, detail=routeApiMsg("Admin role required"))
interface = getRootInterface()
@@ -1156,12 +1152,12 @@ def get_role(
# MandateAdmin: verify role belongs to their mandates
if not isSysAdmin:
if not role.mandateId or str(role.mandateId) not in adminMandateIds:
- raise HTTPException(status_code=403, detail="Access denied: role not in your mandates")
+ raise HTTPException(status_code=403, detail=routeApiMsg("Access denied: role not in your mandates"))
return {
"id": role.id,
"roleLabel": role.roleLabel,
- "description": role.description,
+ "description": resolveText(role.description),
"mandateId": role.mandateId,
"featureInstanceId": role.featureInstanceId,
"featureCode": role.featureCode,
@@ -1203,7 +1199,7 @@ def update_role(
isSysAdmin = reqContext.hasSysAdminRole
adminMandateIds = [] if isSysAdmin else _getAdminMandateIds(reqContext)
if not isSysAdmin and not adminMandateIds:
- raise HTTPException(status_code=403, detail="Admin role required")
+ raise HTTPException(status_code=403, detail=routeApiMsg("Admin role required"))
interface = getRootInterface()
@@ -1213,9 +1209,9 @@ def update_role(
if not existingRole:
raise HTTPException(status_code=404, detail=f"Role {roleId} not found")
if existingRole.isSystemRole and not existingRole.mandateId:
- raise HTTPException(status_code=403, detail="Access denied: cannot modify template/system roles")
+ raise HTTPException(status_code=403, detail=routeApiMsg("Access denied: cannot modify template/system roles"))
if not existingRole.mandateId or str(existingRole.mandateId) not in adminMandateIds:
- raise HTTPException(status_code=403, detail="Access denied: role not in your mandates")
+ raise HTTPException(status_code=403, detail=routeApiMsg("Access denied: role not in your mandates"))
updatedRole = interface.updateRole(roleId, role)
@@ -1224,7 +1220,7 @@ def update_role(
return {
"id": updatedRole.id,
"roleLabel": updatedRole.roleLabel,
- "description": updatedRole.description,
+ "description": updatedRole.description.model_dump() if hasattr(updatedRole.description, 'model_dump') else updatedRole.description,
"mandateId": updatedRole.mandateId,
"featureInstanceId": updatedRole.featureInstanceId,
"featureCode": updatedRole.featureCode,
@@ -1267,7 +1263,7 @@ def delete_role(
isSysAdmin = reqContext.hasSysAdminRole
adminMandateIds = [] if isSysAdmin else _getAdminMandateIds(reqContext)
if not isSysAdmin and not adminMandateIds:
- raise HTTPException(status_code=403, detail="Admin role required")
+ raise HTTPException(status_code=403, detail=routeApiMsg("Admin role required"))
interface = getRootInterface()
@@ -1277,9 +1273,9 @@ def delete_role(
if not existingRole:
raise HTTPException(status_code=404, detail=f"Role {roleId} not found")
if existingRole.isSystemRole and not existingRole.mandateId:
- raise HTTPException(status_code=403, detail="Access denied: cannot delete template/system roles")
+ raise HTTPException(status_code=403, detail=routeApiMsg("Access denied: cannot delete template/system roles"))
if not existingRole.mandateId or str(existingRole.mandateId) not in adminMandateIds:
- raise HTTPException(status_code=403, detail="Access denied: role not in your mandates")
+ raise HTTPException(status_code=403, detail=routeApiMsg("Access denied: role not in your mandates"))
success = interface.deleteRole(roleId)
if not success:
@@ -1360,6 +1356,12 @@ def getCatalogObjects(
except Exception as e:
logger.warning(f"Could not get active features for mandate {mandateId}: {e}")
+ def _resolveLabels(objects: list) -> list:
+ for obj in objects:
+ resolved = resolveText(obj.get("label"))
+ obj["label"] = resolved if resolved else f"[{obj.get('objectKey', '?')}]"
+ return objects
+
if context:
# Single context filter
try:
@@ -1381,7 +1383,7 @@ def getCatalogObjects(
if activeFeatures:
objects = [obj for obj in objects if obj.get("featureCode") in activeFeatures]
- return {context.upper(): objects}
+ return {context.upper(): _resolveLabels(objects)}
else:
# All contexts
result = catalog.getAllCatalogObjects(featureCode)
@@ -1391,6 +1393,8 @@ def getCatalogObjects(
for ctxKey in result:
result[ctxKey] = [obj for obj in result[ctxKey] if obj.get("featureCode") in activeFeatures]
+ for ctxKey in result:
+ _resolveLabels(result[ctxKey])
return result
except HTTPException:
@@ -1403,32 +1407,6 @@ def getCatalogObjects(
)
-@router.get("/catalog/stats", response_model=Dict[str, Any])
-@limiter.limit("60/minute")
-def getCatalogStats(
- request: Request,
- currentUser: User = Depends(requireSysAdminRole)
-) -> Dict[str, Any]:
- """
- Get statistics about the RBAC catalog.
-
- Returns:
- - Statistics about registered features, objects, and roles
- """
- try:
- from modules.security.rbacCatalog import getCatalogService
-
- catalog = getCatalogService()
- return catalog.getCatalogStats()
-
- except Exception as e:
- logger.error(f"Error getting catalog stats: {str(e)}")
- raise HTTPException(
- status_code=500,
- detail=f"Failed to get catalog stats: {str(e)}"
- )
-
-
# =============================================================================
# CLEANUP: Remove duplicate AccessRules
# =============================================================================
diff --git a/modules/routes/routeAdminUserAccessOverview.py b/modules/routes/routeAdminUserAccessOverview.py
index 9b19fc41..ab04d085 100644
--- a/modules/routes/routeAdminUserAccessOverview.py
+++ b/modules/routes/routeAdminUserAccessOverview.py
@@ -15,7 +15,7 @@ import logging
from modules.auth import limiter
from modules.auth.authentication import getRequestContext, RequestContext
from modules.datamodels.datamodelUam import User, UserInDB
-from modules.datamodels.datamodelRbac import Role, AccessRule, AccessRuleContext
+from modules.datamodels.datamodelRbac import Role, AccessRule
from modules.datamodels.datamodelMembership import (
UserMandate,
UserMandateRole,
@@ -24,6 +24,8 @@ from modules.datamodels.datamodelMembership import (
)
from modules.datamodels.datamodelFeatures import FeatureInstance, Feature
from modules.interfaces.interfaceDbApp import getRootInterface
+from modules.shared.i18nRegistry import apiRouteContext, t, resolveText
+routeApiMsg = apiRouteContext("routeAdminUserAccessOverview")
# Configure logger
logger = logging.getLogger(__name__)
@@ -116,7 +118,7 @@ def listUsersForOverview(
- List of user dictionaries with basic info
"""
if not _hasMandateAdminRole(context):
- raise HTTPException(status_code=403, detail="Keine Berechtigung für die Benutzerzugriffsübersicht")
+ raise HTTPException(status_code=403, detail=routeApiMsg("Keine Berechtigung für die Benutzerzugriffsübersicht"))
try:
interface = getRootInterface()
@@ -209,7 +211,7 @@ def getUserAccessOverview(
- Resource access (what resources the user can use)
"""
if not _hasMandateAdminRole(context):
- raise HTTPException(status_code=403, detail="Keine Berechtigung für die Benutzerzugriffsübersicht")
+ raise HTTPException(status_code=403, detail=routeApiMsg("Keine Berechtigung für die Benutzerzugriffsübersicht"))
try:
interface = getRootInterface()
@@ -239,7 +241,7 @@ def getUserAccessOverview(
break
if not userInAdminMandate:
- raise HTTPException(status_code=403, detail="Benutzer gehört nicht zu Ihrem Mandate")
+ raise HTTPException(status_code=403, detail=routeApiMsg("Benutzer gehört nicht zu Ihrem Mandate"))
# Get user
user = interface.getUser(userId)
@@ -296,7 +298,7 @@ def getUserAccessOverview(
roleInfo = {
"id": roleId,
"roleLabel": role.roleLabel,
- "description": role.description or {},
+ "description": resolveText(role.description),
"scope": scope,
"scopePriority": _getRoleScopePriority(scope),
"mandateId": role.mandateId,
@@ -332,7 +334,7 @@ def getUserAccessOverview(
# Get feature info using interface method
featureCode = instance.featureCode
feature = interface.getFeatureByCode(featureCode)
- featureLabel = feature.label if feature else {}
+ featureLabel = resolveText(feature.label) if feature and feature.label else ""
# Get roles for this FeatureAccess using interface method
instanceRoleIds = interface.getRoleIdsForFeatureAccess(faId)
@@ -346,14 +348,14 @@ def getUserAccessOverview(
roleInfo = {
"id": roleId,
"roleLabel": role.roleLabel,
- "description": role.description or {},
- "scope": scope,
- "scopePriority": _getRoleScopePriority(scope),
- "mandateId": role.mandateId,
- "featureInstanceId": role.featureInstanceId,
- "source": "featureInstance",
- "sourceInstanceId": faInstanceId,
- "sourceInstanceLabel": instance.label,
+ "description": resolveText(role.description),
+ "scope": scope,
+ "scopePriority": _getRoleScopePriority(scope),
+ "mandateId": role.mandateId,
+ "featureInstanceId": role.featureInstanceId,
+ "source": "featureInstance",
+ "sourceInstanceId": faInstanceId,
+ "sourceInstanceLabel": instance.label,
}
allRoles.append(roleInfo)
roleIdToInfo[roleId] = roleInfo
@@ -496,120 +498,3 @@ def getUserAccessOverview(
detail=f"Failed to get user access overview: {str(e)}"
)
-
-@router.get("/{userId}/effective-permissions", response_model=Dict[str, Any])
-@limiter.limit("60/minute")
-def getEffectivePermissions(
- request: Request,
- userId: str = Path(..., description="User ID"),
- mandateId: str = Query(..., description="Mandate ID context"),
- featureInstanceId: Optional[str] = Query(None, description="Feature instance ID context"),
- accessContext: str = Query("DATA", alias="context", description="Context type: DATA, UI, or RESOURCE"),
- item: Optional[str] = Query(None, description="Specific item to check permissions for"),
- context: RequestContext = Depends(getRequestContext)
-) -> Dict[str, Any]:
- """
- Get effective (resolved) permissions for a user in a specific context.
- This uses the RBAC resolution logic to show what permissions actually apply.
- MULTI-TENANT: SysAdmin sees all. MandateAdmin can check users in their own mandates.
-
- Path Parameters:
- - userId: User ID
-
- Query Parameters:
- - mandateId: Required mandate context
- - featureInstanceId: Optional feature instance context
- - context: Permission context (DATA, UI, RESOURCE)
- - item: Optional specific item to check
-
- Returns:
- - Effective permissions after RBAC resolution
- """
- if not context.hasSysAdminRole:
- # Check if user has admin role in any mandate
- if not _hasMandateAdminRole(context):
- raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Admin role required")
-
- try:
- interface = getRootInterface()
-
- # MandateAdmin: verify the requested user shares at least one admin mandate
- if not context.hasSysAdminRole:
- adminMandateIds = []
- adminUserMandates = interface.getUserMandates(str(context.user.id))
- for um in adminUserMandates:
- umId = getattr(um, 'id', None)
- mid = getattr(um, 'mandateId', None)
- if not umId or not mid:
- continue
- roleIds = interface.getRoleIdsForUserMandate(str(umId))
- for roleId in roleIds:
- role = interface.getRole(roleId)
- if role and role.roleLabel == "admin" and not role.featureInstanceId:
- adminMandateIds.append(str(mid))
- break
-
- if not adminMandateIds:
- raise HTTPException(status_code=403, detail="Insufficient permissions")
-
- userInAdminMandate = False
- for mid in adminMandateIds:
- if _isUserInMandate(interface, userId, mid):
- userInAdminMandate = True
- break
-
- if not userInAdminMandate:
- raise HTTPException(status_code=403, detail="Benutzer gehört nicht zu Ihrem Mandate")
-
- # Get user
- user = interface.getUser(userId)
- if not user:
- raise HTTPException(
- status_code=404,
- detail=f"User {userId} not found"
- )
-
- # Convert context string to enum
- try:
- contextEnum = AccessRuleContext(accessContext)
- except ValueError:
- raise HTTPException(
- status_code=400,
- detail=f"Invalid context: {accessContext}. Must be DATA, UI, or RESOURCE."
- )
-
- # Use RBAC interface to get actual permissions
- from modules.security.rbac import RbacClass
- rbac = RbacClass(interface.db, dbApp=interface.db)
-
- permissions = rbac.getUserPermissions(
- user=user,
- context=contextEnum,
- item=item or "",
- mandateId=mandateId,
- featureInstanceId=featureInstanceId
- )
-
- return {
- "userId": userId,
- "mandateId": mandateId,
- "featureInstanceId": featureInstanceId,
- "context": accessContext,
- "item": item,
- "effectivePermissions": {
- "view": permissions.view,
- "read": _getAccessLevelLabel(permissions.read.value if permissions.read else None),
- "create": _getAccessLevelLabel(permissions.create.value if permissions.create else None),
- "update": _getAccessLevelLabel(permissions.update.value if permissions.update else None),
- "delete": _getAccessLevelLabel(permissions.delete.value if permissions.delete else None),
- }
- }
-
- except HTTPException:
- raise
- except Exception as e:
- logger.error(f"Error getting effective permissions: {str(e)}")
- raise HTTPException(
- status_code=500,
- detail=f"Failed to get effective permissions: {str(e)}"
- )
diff --git a/modules/routes/routeAttributes.py b/modules/routes/routeAttributes.py
index e877e512..0118fecf 100644
--- a/modules/routes/routeAttributes.py
+++ b/modules/routes/routeAttributes.py
@@ -9,6 +9,9 @@ from modules.auth import limiter
# Import the attribute definition and helper functions
from modules.shared.attributeUtils import getModelClasses, getModelAttributeDefinitions, AttributeResponse, AttributeDefinition
+from modules.shared.i18nRegistry import apiRouteContext, _CURRENT_LANGUAGE
+
+routeApiMsg = apiRouteContext("routeAttributes")
# Configure logger
logger = logging.getLogger(__name__)
@@ -42,14 +45,15 @@ def get_entity_attributes(
# Check if entity type is known
if entityType not in modelClasses:
raise HTTPException(
- status_code=status.HTTP_404_NOT_FOUND,
- detail=f"Entity type '{entityType}' not found."
+ status_code=status.HTTP_404_NOT_FOUND,
+ detail=routeApiMsg("Entitätstyp nicht gefunden.") + f" ({entityType})",
)
# Get model class and derive attributes from it
modelClass = modelClasses[entityType]
+ userLanguage = _CURRENT_LANGUAGE.get()
try:
- attribute_defs = getModelAttributeDefinitions(modelClass)
+ attribute_defs = getModelAttributeDefinitions(modelClass, userLanguage=userLanguage)
except Exception as e:
logger.error(f"Error getting attribute definitions for {entityType}: {str(e)}", exc_info=True)
raise HTTPException(
diff --git a/modules/routes/routeBilling.py b/modules/routes/routeBilling.py
index 5029e485..800f106d 100644
--- a/modules/routes/routeBilling.py
+++ b/modules/routes/routeBilling.py
@@ -38,6 +38,9 @@ from modules.datamodels.datamodelBilling import (
BillingStatisticsChartData,
BillingCheckResult,
)
+from modules.shared.i18nRegistry import apiRouteContext
+
+routeApiMsg = apiRouteContext("routeBilling")
# Configure logger
logger = logging.getLogger(__name__)
@@ -179,47 +182,6 @@ def _isMemberOfMandate(ctx: RequestContext, targetMandateId: str) -> bool:
return False
-def _filterTransactionsByScope(transactions: list, scope: BillingDataScope) -> list:
- """
- Filter a list of transaction dicts based on the user's BillingDataScope.
-
- Rules:
- - SysAdmin: no filter
- - Mandate-Admin: all transactions in their admin mandates
- - Feature-Instance-Admin: transactions for their admin feature instances
- - Regular user: only transactions where createdByUserId/userId matches
- """
- if scope.isGlobalAdmin:
- return transactions
-
- adminMandateSet = set(scope.adminMandateIds)
- adminFiSet = set(scope.adminFeatureInstanceIds)
- memberMandateSet = set(scope.memberMandateIds)
-
- result = []
- for t in transactions:
- mandateId = t.get("mandateId")
- fiId = t.get("featureInstanceId")
- txUserId = t.get("createdByUserId") or t.get("userId")
-
- # Mandate admin → sees all transactions in their mandate
- if mandateId and mandateId in adminMandateSet:
- result.append(t)
- continue
-
- # Feature instance admin → sees all transactions for their instances
- if fiId and fiId in adminFiSet:
- result.append(t)
- continue
-
- # Regular member → only own transactions
- if mandateId and mandateId in memberMandateSet:
- if txUserId and txUserId == scope.userId:
- result.append(t)
- continue
-
- return result
-
# =============================================================================
# Request/Response Models
@@ -378,9 +340,9 @@ def _creditStripeSessionIfNeeded(
amount_chf_str = metadata.get("amountChf", "0")
if not session_id:
- raise HTTPException(status_code=400, detail="Stripe session id missing")
+ raise HTTPException(status_code=400, detail=routeApiMsg("Stripe session id missing"))
if not mandate_id:
- raise HTTPException(status_code=400, detail="Invalid session metadata: mandateId missing")
+ raise HTTPException(status_code=400, detail=routeApiMsg("Invalid session metadata: mandateId missing"))
existing_payment_tx = billingInterface.getPaymentTransactionByReferenceId(session_id)
if existing_payment_tx:
@@ -404,11 +366,11 @@ def _creditStripeSessionIfNeeded(
if amount_total is not None:
amount_chf = amount_total / 100.0
else:
- raise HTTPException(status_code=400, detail="Invalid amount in Stripe session")
+ raise HTTPException(status_code=400, detail=routeApiMsg("Invalid amount in Stripe session"))
settings = billingInterface.getSettings(mandate_id)
if not settings:
- raise HTTPException(status_code=404, detail="Billing settings not found")
+ raise HTTPException(status_code=404, detail=routeApiMsg("Billing settings not found"))
account = billingInterface.getOrCreateMandateAccount(mandate_id, initialBalance=0.0)
@@ -578,10 +540,10 @@ def getStatistics(
try:
# Validate period
if period not in ["day", "month", "year"]:
- raise HTTPException(status_code=400, detail="Invalid period. Use 'day', 'month', or 'year'")
+ raise HTTPException(status_code=400, detail=routeApiMsg("Invalid period. Use 'day', 'month', or 'year'"))
if period == "day" and not month:
- raise HTTPException(status_code=400, detail="Month is required for 'day' period")
+ raise HTTPException(status_code=400, detail=routeApiMsg("Month is required for 'day' period"))
billingInterface = getBillingInterface(ctx.user, ctx.mandateId)
settings = billingInterface.getSettings(ctx.mandateId)
@@ -683,13 +645,13 @@ def getSettingsAdmin(
Access: SysAdmin (any mandate) or MandateAdmin (own mandate).
"""
if not _isAdminOfMandate(ctx, targetMandateId):
- raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Admin role required for this mandate")
+ raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail=routeApiMsg("Admin role required for this mandate"))
try:
billingInterface = getBillingInterface(ctx.user, targetMandateId)
settings = billingInterface.getSettings(targetMandateId)
if not settings:
- raise HTTPException(status_code=404, detail="Billing settings not found")
+ raise HTTPException(status_code=404, detail=routeApiMsg("Billing settings not found"))
return settings
@@ -713,7 +675,7 @@ def createOrUpdateSettings(
Access: SysAdmin (any mandate) or MandateAdmin (own mandate).
"""
if not _isAdminOfMandate(ctx, targetMandateId):
- raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Admin role required for this mandate")
+ raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail=routeApiMsg("Admin role required for this mandate"))
try:
billingInterface = getBillingInterface(ctx.user, targetMandateId)
existingSettings = billingInterface.getSettings(targetMandateId)
@@ -783,12 +745,12 @@ def addCredit(
settings = billingInterface.getSettings(targetMandateId)
if not settings:
- raise HTTPException(status_code=404, detail="Billing settings not found for this mandate")
+ raise HTTPException(status_code=404, detail=routeApiMsg("Billing settings not found for this mandate"))
account = billingInterface.getOrCreateMandateAccount(targetMandateId, initialBalance=0.0)
if creditRequest.amount == 0:
- raise HTTPException(status_code=400, detail="Amount must not be zero")
+ raise HTTPException(status_code=400, detail=routeApiMsg("Amount must not be zero"))
from modules.datamodels.datamodelBilling import BillingTransaction
@@ -835,10 +797,10 @@ def createCheckoutSession(
settings = billingInterface.getSettings(targetMandateId)
if not settings:
- raise HTTPException(status_code=404, detail="Billing settings not found for this mandate")
+ raise HTTPException(status_code=404, detail=routeApiMsg("Billing settings not found for this mandate"))
if not _isAdminOfMandate(ctx, targetMandateId):
- raise HTTPException(status_code=403, detail="Mandate admin role required to load mandate credit")
+ raise HTTPException(status_code=403, detail=routeApiMsg("Mandate admin role required to load mandate credit"))
from modules.serviceCenter.services.serviceBilling.stripeCheckout import create_checkout_session
redirect_url = create_checkout_session(
@@ -873,7 +835,7 @@ def confirmCheckoutSession(
stripe = _getStripeClient()
session = stripe.checkout.Session.retrieve(confirmRequest.sessionId)
if not session:
- raise HTTPException(status_code=404, detail="Stripe Checkout Session not found")
+ raise HTTPException(status_code=404, detail=routeApiMsg("Stripe Checkout Session not found"))
from modules.shared.stripeClient import stripeToDict
session_dict = stripeToDict(session)
@@ -882,7 +844,7 @@ def confirmCheckoutSession(
user_id = metadata.get("userId") or None
if not mandate_id:
- raise HTTPException(status_code=400, detail="Invalid session metadata: mandateId missing")
+ raise HTTPException(status_code=400, detail=routeApiMsg("Invalid session metadata: mandateId missing"))
payment_status = session_dict.get("payment_status")
if payment_status != "paid":
@@ -891,10 +853,10 @@ def confirmCheckoutSession(
billingInterface = getBillingInterface(ctx.user, mandate_id)
settings = billingInterface.getSettings(mandate_id)
if not settings:
- raise HTTPException(status_code=404, detail="Billing settings not found")
+ raise HTTPException(status_code=404, detail=routeApiMsg("Billing settings not found"))
if not _isAdminOfMandate(ctx, mandate_id):
- raise HTTPException(status_code=403, detail="Mandate admin role required")
+ raise HTTPException(status_code=403, detail=routeApiMsg("Mandate admin role required"))
root_billing_interface = _getRootInterface()
return _creditStripeSessionIfNeeded(root_billing_interface, session_dict, eventId=None)
@@ -921,10 +883,10 @@ async def stripeWebhook(
webhook_secret = APP_CONFIG.get("STRIPE_WEBHOOK_SECRET")
if not webhook_secret:
logger.error("STRIPE_WEBHOOK_SECRET not configured")
- raise HTTPException(status_code=500, detail="Webhook not configured")
+ raise HTTPException(status_code=500, detail=routeApiMsg("Webhook not configured"))
if not stripe_signature:
- raise HTTPException(status_code=400, detail="Missing Stripe-Signature header")
+ raise HTTPException(status_code=400, detail=routeApiMsg("Missing Stripe-Signature header"))
payload = await request.body()
@@ -935,10 +897,10 @@ async def stripeWebhook(
)
except ValueError as e:
logger.warning(f"Stripe webhook invalid payload: {e}")
- raise HTTPException(status_code=400, detail="Invalid payload")
+ raise HTTPException(status_code=400, detail=routeApiMsg("Invalid payload"))
except Exception as e:
logger.warning(f"Stripe webhook signature verification failed: {e}")
- raise HTTPException(status_code=400, detail="Invalid signature")
+ raise HTTPException(status_code=400, detail=routeApiMsg("Invalid signature"))
logger.info(f"Stripe webhook received: event={event.id}, type={event.type}")
@@ -1284,7 +1246,7 @@ def getAccounts(
Access: SysAdmin (any mandate) or MandateAdmin (own mandate).
"""
if not _isAdminOfMandate(ctx, targetMandateId):
- raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Admin role required for this mandate")
+ raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail=routeApiMsg("Admin role required for this mandate"))
try:
billingInterface = getBillingInterface(ctx.user, targetMandateId)
@@ -1332,7 +1294,7 @@ def getUsersForMandate(
Used by billing admin to select users for credit assignment.
"""
if not _isAdminOfMandate(ctx, targetMandateId):
- raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Admin role required for this mandate")
+ raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail=routeApiMsg("Admin role required for this mandate"))
try:
from modules.interfaces.interfaceDbApp import getInterface as getAppInterface
@@ -1429,32 +1391,20 @@ def _enrichTransactionRows(transactions) -> List[Dict[str, Any]]:
return result
-def _buildTransactionsList(ctx: RequestContext, targetMandateId: str) -> List[Dict[str, Any]]:
- """Build the full enriched transactions list for a mandate."""
+def _buildTransactionsList(ctx: RequestContext, targetMandateId: str, paginationParams: Optional[PaginationParams] = None) -> tuple:
+ """Build enriched transactions for a mandate. Returns (items, paginatedResult|None)."""
billingInterface = getBillingInterface(ctx.user, targetMandateId)
- transactions = billingInterface.getTransactionsByMandate(targetMandateId, limit=5000)
- result = []
- for t in transactions:
- row = TransactionResponse(
- id=t.get("id"),
- accountId=t.get("accountId"),
- transactionType=TransactionTypeEnum(t.get("transactionType", "DEBIT")),
- amount=t.get("amount", 0.0),
- description=t.get("description", ""),
- referenceType=ReferenceTypeEnum(t["referenceType"]) if t.get("referenceType") else None,
- workflowId=t.get("workflowId"),
- featureCode=t.get("featureCode"),
- featureInstanceId=t.get("featureInstanceId"),
- aicoreProvider=t.get("aicoreProvider"),
- aicoreModel=t.get("aicoreModel"),
- createdByUserId=t.get("createdByUserId"),
- createdAt=t.get("sysCreatedAt")
- )
- result.append(row.model_dump())
+ if paginationParams:
+ paginatedResult = billingInterface.getTransactionsByMandate(targetMandateId, pagination=paginationParams)
+ transactions = paginatedResult.items if hasattr(paginatedResult, 'items') else paginatedResult.get("items", [])
+ else:
+ defaultPagination = PaginationParams(page=1, pageSize=200, sort=[{"field": "sysCreatedAt", "direction": "desc"}])
+ paginatedResult = billingInterface.getTransactionsByMandate(targetMandateId, pagination=defaultPagination)
+ transactions = paginatedResult.items if hasattr(paginatedResult, 'items') else paginatedResult.get("items", [])
- _attachCreatedByUserNamesToTransactionRows(result)
- return result
+ result = _enrichTransactionRows(transactions)
+ return result, paginatedResult
@router.get("/admin/transactions/{targetMandateId}")
@@ -1463,12 +1413,11 @@ def getTransactionsAdmin(
request: Request,
targetMandateId: str = Path(..., description="Mandate ID"),
pagination: Optional[str] = Query(None, description="JSON-encoded PaginationParams"),
- limit: int = Query(default=100, ge=1, le=1000),
ctx: RequestContext = Depends(getRequestContext),
):
"""Get all transactions for a mandate with pagination support."""
if not _isAdminOfMandate(ctx, targetMandateId):
- raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Admin role required for this mandate")
+ raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail=routeApiMsg("Admin role required for this mandate"))
try:
paginationParams: Optional[PaginationParams] = None
if pagination:
@@ -1480,26 +1429,22 @@ def getTransactionsAdmin(
except (json.JSONDecodeError, ValueError) as e:
raise HTTPException(status_code=400, detail=f"Invalid pagination parameter: {str(e)}")
- if paginationParams:
- # DB-level pagination — enrich only the returned page
- billingInterface = getBillingInterface(ctx.user, targetMandateId)
- result = billingInterface.getTransactionsByMandate(targetMandateId, pagination=paginationParams)
- transactions = result.items if hasattr(result, 'items') else result
- enrichedItems = _enrichTransactionRows(transactions)
- return {
- "items": enrichedItems,
- "pagination": PaginationMetadata(
- currentPage=paginationParams.page,
- pageSize=paginationParams.pageSize,
- totalItems=result.totalItems if hasattr(result, 'totalItems') else len(enrichedItems),
- totalPages=result.totalPages if hasattr(result, 'totalPages') else 0,
- sort=paginationParams.sort,
- filters=paginationParams.filters,
- ).model_dump(),
- }
+ enriched, paginatedResult = _buildTransactionsList(ctx, targetMandateId, paginationParams)
+ totalItems = getattr(paginatedResult, 'totalItems', len(enriched)) if paginatedResult else len(enriched)
+ totalPages = getattr(paginatedResult, 'totalPages', 0) if paginatedResult else 0
- enriched = _buildTransactionsList(ctx, targetMandateId)
- return {"items": enriched, "pagination": None}
+ paginationMeta = None
+ if paginationParams:
+ paginationMeta = PaginationMetadata(
+ currentPage=paginationParams.page,
+ pageSize=paginationParams.pageSize,
+ totalItems=totalItems,
+ totalPages=totalPages,
+ sort=paginationParams.sort,
+ filters=paginationParams.filters,
+ ).model_dump()
+
+ return {"items": enriched, "pagination": paginationMeta}
except HTTPException:
raise
@@ -1508,50 +1453,6 @@ def getTransactionsAdmin(
raise HTTPException(status_code=500, detail=str(e))
-@router.get("/admin/transactions/{targetMandateId}/filter-values")
-@limiter.limit("60/minute")
-def getTransactionFilterValues(
- request: Request,
- targetMandateId: str = Path(..., description="Mandate ID"),
- column: str = Query(..., description="Column key"),
- pagination: Optional[str] = Query(None, description="JSON-encoded current filters"),
- ctx: RequestContext = Depends(getRequestContext),
-):
- """Return distinct filter values for a column in mandate transactions."""
- if not _isAdminOfMandate(ctx, targetMandateId):
- raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Admin role required for this mandate")
- try:
- crossFilterParams: Optional[PaginationParams] = None
- if pagination:
- try:
- paginationDict = json.loads(pagination)
- if paginationDict:
- paginationDict = normalize_pagination_dict(paginationDict)
- filters = paginationDict.get("filters", {})
- filters.pop(column, None)
- paginationDict["filters"] = filters
- paginationDict.pop("sort", None)
- crossFilterParams = PaginationParams(**paginationDict)
- except (json.JSONDecodeError, ValueError):
- pass
-
- # Try SQL DISTINCT for native DB columns; fallback to in-memory for enriched columns (e.g. userName)
- try:
- rootBillingInterface = _getRootInterface()
- recordFilter = {"mandateId": targetMandateId}
- values = rootBillingInterface.db.getDistinctColumnValues(
- BillingTransaction, column, crossFilterParams, recordFilter
- )
- return sorted(values, key=lambda v: str(v).lower())
- except Exception:
- enriched = _buildTransactionsList(ctx, targetMandateId)
- crossFiltered = _applyFiltersAndSort(enriched, crossFilterParams)
- return _extractDistinctValues(crossFiltered, column)
- except Exception as e:
- logger.error(f"Error getting filter values for transactions: {e}")
- raise HTTPException(status_code=500, detail=str(e))
-
-
# =============================================================================
# Mandate View Endpoints (for Admins)
# =============================================================================
@@ -1689,6 +1590,7 @@ def getUserViewStatistics(
month: Optional[int] = Query(None, description="Month (1-12, required for period='day')"),
scope: str = Query(default="all", description="Scope: 'personal' (own costs only), 'mandate' (filter by mandateId), 'all' (RBAC-filtered)"),
mandateId: Optional[str] = Query(None, description="Mandate ID filter (used with scope='mandate')"),
+ onlyMine: Optional[bool] = Query(None, description="Additional filter: restrict to current user's transactions within the selected scope"),
ctx: RequestContext = Depends(getRequestContext)
) -> ViewStatisticsResponse:
"""
@@ -1699,24 +1601,23 @@ def getUserViewStatistics(
- mandate: transactions for a specific mandate (requires mandateId parameter)
- all: RBAC-filtered (SysAdmin sees everything, admin sees mandate, user sees own)
+ onlyMine: additional filter that restricts results to the current user's
+ transactions while keeping the scope-based mandate selection.
+
- period='month': returns monthly time series for the given year
- period='day': returns daily time series for the given month/year
"""
try:
- from datetime import timedelta
-
if year is None:
year = datetime.now().year
-
+
if period == "day" and not month:
month = datetime.now().month
-
+
billingInterface = getBillingInterface(ctx.user, ctx.mandateId)
-
- # Evaluate RBAC scope
+
rbacScope = _getBillingDataScope(ctx.user)
-
- # Determine mandate IDs for data loading
+
if rbacScope.isGlobalAdmin:
loadMandateIds = None
else:
@@ -1724,151 +1625,75 @@ def getUserViewStatistics(
if not loadMandateIds:
logger.warning("No mandate IDs found for user")
return ViewStatisticsResponse()
-
- # Scope=mandate: restrict to specific mandate
+
if scope == "mandate" and mandateId:
loadMandateIds = [mandateId]
-
- # Get all transactions
- allTransactions = billingInterface.getUserTransactionsForMandates(loadMandateIds, limit=10000)
-
- # Apply RBAC filter (respects admin/user roles)
- allTransactions = _filterTransactionsByScope(allTransactions, rbacScope)
-
- # Scope=personal: further filter to only own transactions
- if scope == "personal":
- userId = str(ctx.user.id)
- allTransactions = [
- t for t in allTransactions
- if (t.get("createdByUserId") or t.get("userId")) == userId
- ]
-
- logger.info(f"View statistics: {len(allTransactions)} RBAC-filtered transactions for period={period}, year={year}, month={month}")
-
- # Calculate date range
+
+ personalUserId = str(ctx.user.id) if (scope == "personal" or onlyMine) else None
+
if period == "day":
startDate = date(year, month, 1)
- if month == 12:
- endDate = date(year + 1, 1, 1)
- else:
- endDate = date(year, month + 1, 1)
+ endDate = date(year + 1, 1, 1) if month == 12 else date(year, month + 1, 1)
else:
startDate = date(year, 1, 1)
endDate = date(year + 1, 1, 1)
-
- # Filter by date range and only DEBIT transactions
- debits = []
- skippedNoDate = 0
- skippedDateRange = 0
- skippedNotDebit = 0
-
- for t in allTransactions:
- createdAt = t.get("sysCreatedAt")
- if not createdAt:
- skippedNoDate += 1
- continue
-
- # Parse date from various formats (DB stores as DOUBLE PRECISION / Unix timestamp)
- txDate = None
- if isinstance(createdAt, (int, float)):
- txDate = datetime.fromtimestamp(createdAt).date()
- elif isinstance(createdAt, datetime):
- txDate = createdAt.date()
- elif isinstance(createdAt, date) and not isinstance(createdAt, datetime):
- txDate = createdAt
- elif isinstance(createdAt, str):
- try:
- # Try as float string first (Unix timestamp)
- txDate = datetime.fromtimestamp(float(createdAt)).date()
- except (ValueError, TypeError):
- try:
- txDate = datetime.fromisoformat(createdAt.replace("Z", "+00:00")).date()
- except (ValueError, TypeError):
- skippedNoDate += 1
- continue
- else:
- skippedNoDate += 1
- continue
-
- if txDate < startDate or txDate >= endDate:
- skippedDateRange += 1
- continue
-
- # Compare transactionType - handle both string and enum
- txType = t.get("transactionType")
- txTypeStr = str(txType) if txType is not None else ""
- if txTypeStr != "DEBIT" and txTypeStr != "TransactionTypeEnum.DEBIT":
- # Also check .value for enum objects
- txTypeValue = getattr(txType, 'value', txTypeStr)
- if txTypeValue != "DEBIT":
- skippedNotDebit += 1
- continue
-
- t["_txDate"] = txDate
- debits.append(t)
-
- logger.info(f"View statistics: {len(debits)} DEBIT transactions after filter. "
- f"Skipped: noDate={skippedNoDate}, dateRange={skippedDateRange}, notDebit={skippedNotDebit}")
-
- # Aggregate totals
- totalCost = sum(t.get("amount", 0) for t in debits)
-
- costByProvider: Dict[str, float] = {}
- costByModel: Dict[str, float] = {}
- costByFeature: Dict[str, float] = {}
+
+ startTs = datetime.combine(startDate, datetime.min.time()).timestamp()
+ endTs = datetime.combine(endDate, datetime.min.time()).timestamp()
+
+ agg = billingInterface.getTransactionStatisticsAggregated(
+ mandateIds=loadMandateIds,
+ scope=scope,
+ userId=personalUserId,
+ startTs=startTs,
+ endTs=endTs,
+ period=period,
+ )
+
+ logger.info(
+ f"View statistics (SQL-aggregated): totalCost={agg['totalCost']}, "
+ f"count={agg['transactionCount']}, period={period}, year={year}, month={month}"
+ )
+
+ allAccounts = agg.get("_allAccounts", [])
+ accountToMandate: Dict[str, str] = {}
+ for acc in allAccounts:
+ accountToMandate[acc.get("id", "")] = acc.get("mandateId", "")
+
+ from modules.interfaces.interfaceDbApp import getInterface as getAppInterface
+ mandateIdsForLookup = list(set(accountToMandate.values()))
+ mandateMap: Dict[str, str] = {}
+ if mandateIdsForLookup:
+ rootIface = getAppInterface(ctx.user)
+ mandatesById = rootIface.getMandatesByIds(mandateIdsForLookup)
+ for mid, m in mandatesById.items():
+ mandateMap[mid] = getattr(m, "name", mid) or mid
+
+ def _mandateName(accountId: str) -> str:
+ mid = accountToMandate.get(accountId, "")
+ return mandateMap.get(mid, mid or "unknown")
+
costByMandate: Dict[str, float] = {}
-
- for t in debits:
- provider = t.get("aicoreProvider") or "unknown"
- costByProvider[provider] = costByProvider.get(provider, 0) + t.get("amount", 0)
-
- model = t.get("aicoreModel") or "unknown"
- costByModel[model] = costByModel.get(model, 0) + t.get("amount", 0)
-
- mandate = t.get("mandateName") or t.get("mandateId") or "unknown"
- featureCode = t.get("featureCode") or "unknown"
- featureKey = f"{mandate} / {featureCode}"
- costByFeature[featureKey] = costByFeature.get(featureKey, 0) + t.get("amount", 0)
-
- mandate = t.get("mandateName") or t.get("mandateId") or "unknown"
- costByMandate[mandate] = costByMandate.get(mandate, 0) + t.get("amount", 0)
-
- # Build time series (raw data only, no display logic)
- timeSeries = []
- if period == "day":
- numDays = (endDate - startDate).days
- for day in range(numDays):
- d = startDate + timedelta(days=day)
- dayCost = sum(t.get("amount", 0) for t in debits if t["_txDate"] == d)
- dayCount = sum(1 for t in debits if t["_txDate"] == d)
- if dayCost > 0 or dayCount > 0:
- timeSeries.append({
- "date": d.isoformat(),
- "cost": round(dayCost, 4),
- "count": dayCount
- })
- else:
- for m in range(1, 13):
- mStart = date(year, m, 1)
- mEnd = date(year, m + 1, 1) if m < 12 else date(year + 1, 1, 1)
- monthCost = sum(t.get("amount", 0) for t in debits if mStart <= t["_txDate"] < mEnd)
- monthCount = sum(1 for t in debits if mStart <= t["_txDate"] < mEnd)
- timeSeries.append({
- "date": f"{year}-{m:02d}",
- "cost": round(monthCost, 4),
- "count": monthCount
- })
-
+ for accId, total in agg.get("costByAccountId", {}).items():
+ name = _mandateName(accId)
+ costByMandate[name] = costByMandate.get(name, 0) + total
+
+ costByFeature: Dict[str, float] = {}
+ for entry in agg.get("costByAccountFeature", []):
+ name = _mandateName(entry["accountId"])
+ key = f"{name} / {entry['featureCode']}"
+ costByFeature[key] = costByFeature.get(key, 0) + entry["total"]
+
return ViewStatisticsResponse(
- totalCost=round(totalCost, 4),
- transactionCount=len(debits),
- costByProvider=costByProvider,
- costByModel=costByModel,
+ totalCost=agg["totalCost"],
+ transactionCount=agg["transactionCount"],
+ costByProvider=agg.get("costByProvider", {}),
+ costByModel=agg.get("costByModel", {}),
costByFeature=costByFeature,
costByMandate=costByMandate,
- timeSeries=timeSeries
+ timeSeries=agg.get("timeSeries", []),
)
-
+
except Exception as e:
logger.error(f"Error getting view statistics: {e}", exc_info=True)
raise HTTPException(status_code=500, detail=str(e))
@@ -1879,77 +1704,66 @@ def getUserViewStatistics(
def getUserViewTransactions(
request: Request,
pagination: Optional[str] = Query(None, description="JSON-encoded PaginationParams object"),
+ scope: str = Query(default="all", description="Scope: 'personal' (own costs only), 'mandate' (filter by mandateId), 'all' (RBAC-filtered)"),
+ mandateId: Optional[str] = Query(None, description="Mandate ID filter (used with scope='mandate')"),
+ onlyMine: Optional[bool] = Query(None, description="Additional filter: restrict to current user's transactions within the selected scope"),
ctx: RequestContext = Depends(getRequestContext)
) -> PaginatedResponse[UserTransactionResponse]:
"""
Get user-level transactions with pagination support.
- RBAC filtering:
- - SysAdmin: sees all user transactions across all mandates
- - Mandate-Admin: sees all user transactions for mandates they administrate
- - Feature-Instance-Admin: sees transactions for their feature instances
- - Regular user: sees only their own transactions
+ Scope (same contract as /view/statistics):
+ - personal: only the current user's own transactions (ignores admin role)
+ - mandate: transactions for a specific mandate (requires mandateId parameter)
+ - all: RBAC-filtered (SysAdmin sees everything, admin sees mandate, user sees own)
+
+ onlyMine: additional filter that restricts results to the current user's
+ transactions while keeping the scope-based mandate selection.
Query Parameters:
- pagination: JSON-encoded PaginationParams object, or None for no pagination
+ - scope: 'personal', 'mandate', or 'all'
+ - mandateId: required when scope='mandate'
+ - onlyMine: true to restrict to current user's data within the scope
"""
try:
billingInterface = getBillingInterface(ctx.user, ctx.mandateId)
-
- # Parse pagination params
+
paginationParams = None
if pagination:
import json
paginationDict = json.loads(pagination)
paginationDict = normalize_pagination_dict(paginationDict)
paginationParams = PaginationParams(**paginationDict)
-
- # Evaluate RBAC scope
- scope = _getBillingDataScope(ctx.user)
-
- # Determine mandate IDs for data loading
- if scope.isGlobalAdmin:
- mandateIds = None # Load all
+
+ rbacScope = _getBillingDataScope(ctx.user)
+
+ if rbacScope.isGlobalAdmin:
+ loadMandateIds = None
else:
- # Load data for all mandates the user belongs to (admin + member)
- mandateIds = scope.adminMandateIds + scope.memberMandateIds
- if not mandateIds:
+ loadMandateIds = rbacScope.adminMandateIds + rbacScope.memberMandateIds
+ if not loadMandateIds:
return PaginatedResponse(items=[], pagination=None)
-
- allTransactions = billingInterface.getUserTransactionsForMandates(mandateIds, limit=10000)
-
- # Apply RBAC filter
- allTransactions = _filterTransactionsByScope(allTransactions, scope)
-
- logger.debug(f"RBAC-filtered {len(allTransactions)} transactions for user {ctx.user.id}")
-
- # Convert to response objects as dicts for filtering/sorting
- transactionDicts = []
- for t in allTransactions:
- transactionDicts.append({
- "id": t.get("id"),
- "accountId": t.get("accountId"),
- "transactionType": t.get("transactionType", "DEBIT"),
- "amount": t.get("amount", 0.0),
- "description": t.get("description", ""),
- "referenceType": t.get("referenceType"),
- "workflowId": t.get("workflowId"),
- "featureCode": t.get("featureCode"),
- "featureInstanceId": t.get("featureInstanceId"),
- "aicoreProvider": t.get("aicoreProvider"),
- "aicoreModel": t.get("aicoreModel"),
- "createdByUserId": t.get("createdByUserId"),
- "createdAt": t.get("sysCreatedAt"),
- "mandateId": t.get("mandateId"),
- "mandateName": t.get("mandateName"),
- "userId": t.get("userId"),
- "userName": t.get("userName"),
- })
-
- # Apply filters and sorting
- filteredDicts = _applyFiltersAndSort(transactionDicts, paginationParams)
-
- # Convert to response models
+
+ if scope == "mandate" and mandateId:
+ loadMandateIds = [mandateId]
+
+ effectiveScope = scope
+ personalUserId = str(ctx.user.id) if (scope == "personal" or onlyMine) else None
+
+ if not paginationParams:
+ paginationParams = PaginationParams(page=1, pageSize=50)
+
+ result = billingInterface.getTransactionsForMandatesPaginated(
+ mandateIds=loadMandateIds,
+ pagination=paginationParams,
+ scope=effectiveScope,
+ userId=personalUserId,
+ )
+
+ logger.debug(f"SQL-paginated {result.totalItems} transactions for user {ctx.user.id} "
+ f"(scope={scope}, mandateId={mandateId}, page={paginationParams.page})")
+
def _toResponse(d):
return UserTransactionResponse(
id=d.get("id"),
@@ -1964,38 +1778,25 @@ def getUserViewTransactions(
aicoreProvider=d.get("aicoreProvider"),
aicoreModel=d.get("aicoreModel"),
createdByUserId=d.get("createdByUserId"),
- createdAt=d.get("createdAt"),
+ createdAt=d.get("sysCreatedAt") or d.get("createdAt"),
mandateId=d.get("mandateId"),
mandateName=d.get("mandateName"),
userId=d.get("userId"),
userName=d.get("userName")
)
-
- if paginationParams:
- import math
- totalItems = len(filteredDicts)
- totalPages = math.ceil(totalItems / paginationParams.pageSize) if totalItems > 0 else 0
- startIdx = (paginationParams.page - 1) * paginationParams.pageSize
- endIdx = startIdx + paginationParams.pageSize
- paginatedDicts = filteredDicts[startIdx:endIdx]
-
- return PaginatedResponse(
- items=[_toResponse(d) for d in paginatedDicts],
- pagination=PaginationMetadata(
- currentPage=paginationParams.page,
- pageSize=paginationParams.pageSize,
- totalItems=totalItems,
- totalPages=totalPages,
- sort=paginationParams.sort,
- filters=paginationParams.filters
- )
+
+ return PaginatedResponse(
+ items=[_toResponse(d) for d in result.items],
+ pagination=PaginationMetadata(
+ currentPage=paginationParams.page,
+ pageSize=paginationParams.pageSize,
+ totalItems=result.totalItems,
+ totalPages=result.totalPages,
+ sort=paginationParams.sort,
+ filters=paginationParams.filters,
)
- else:
- return PaginatedResponse(
- items=[_toResponse(d) for d in filteredDicts],
- pagination=None
- )
-
+ )
+
except Exception as e:
logger.error(f"Error getting user view transactions: {e}")
raise HTTPException(status_code=500, detail=str(e))
@@ -2007,42 +1808,50 @@ def getUserViewTransactionsFilterValues(
request: Request,
column: str = Query(..., description="Column key"),
pagination: Optional[str] = Query(None, description="JSON-encoded current filters"),
+ scope: str = Query(default="all", description="Scope: 'personal', 'mandate', 'all'"),
+ mandateId: Optional[str] = Query(None, description="Mandate ID filter (used with scope='mandate')"),
+ onlyMine: Optional[bool] = Query(None, description="Additional filter: restrict to current user's data within the selected scope"),
ctx: RequestContext = Depends(getRequestContext)
):
- """Return distinct filter values for a column in user transactions."""
+ """Return distinct filter values for a column in user transactions (SQL DISTINCT)."""
try:
billingInterface = getBillingInterface(ctx.user, ctx.mandateId)
- scope = _getBillingDataScope(ctx.user)
- if scope.isGlobalAdmin:
- mandateIds = None
+ rbacScope = _getBillingDataScope(ctx.user)
+
+ if rbacScope.isGlobalAdmin:
+ loadMandateIds = None
else:
- mandateIds = scope.adminMandateIds + scope.memberMandateIds
- if not mandateIds:
+ loadMandateIds = rbacScope.adminMandateIds + rbacScope.memberMandateIds
+ if not loadMandateIds:
return []
- allTransactions = billingInterface.getUserTransactionsForMandates(mandateIds, limit=10000)
- allTransactions = _filterTransactionsByScope(allTransactions, scope)
- transactionDicts = []
- for t in allTransactions:
- transactionDicts.append({
- "id": t.get("id"),
- "accountId": t.get("accountId"),
- "transactionType": t.get("transactionType", "DEBIT"),
- "amount": t.get("amount", 0.0),
- "description": t.get("description", ""),
- "referenceType": t.get("referenceType"),
- "workflowId": t.get("workflowId"),
- "featureCode": t.get("featureCode"),
- "featureInstanceId": t.get("featureInstanceId"),
- "aicoreProvider": t.get("aicoreProvider"),
- "aicoreModel": t.get("aicoreModel"),
- "createdByUserId": t.get("createdByUserId"),
- "createdAt": t.get("sysCreatedAt"),
- "mandateId": t.get("mandateId"),
- "mandateName": t.get("mandateName"),
- "userId": t.get("userId"),
- "userName": t.get("userName"),
- })
- return _handleFilterValuesRequest(transactionDicts, column, pagination)
+
+ if scope == "mandate" and mandateId:
+ loadMandateIds = [mandateId]
+
+ crossFilterParams = None
+ if pagination:
+ try:
+ import json
+ paginationDict = json.loads(pagination)
+ if paginationDict:
+ paginationDict = normalize_pagination_dict(paginationDict)
+ filters = paginationDict.get("filters", {})
+ filters.pop(column, None)
+ paginationDict["filters"] = filters
+ paginationDict.pop("sort", None)
+ crossFilterParams = PaginationParams(**paginationDict)
+ except (json.JSONDecodeError, ValueError):
+ pass
+
+ personalUserId = str(ctx.user.id) if (scope == "personal" or onlyMine) else None
+
+ return billingInterface.getTransactionDistinctValues(
+ mandateIds=loadMandateIds,
+ column=column,
+ pagination=crossFilterParams,
+ scope=scope,
+ userId=personalUserId,
+ )
except Exception as e:
logger.error(f"Error getting filter values for user transactions: {e}")
raise HTTPException(status_code=500, detail=str(e))
diff --git a/modules/routes/routeClickup.py b/modules/routes/routeClickup.py
index 1603fa23..ccf1c481 100644
--- a/modules/routes/routeClickup.py
+++ b/modules/routes/routeClickup.py
@@ -1,17 +1,18 @@
# Copyright (c) 2025 Patrick Motsch
# All rights reserved.
-"""ClickUp API routes — teams, hierarchy, lists, tasks (connection-scoped)."""
+"""ClickUp API routes — lists and tasks (connection-scoped). OAuth lives under /api/clickup/auth/* in routeSecurityClickup."""
import logging
from typing import Any, Dict, Optional
from fastapi import APIRouter, Depends, HTTPException, Path, Query, Request, status
-from pydantic import BaseModel
-
from modules.auth import getCurrentUser, limiter
from modules.datamodels.datamodelUam import AuthAuthority, User, UserConnection
from modules.interfaces.interfaceDbApp import getInterface
from modules.serviceHub import getInterface as getServices
+from modules.shared.i18nRegistry import apiRouteContext
+
+routeApiMsg = apiRouteContext("routeClickup")
logger = logging.getLogger(__name__)
@@ -42,12 +43,12 @@ def _getUserConnection(interface, connection_id: str, user_id: str) -> Optional[
def _clickup_connection_or_404(interface, connection_id: str, user_id: str) -> UserConnection:
connection = _getUserConnection(interface, connection_id, user_id)
if not connection:
- raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Connection not found")
+ raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=routeApiMsg("Connection not found"))
authority = connection.authority.value if hasattr(connection.authority, "value") else str(connection.authority)
if authority.lower() != AuthAuthority.CLICKUP.value:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
- detail="Connection is not a ClickUp connection",
+ detail=routeApiMsg("Connection is not a ClickUp connection"),
)
return connection
@@ -57,27 +58,11 @@ def _svc_for_connection(current_user: User, connection: UserConnection):
if not services.clickup.setAccessTokenFromConnection(connection):
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
- detail="Failed to set ClickUp access token",
+ detail=routeApiMsg("Failed to set ClickUp access token"),
)
return services.clickup
-# --- Routes (prefix is /api/clickup; OAuth lives under /api/clickup/auth/* in routeSecurityClickup) ---
-
-
-@router.get("/{connectionId}/teams", response_model=Dict[str, Any])
-@limiter.limit("30/minute")
-async def get_teams(
- request: Request,
- connectionId: str = Path(..., description="ClickUp UserConnection id"),
- currentUser: User = Depends(getCurrentUser),
-) -> Dict[str, Any]:
- interface = getInterface(currentUser)
- conn = _clickup_connection_or_404(interface, connectionId, currentUser.id)
- cu = _svc_for_connection(currentUser, conn)
- return await cu.getAuthorizedTeams()
-
-
@router.get("/{connectionId}/teams/{teamId}", response_model=Dict[str, Any])
@limiter.limit("60/minute")
async def get_team(
@@ -93,62 +78,6 @@ async def get_team(
return await cu.getTeam(teamId)
-@router.get("/{connectionId}/teams/{teamId}/spaces", response_model=Dict[str, Any])
-@limiter.limit("60/minute")
-async def get_spaces(
- request: Request,
- connectionId: str = Path(...),
- teamId: str = Path(...),
- currentUser: User = Depends(getCurrentUser),
-) -> Dict[str, Any]:
- interface = getInterface(currentUser)
- conn = _clickup_connection_or_404(interface, connectionId, currentUser.id)
- cu = _svc_for_connection(currentUser, conn)
- return await cu.getSpaces(teamId)
-
-
-@router.get("/{connectionId}/spaces/{spaceId}/folders", response_model=Dict[str, Any])
-@limiter.limit("60/minute")
-async def get_folders(
- request: Request,
- connectionId: str = Path(...),
- spaceId: str = Path(...),
- currentUser: User = Depends(getCurrentUser),
-) -> Dict[str, Any]:
- interface = getInterface(currentUser)
- conn = _clickup_connection_or_404(interface, connectionId, currentUser.id)
- cu = _svc_for_connection(currentUser, conn)
- return await cu.getFolders(spaceId)
-
-
-@router.get("/{connectionId}/spaces/{spaceId}/lists", response_model=Dict[str, Any])
-@limiter.limit("60/minute")
-async def get_folderless_lists(
- request: Request,
- connectionId: str = Path(...),
- spaceId: str = Path(...),
- currentUser: User = Depends(getCurrentUser),
-) -> Dict[str, Any]:
- interface = getInterface(currentUser)
- conn = _clickup_connection_or_404(interface, connectionId, currentUser.id)
- cu = _svc_for_connection(currentUser, conn)
- return await cu.getFolderlessLists(spaceId)
-
-
-@router.get("/{connectionId}/folders/{folderId}/lists", response_model=Dict[str, Any])
-@limiter.limit("60/minute")
-async def get_lists_in_folder(
- request: Request,
- connectionId: str = Path(...),
- folderId: str = Path(...),
- currentUser: User = Depends(getCurrentUser),
-) -> Dict[str, Any]:
- interface = getInterface(currentUser)
- conn = _clickup_connection_or_404(interface, connectionId, currentUser.id)
- cu = _svc_for_connection(currentUser, conn)
- return await cu.getListsInFolder(folderId)
-
-
@router.get("/{connectionId}/lists/{listId}", response_model=Dict[str, Any])
@limiter.limit("60/minute")
async def get_list(
@@ -193,29 +122,6 @@ async def get_list_tasks(
return await cu.getTasksInList(listId, page=page, include_closed=include_closed)
-class TaskCreateBody(BaseModel):
- body: Dict[str, Any]
-
-
-@router.post("/{connectionId}/lists/{listId}/tasks", response_model=Dict[str, Any])
-@limiter.limit("30/minute")
-async def create_list_task(
- request: Request,
- payload: TaskCreateBody,
- connectionId: str = Path(...),
- listId: str = Path(...),
- currentUser: User = Depends(getCurrentUser),
-) -> Dict[str, Any]:
- interface = getInterface(currentUser)
- conn = _clickup_connection_or_404(interface, connectionId, currentUser.id)
- cu = _svc_for_connection(currentUser, conn)
- return await cu.createTask(listId, payload.body)
-
-
-class TaskUpdateBody(BaseModel):
- body: Dict[str, Any]
-
-
@router.get("/{connectionId}/tasks/{taskId}", response_model=Dict[str, Any])
@limiter.limit("60/minute")
async def get_task(
@@ -228,61 +134,3 @@ async def get_task(
conn = _clickup_connection_or_404(interface, connectionId, currentUser.id)
cu = _svc_for_connection(currentUser, conn)
return await cu.getTask(taskId)
-
-
-@router.put("/{connectionId}/tasks/{taskId}", response_model=Dict[str, Any])
-@limiter.limit("30/minute")
-async def update_task(
- request: Request,
- payload: TaskUpdateBody,
- connectionId: str = Path(...),
- taskId: str = Path(...),
- currentUser: User = Depends(getCurrentUser),
-) -> Dict[str, Any]:
- interface = getInterface(currentUser)
- conn = _clickup_connection_or_404(interface, connectionId, currentUser.id)
- cu = _svc_for_connection(currentUser, conn)
- return await cu.updateTask(taskId, payload.body)
-
-
-@router.delete("/{connectionId}/tasks/{taskId}", response_model=Dict[str, Any])
-@limiter.limit("30/minute")
-async def delete_task(
- request: Request,
- connectionId: str = Path(...),
- taskId: str = Path(...),
- currentUser: User = Depends(getCurrentUser),
-) -> Dict[str, Any]:
- interface = getInterface(currentUser)
- conn = _clickup_connection_or_404(interface, connectionId, currentUser.id)
- cu = _svc_for_connection(currentUser, conn)
- return await cu.deleteTask(taskId)
-
-
-@router.get("/{connectionId}/teams/{teamId}/tasks/search", response_model=Dict[str, Any])
-@limiter.limit("30/minute")
-async def search_team_tasks(
- request: Request,
- connectionId: str = Path(...),
- teamId: str = Path(...),
- query: str = Query(..., description="Search query"),
- page: int = Query(0),
- currentUser: User = Depends(getCurrentUser),
-) -> Dict[str, Any]:
- interface = getInterface(currentUser)
- conn = _clickup_connection_or_404(interface, connectionId, currentUser.id)
- cu = _svc_for_connection(currentUser, conn)
- return await cu.searchTeamTasks(teamId, query=query, page=page)
-
-
-@router.get("/{connectionId}/user", response_model=Dict[str, Any])
-@limiter.limit("30/minute")
-async def get_authorized_user(
- request: Request,
- connectionId: str = Path(...),
- currentUser: User = Depends(getCurrentUser),
-) -> Dict[str, Any]:
- interface = getInterface(currentUser)
- conn = _clickup_connection_or_404(interface, connectionId, currentUser.id)
- cu = _svc_for_connection(currentUser, conn)
- return await cu.getAuthorizedUser()
diff --git a/modules/routes/routeDataConnections.py b/modules/routes/routeDataConnections.py
index d01992c5..5e7b2c7e 100644
--- a/modules/routes/routeDataConnections.py
+++ b/modules/routes/routeDataConnections.py
@@ -26,6 +26,8 @@ from modules.datamodels.datamodelPagination import PaginationParams, PaginatedRe
from modules.interfaces.interfaceDbApp import getInterface
from modules.shared.timeUtils import getUtcTimestamp, parseTimestamp
from modules.interfaces.interfaceDbManagement import ComponentObjects
+from modules.shared.i18nRegistry import apiRouteContext
+routeApiMsg = apiRouteContext("routeDataConnections")
# Configure logger
logger = logging.getLogger(__name__)
@@ -414,7 +416,7 @@ def update_connection(
if not connection:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
- detail="Connection not found"
+ detail=routeApiMsg("Connection not found")
)
# Update connection fields
@@ -486,7 +488,7 @@ def connect_service(
if not connection:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
- detail="Connection not found"
+ detail=routeApiMsg("Connection not found")
)
# Data-app OAuth (JWT state issued server-side in /auth/connect)
@@ -542,7 +544,7 @@ def disconnect_service(
if not connection:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
- detail="Connection not found"
+ detail=routeApiMsg("Connection not found")
)
# Update connection status
@@ -592,7 +594,7 @@ def delete_connection(
if not connection:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
- detail="Connection not found"
+ detail=routeApiMsg("Connection not found")
)
# Remove the connection - only need connectionId since permissions are verified
diff --git a/modules/routes/routeDataFiles.py b/modules/routes/routeDataFiles.py
index 17e0ef56..49eaa000 100644
--- a/modules/routes/routeDataFiles.py
+++ b/modules/routes/routeDataFiles.py
@@ -17,6 +17,8 @@ from modules.datamodels.datamodelFileFolder import FileFolder
from modules.shared.attributeUtils import getModelAttributeDefinitions
from modules.datamodels.datamodelUam import User
from modules.datamodels.datamodelPagination import PaginationParams, PaginatedResponse, PaginationMetadata, normalize_pagination_dict
+from modules.shared.i18nRegistry import apiRouteContext
+routeApiMsg = apiRouteContext("routeDataFiles")
# Configure logger
logger = logging.getLogger(__name__)
@@ -141,6 +143,8 @@ async def _autoIndexFile(fileId: str, fileName: str, mimeType: str, user):
structure=contentIndex.structure,
)
+ # Re-acquire interface after await to avoid stale user context from the singleton
+ mgmtInterface = interfaceDbManagement.getInterface(user)
mgmtInterface.updateFile(fileId, {"status": "active"})
logger.info(f"Auto-index complete for file {fileId} ({fileName})")
@@ -169,7 +173,7 @@ router = APIRouter(
)
@router.get("/list", response_model=PaginatedResponse[FileItem])
-@limiter.limit("30/minute")
+@limiter.limit("120/minute")
def get_files(
request: Request,
pagination: Optional[str] = Query(None, description="JSON-encoded PaginationParams object"),
@@ -203,12 +207,17 @@ def get_files(
detail=f"Invalid pagination parameter: {str(e)}"
)
+ recordFilter = None
+ if paginationParams and paginationParams.filters and "folderId" in paginationParams.filters:
+ fVal = paginationParams.filters.pop("folderId")
+ recordFilter = {"folderId": fVal}
+
managementInterface = interfaceDbManagement.getInterface(
currentUser,
mandateId=str(context.mandateId) if context.mandateId else None,
featureInstanceId=str(context.featureInstanceId) if context.featureInstanceId else None
)
- result = managementInterface.getAllFiles(pagination=paginationParams)
+ result = managementInterface.getAllFiles(pagination=paginationParams, recordFilter=recordFilter)
# If pagination was requested, result is PaginatedResult
# If no pagination, result is List[FileItem]
@@ -422,7 +431,7 @@ def create_folder(
name = body.get("name", "")
parentId = body.get("parentId")
if not name:
- raise HTTPException(status_code=400, detail="name is required")
+ raise HTTPException(status_code=400, detail=routeApiMsg("name is required"))
try:
mgmt = interfaceDbManagement.getInterface(
currentUser,
@@ -449,7 +458,7 @@ def rename_folder(
"""Rename a folder."""
newName = body.get("name", "")
if not newName:
- raise HTTPException(status_code=400, detail="name is required")
+ raise HTTPException(status_code=400, detail=routeApiMsg("name is required"))
try:
mgmt = interfaceDbManagement.getInterface(
currentUser,
@@ -554,7 +563,7 @@ def download_folder(
fileEntries = _collectFiles(folderId, "")
if not fileEntries:
- raise HTTPException(status_code=404, detail="Folder is empty")
+ raise HTTPException(status_code=404, detail=routeApiMsg("Folder is empty"))
buf = io.BytesIO()
with zipfile.ZipFile(buf, "w", zipfile.ZIP_DEFLATED) as zf:
@@ -595,7 +604,7 @@ def batch_delete_items(
recursiveFolders = bool(body.get("recursiveFolders", True))
if not isinstance(fileIds, list) or not isinstance(folderIds, list):
- raise HTTPException(status_code=400, detail="fileIds and folderIds must be arrays")
+ raise HTTPException(status_code=400, detail=routeApiMsg("fileIds and folderIds must be arrays"))
try:
mgmt = interfaceDbManagement.getInterface(
@@ -638,7 +647,7 @@ def batch_move_items(
targetParentId = body.get("targetParentId")
if not isinstance(fileIds, list) or not isinstance(folderIds, list):
- raise HTTPException(status_code=400, detail="fileIds and folderIds must be arrays")
+ raise HTTPException(status_code=400, detail=routeApiMsg("fileIds and folderIds must be arrays"))
try:
mgmt = interfaceDbManagement.getInterface(
@@ -683,7 +692,7 @@ def updateFileScope(
raise HTTPException(status_code=400, detail=f"Invalid scope: {scope}. Must be one of {validScopes}")
if scope == "global" and not context.hasSysAdminRole:
- raise HTTPException(status_code=403, detail="Only sysadmins can set global scope")
+ raise HTTPException(status_code=403, detail=routeApiMsg("Only sysadmins can set global scope"))
managementInterface = interfaceDbManagement.getInterface(
context.user,
@@ -862,9 +871,13 @@ def update_file(
) -> FileItem:
"""Update file info"""
try:
+ _EDITABLE_FIELDS = {"fileName", "scope", "tags", "description", "folderId", "neutralize"}
+ safeData = {k: v for k, v in file_info.items() if k in _EDITABLE_FIELDS}
+ if not safeData:
+ raise HTTPException(status_code=400, detail=routeApiMsg("No editable fields provided"))
+
managementInterface = interfaceDbManagement.getInterface(currentUser)
- # Get the file from the database
file = managementInterface.getFile(fileId)
if not file:
raise HTTPException(
@@ -872,25 +885,23 @@ def update_file(
detail=f"File with ID {fileId} not found"
)
- if file_info.get("scope") == "global" and not _hasSysAdminRole(str(currentUser.id)):
+ if safeData.get("scope") == "global" and not _hasSysAdminRole(str(currentUser.id)):
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
- detail="Only sysadmins can set global scope",
+ detail=routeApiMsg("Only sysadmins can set global scope"),
)
- # Check if user has access to the file using RBAC
if not managementInterface.checkRbacPermission(FileItem, "update", fileId):
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
- detail="Not authorized to update this file"
+ detail=routeApiMsg("Not authorized to update this file")
)
- # Update the file
- result = managementInterface.updateFile(fileId, file_info)
+ result = managementInterface.updateFile(fileId, safeData)
if not result:
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
- detail="Failed to update file"
+ detail=routeApiMsg("Failed to update file")
)
# Get updated file
@@ -928,49 +939,11 @@ def delete_file(
if not success:
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
- detail="Error deleting the file"
+ detail=routeApiMsg("Error deleting the file")
)
return {"message": f"File with ID {fileId} successfully deleted"}
-@router.get("/stats", response_model=Dict[str, Any])
-@limiter.limit("30/minute")
-def get_file_stats(
- request: Request,
- currentUser: User = Depends(getCurrentUser)
-) -> Dict[str, Any]:
- """Returns statistics about the stored files"""
- try:
- managementInterface = interfaceDbManagement.getInterface(currentUser)
-
- # Get all files - metadata only
- allFiles = managementInterface.getAllFiles()
-
- # Calculate statistics
- totalFiles = len(allFiles)
- totalSize = sum(file.fileSize for file in allFiles)
-
- # Group by file type
- fileTypes = {}
- for file in allFiles:
- fileType = file.mimeType.split("/")[0]
- if fileType not in fileTypes:
- fileTypes[fileType] = 0
- fileTypes[fileType] += 1
-
- return {
- "totalFiles": totalFiles,
- "totalSizeBytes": totalSize,
- "fileTypes": fileTypes
- }
-
- except Exception as e:
- logger.error(f"Error retrieving file statistics: {str(e)}")
- raise HTTPException(
- status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
- detail=f"Error retrieving file statistics: {str(e)}"
- )
-
@router.get("/{fileId}/download")
@limiter.limit("30/minute")
def download_file(
diff --git a/modules/routes/routeDataMandates.py b/modules/routes/routeDataMandates.py
index cb6a3efc..0fcb6303 100644
--- a/modules/routes/routeDataMandates.py
+++ b/modules/routes/routeDataMandates.py
@@ -32,6 +32,8 @@ from modules.datamodels.datamodelRbac import Role
from modules.datamodels.datamodelPagination import PaginationParams, PaginatedResponse, PaginationMetadata, normalize_pagination_dict
from modules.routes.routeNotifications import create_access_change_notification
from modules.serviceCenter.services.serviceSubscription.mainServiceSubscription import SubscriptionCapacityException
+from modules.shared.i18nRegistry import apiRouteContext
+routeApiMsg = apiRouteContext("routeDataMandates")
# =============================================================================
@@ -103,7 +105,7 @@ def get_mandates(
if not adminMandateIds:
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
- detail="Admin role required"
+ detail=routeApiMsg("Admin role required")
)
# Parse pagination parameter
@@ -180,7 +182,7 @@ def get_mandate_filter_values(
if not isSysAdmin:
adminMandateIds = _getAdminMandateIds(context)
if not adminMandateIds:
- raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Admin role required")
+ raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail=routeApiMsg("Admin role required"))
appInterface = interfaceDbApp.getRootInterface()
@@ -248,7 +250,7 @@ def get_mandate(
if mandateId not in adminMandateIds:
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
- detail="Admin role required for this mandate"
+ detail=routeApiMsg("Admin role required for this mandate")
)
appInterface = interfaceDbApp.getRootInterface()
@@ -289,7 +291,7 @@ def create_mandate(
if not name or (isinstance(name, str) and name.strip() == ''):
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
- detail="Mandate name is required"
+ detail=routeApiMsg("Mandate name is required")
)
# Get optional fields with defaults
@@ -308,7 +310,7 @@ def create_mandate(
if not newMandate:
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
- detail="Failed to create mandate"
+ detail=routeApiMsg("Failed to create mandate")
)
try:
@@ -327,7 +329,7 @@ def create_mandate(
from modules.interfaces.interfaceDbSubscription import _getRootInterface as _getSubRoot
from datetime import datetime, timezone, timedelta
- planKey = mandateData.get("planKey", "TRIAL_7D")
+ planKey = mandateData.get("planKey", "TRIAL_14D")
plan = BUILTIN_PLANS.get(planKey)
if plan:
now = datetime.now(timezone.utc)
@@ -392,7 +394,7 @@ def update_mandate(
if not updatedMandate:
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
- detail="Failed to update mandate"
+ detail=routeApiMsg("Failed to update mandate")
)
logger.info(f"Mandate {mandateId} updated by SysAdmin {currentUser.id}")
@@ -438,7 +440,7 @@ def delete_mandate(
if confirmName != mandateName:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
- detail="Hard-delete requires X-Confirm-Name header matching the mandate name"
+ detail=routeApiMsg("Hard-delete requires X-Confirm-Name header matching the mandate name")
)
try:
@@ -487,7 +489,7 @@ def list_mandate_users(
if not _hasMandateAdminRole(context, targetMandateId) and not context.hasSysAdminRole:
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
- detail="Mandate-Admin role required"
+ detail=routeApiMsg("Mandate-Admin role required")
)
try:
@@ -647,7 +649,7 @@ def get_mandate_users_filter_values(
) -> list:
"""Return distinct filter values for a column in mandate users."""
if not _hasMandateAdminRole(context, targetMandateId) and not context.hasSysAdminRole:
- raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Mandate-Admin role required")
+ raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail=routeApiMsg("Mandate-Admin role required"))
try:
from modules.routes.routeDataUsers import _handleFilterValuesRequest
@@ -714,7 +716,7 @@ def add_user_to_mandate(
if not _hasMandateAdminRole(context, targetMandateId):
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
- detail="Mandate-Admin role required to add users"
+ detail=routeApiMsg("Mandate-Admin role required to add users")
)
try:
@@ -777,7 +779,7 @@ def add_user_to_mandate(
f"with roles {data.roleIds}"
)
- mname = _mandate_display_name(mandate)
+ mname = _mandate_display_name(mandate, getattr(targetUser, "language", None))
create_access_change_notification(
data.targetUserId,
"Mandantenzugriff",
@@ -831,7 +833,7 @@ def remove_user_from_mandate(
if not _hasMandateAdminRole(context, targetMandateId):
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
- detail="Mandate-Admin role required"
+ detail=routeApiMsg("Mandate-Admin role required")
)
try:
@@ -857,7 +859,7 @@ def remove_user_from_mandate(
if _isLastMandateAdmin(rootInterface, targetMandateId, targetUserId):
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
- detail="Cannot remove the last admin from a mandate. Assign another admin first."
+ detail=routeApiMsg("Cannot remove the last admin from a mandate. Assign another admin first.")
)
# Delete UserMandate (CASCADE will delete UserMandateRole entries)
@@ -875,7 +877,9 @@ def remove_user_from_mandate(
logger.info(f"User {context.user.id} removed user {targetUserId} from mandate {targetMandateId}")
- mname = _mandate_display_name(mandate)
+ removedUser = rootInterface.getUser(targetUserId)
+ notifyLang = getattr(removedUser, "language", None) if removedUser else getattr(context.user, "language", None)
+ mname = _mandate_display_name(mandate, notifyLang)
create_access_change_notification(
targetUserId,
"Mandantenzugriff",
@@ -920,7 +924,7 @@ def update_user_roles_in_mandate(
if not _hasMandateAdminRole(context, targetMandateId):
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
- detail="Mandate-Admin role required"
+ detail=routeApiMsg("Mandate-Admin role required")
)
try:
@@ -953,7 +957,7 @@ def update_user_roles_in_mandate(
if _isLastMandateAdmin(rootInterface, targetMandateId, targetUserId):
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
- detail="Cannot remove admin role from the last admin. Assign another admin first."
+ detail=routeApiMsg("Cannot remove admin role from the last admin. Assign another admin first.")
)
# Remove existing role assignments
@@ -980,7 +984,9 @@ def update_user_roles_in_mandate(
)
mandate_meta = rootInterface.getMandate(targetMandateId)
- mname = _mandate_display_name(mandate_meta)
+ roleUser = rootInterface.getUser(targetUserId)
+ notifyLang = getattr(roleUser, "language", None) if roleUser else getattr(context.user, "language", None)
+ mname = _mandate_display_name(mandate_meta, notifyLang)
create_access_change_notification(
targetUserId,
"Mandantenrollen geändert",
@@ -1011,7 +1017,7 @@ def update_user_roles_in_mandate(
# Helper Functions
# =============================================================================
-def _mandate_display_name(mandate: Any) -> str:
+def _mandate_display_name(mandate: Any, requestLang: Optional[str] = None) -> str:
"""Human-readable mandate label for notifications."""
if mandate is None:
return ""
@@ -1020,14 +1026,16 @@ def _mandate_display_name(mandate: Any) -> str:
return str(mandate["label"])
name = mandate.get("name")
if isinstance(name, dict):
- return str(name.get("de") or name.get("en") or (next(iter(name.values()), "") if name else ""))
+ picked = (requestLang and name.get(requestLang)) or name.get("xx") or next(iter(name.values()), "")
+ return str(picked) if picked is not None else ""
return str(name or mandate.get("id", ""))
label = getattr(mandate, "label", None)
if label:
return str(label)
name = getattr(mandate, "name", None)
if isinstance(name, dict):
- return str(name.get("de") or name.get("en") or (next(iter(name.values()), "") if name else ""))
+ picked = (requestLang and name.get(requestLang)) or name.get("xx") or next(iter(name.values()), "")
+ return str(picked) if picked is not None else ""
if name is not None:
return str(name)
return str(getattr(mandate, "id", ""))
diff --git a/modules/routes/routeDataPrompts.py b/modules/routes/routeDataPrompts.py
index f9246ab6..2644b7e3 100644
--- a/modules/routes/routeDataPrompts.py
+++ b/modules/routes/routeDataPrompts.py
@@ -14,6 +14,8 @@ import modules.interfaces.interfaceDbManagement as interfaceDbManagement
from modules.datamodels.datamodelUtils import Prompt
from modules.datamodels.datamodelUam import User
from modules.datamodels.datamodelPagination import PaginationParams, PaginatedResponse, PaginationMetadata, normalize_pagination_dict
+from modules.shared.i18nRegistry import apiRouteContext
+routeApiMsg = apiRouteContext("routeDataPrompts")
# Configure logger
logger = logging.getLogger(__name__)
@@ -173,7 +175,7 @@ def update_prompt(
if not updatedPrompt:
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
- detail="Error updating the prompt"
+ detail=routeApiMsg("Error updating the prompt")
)
return Prompt(**updatedPrompt)
@@ -207,7 +209,7 @@ def delete_prompt(
if not success:
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
- detail="Error deleting the prompt"
+ detail=routeApiMsg("Error deleting the prompt")
)
return {"message": f"Prompt with ID {promptId} successfully deleted"}
\ No newline at end of file
diff --git a/modules/routes/routeDataSources.py b/modules/routes/routeDataSources.py
index e210d094..db4b9a4f 100644
--- a/modules/routes/routeDataSources.py
+++ b/modules/routes/routeDataSources.py
@@ -10,6 +10,8 @@ from modules.auth import limiter, getRequestContext, RequestContext
from modules.auth.authentication import _hasSysAdminRole
from modules.datamodels.datamodelDataSource import DataSource
from modules.datamodels.datamodelFeatureDataSource import FeatureDataSource
+from modules.shared.i18nRegistry import apiRouteContext
+routeApiMsg = apiRouteContext("routeDataSources")
logger = logging.getLogger(__name__)
@@ -52,7 +54,7 @@ def _updateDataSourceScope(
raise HTTPException(status_code=400, detail=f"Invalid scope: {scope}. Must be one of {_VALID_SCOPES}")
if scope == "global" and not _hasSysAdminRole(context.user):
- raise HTTPException(status_code=403, detail="Only sysadmins can set global scope")
+ raise HTTPException(status_code=403, detail=routeApiMsg("Only sysadmins can set global scope"))
try:
from modules.interfaces.interfaceDbApp import getRootInterface
diff --git a/modules/routes/routeDataUsers.py b/modules/routes/routeDataUsers.py
index 7cce66ca..28d11392 100644
--- a/modules/routes/routeDataUsers.py
+++ b/modules/routes/routeDataUsers.py
@@ -24,6 +24,8 @@ from modules.auth import limiter, getRequestContext, RequestContext
from modules.datamodels.datamodelUam import User, UserInDB, AuthAuthority
from modules.interfaces.interfaceDbApp import getRootInterface
from modules.datamodels.datamodelPagination import PaginationParams, PaginatedResponse, PaginationMetadata, normalize_pagination_dict
+from modules.shared.i18nRegistry import apiRouteContext, resolveText
+routeApiMsg = apiRouteContext("routeDataUsers")
# Configure logger
logger = logging.getLogger(__name__)
@@ -69,7 +71,11 @@ def _isAdminForUser(context: RequestContext, targetUserId: str) -> bool:
return False
-def _extractDistinctValues(items: List[Dict[str, Any]], columnKey: str) -> List[str]:
+def _extractDistinctValues(
+ items: List[Dict[str, Any]],
+ columnKey: str,
+ requestLang: Optional[str] = None,
+) -> List[str]:
"""Extract sorted distinct display values for a column from enriched items."""
values = set()
for item in items:
@@ -81,9 +87,9 @@ def _extractDistinctValues(items: List[Dict[str, Any]], columnKey: str) -> List[
elif isinstance(val, (int, float)):
values.add(str(val))
elif isinstance(val, dict):
- text = val.get("en") or next((v for v in val.values() if isinstance(v, str) and v), None)
+ text = resolveText(val, requestLang)
if text:
- values.add(str(text))
+ values.add(text)
else:
values.add(str(val))
return sorted(values, key=lambda v: v.lower())
@@ -93,6 +99,7 @@ def _handleFilterValuesRequest(
items: List[Dict[str, Any]],
column: str,
paginationJson: Optional[str] = None,
+ requestLang: Optional[str] = None,
) -> List[str]:
"""
Generic handler for /filter-values endpoints.
@@ -115,7 +122,7 @@ def _handleFilterValuesRequest(
pass
crossFiltered = _applyFiltersAndSort(items, crossFilterParams)
- return _extractDistinctValues(crossFiltered, column)
+ return _extractDistinctValues(crossFiltered, column, requestLang)
def _applyFiltersAndSort(items: List[Dict[str, Any]], paginationParams: Optional[PaginationParams]) -> List[Dict[str, Any]]:
@@ -297,7 +304,7 @@ def get_user_options(
elif context.hasSysAdminRole:
users = appInterface.getAllUsers()
else:
- raise HTTPException(status_code=403, detail="Access denied")
+ raise HTTPException(status_code=403, detail=routeApiMsg("Access denied"))
return [
{"value": user.id, "label": user.fullName or user.username or user.email or user.id}
@@ -420,26 +427,21 @@ def get_users(
if not adminMandateIds:
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
- detail="No admin access to any mandate"
+ detail=routeApiMsg("No admin access to any mandate")
)
- # Aggregate users across all admin mandates (deduplicate by user ID)
- seenUserIds = set()
- allUsers = []
- for mid in adminMandateIds:
- mandateUsers = rootInterface.getUsersByMandate(mid)
- if isinstance(mandateUsers, list):
- users = mandateUsers
- elif hasattr(mandateUsers, 'items'):
- users = mandateUsers.items
- else:
- users = []
- for u in users:
- uid = u.get("id") if isinstance(u, dict) else getattr(u, "id", None)
- if uid and uid not in seenUserIds:
- seenUserIds.add(uid)
- userData = u if isinstance(u, dict) else u.model_dump() if hasattr(u, 'model_dump') else vars(u)
- allUsers.append(userData)
+ from modules.datamodels.datamodelMembership import UserMandate as UserMandateModel
+ allUM = rootInterface.db.getRecordset(UserMandateModel, recordFilter={"mandateId": adminMandateIds})
+ uniqueUserIds = list({
+ (um.get("userId") if isinstance(um, dict) else getattr(um, "userId", None))
+ for um in (allUM or [])
+ if (um.get("userId") if isinstance(um, dict) else getattr(um, "userId", None))
+ })
+ batchUsers = rootInterface.getUsersByIds(uniqueUserIds) if uniqueUserIds else {}
+ allUsers = [
+ u.model_dump() if hasattr(u, 'model_dump') else vars(u)
+ for u in batchUsers.values()
+ ]
# Apply server-side filtering and sorting
filteredUsers = _applyFiltersAndSort(allUsers, paginationParams)
@@ -510,7 +512,7 @@ def get_user_filter_values(
result = appInterface.getUsersByMandate(str(context.mandateId), None)
users = result if isinstance(result, list) else (result.items if hasattr(result, 'items') else [])
items = [u.model_dump() if hasattr(u, 'model_dump') else u for u in users]
- return _handleFilterValuesRequest(items, column, pagination)
+ return _handleFilterValuesRequest(items, column, pagination, getattr(context.user, "language", None))
elif context.hasSysAdminRole:
# SysAdmin: use SQL DISTINCT for DB columns
try:
@@ -522,7 +524,7 @@ def get_user_filter_values(
except Exception:
users = appInterface.getAllUsers()
items = [u.model_dump() if hasattr(u, 'model_dump') else u for u in users]
- return _handleFilterValuesRequest(items, column, pagination)
+ return _handleFilterValuesRequest(items, column, pagination, getattr(context.user, "language", None))
else:
# Non-admin multi-mandate: aggregate across admin mandates (in-memory)
rootInterface = getRootInterface()
@@ -541,18 +543,16 @@ def get_user_filter_values(
break
if not adminMandateIds:
return []
- seenUserIds = set()
- users = []
- for mid in adminMandateIds:
- mandateUsers = rootInterface.getUsersByMandate(mid)
- uList = mandateUsers if isinstance(mandateUsers, list) else (mandateUsers.items if hasattr(mandateUsers, 'items') else [])
- for u in uList:
- uid = u.get("id") if isinstance(u, dict) else getattr(u, "id", None)
- if uid and uid not in seenUserIds:
- seenUserIds.add(uid)
- users.append(u)
- items = [u.model_dump() if hasattr(u, 'model_dump') else u for u in users]
- return _handleFilterValuesRequest(items, column, pagination)
+ from modules.datamodels.datamodelMembership import UserMandate as UserMandateModel
+ allUM = rootInterface.db.getRecordset(UserMandateModel, recordFilter={"mandateId": adminMandateIds})
+ uniqueUserIds = list({
+ (um.get("userId") if isinstance(um, dict) else getattr(um, "userId", None))
+ for um in (allUM or [])
+ if (um.get("userId") if isinstance(um, dict) else getattr(um, "userId", None))
+ })
+ batchUsers = rootInterface.getUsersByIds(uniqueUserIds) if uniqueUserIds else {}
+ items = [u.model_dump() if hasattr(u, 'model_dump') else vars(u) for u in batchUsers.values()]
+ return _handleFilterValuesRequest(items, column, pagination, getattr(context.user, "language", None))
except HTTPException:
raise
except Exception as e:
@@ -588,7 +588,7 @@ def get_user(
if not userMandate:
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
- detail="User not in your mandate"
+ detail=routeApiMsg("User not in your mandate")
)
return user
@@ -606,7 +606,7 @@ class CreateUserRequest(BaseModel):
username: str
email: Optional[str] = None
fullName: Optional[str] = None
- language: str = "en"
+ language: str = "de"
enabled: bool = True
isSysAdmin: bool = False
password: Optional[str] = None
@@ -643,7 +643,7 @@ def create_user(
if not userRole:
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
- detail="No 'user' role found in system — cannot assign user to mandate"
+ detail=routeApiMsg("No 'user' role found in system — cannot assign user to mandate")
)
appInterface.createUserMandate(
@@ -674,7 +674,7 @@ def update_user(
if not isSelfUpdate and not _isAdminForUser(context, userId):
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
- detail="Admin role required to update other users"
+ detail=routeApiMsg("Admin role required to update other users")
)
# Use rootInterface for user lookup/update (avoids RBAC filtering on User table)
@@ -694,7 +694,7 @@ def update_user(
if not updatedUser:
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
- detail="Error updating the user"
+ detail=routeApiMsg("Error updating the user")
)
return updatedUser
@@ -716,7 +716,7 @@ def reset_user_password(
if not _isAdminForUser(context, userId):
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
- detail="Admin role required to reset passwords"
+ detail=routeApiMsg("Admin role required to reset passwords")
)
# Get user interface
@@ -726,7 +726,7 @@ def reset_user_password(
if len(newPassword) < 8:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
- detail="Password must be at least 8 characters long"
+ detail=routeApiMsg("Password must be at least 8 characters long")
)
# Reset password
@@ -734,7 +734,7 @@ def reset_user_password(
if not success:
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
- detail="Failed to reset password"
+ detail=routeApiMsg("Failed to reset password")
)
# SECURITY: Automatically revoke all tokens for the user after password reset
@@ -799,14 +799,14 @@ def change_password(
if not appInterface.verifyPassword(currentPassword, context.user.passwordHash):
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
- detail="Current password is incorrect"
+ detail=routeApiMsg("Current password is incorrect")
)
# Validate new password strength
if len(newPassword) < 8:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
- detail="New password must be at least 8 characters long"
+ detail=routeApiMsg("New password must be at least 8 characters long")
)
# Change password
@@ -814,7 +814,7 @@ def change_password(
if not success:
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
- detail="Failed to change password"
+ detail=routeApiMsg("Failed to change password")
)
# SECURITY: Automatically revoke all tokens for the user after password change
@@ -884,7 +884,7 @@ def send_password_link(
if not _isAdminForUser(context, userId):
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
- detail="Admin role required to send password links"
+ detail=routeApiMsg("Admin role required to send password links")
)
# Get user interface
@@ -895,14 +895,14 @@ def send_password_link(
if not targetUser:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
- detail="User not found"
+ detail=routeApiMsg("User not found")
)
# Check if user has an email
if not targetUser.email:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
- detail="User has no email address configured"
+ detail=routeApiMsg("User has no email address configured")
)
# Use root interface for token operations
@@ -949,7 +949,7 @@ def send_password_link(
logger.warning(f"Failed to send password setup email to {targetUser.email}")
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
- detail="Failed to send email"
+ detail=routeApiMsg("Failed to send email")
)
except HTTPException:
@@ -1017,7 +1017,7 @@ def delete_user(
if not userMandate:
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
- detail="Cannot delete user outside your mandate"
+ detail=routeApiMsg("Cannot delete user outside your mandate")
)
# Delete UserMandate entries for this user first
@@ -1029,7 +1029,7 @@ def delete_user(
if not success:
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
- detail="Error deleting the user"
+ detail=routeApiMsg("Error deleting the user")
)
return {"message": f"User with ID {userId} successfully deleted"}
diff --git a/modules/routes/routeGdpr.py b/modules/routes/routeGdpr.py
index f923932e..fce8ab69 100644
--- a/modules/routes/routeGdpr.py
+++ b/modules/routes/routeGdpr.py
@@ -25,6 +25,8 @@ from modules.interfaces.interfaceDbApp import getRootInterface
from modules.shared.timeUtils import getUtcTimestamp
from modules.shared.auditLogger import audit_logger
from modules.shared.gdprDeletion import deleteUserDataAcrossAllDatabases, buildDeletionSummary
+from modules.shared.i18nRegistry import apiRouteContext
+routeApiMsg = apiRouteContext("routeGdpr")
logger = logging.getLogger(__name__)
@@ -316,14 +318,14 @@ def delete_account(
if not confirmDeletion:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
- detail="Deletion not confirmed. Set confirmDeletion=true to proceed."
+ detail=routeApiMsg("Deletion not confirmed. Set confirmDeletion=true to proceed.")
)
# Prevent SysAdmin self-deletion (safety measure)
if getattr(currentUser, "isSysAdmin", False):
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
- detail="SysAdmin accounts cannot be self-deleted. Contact another SysAdmin."
+ detail=routeApiMsg("SysAdmin accounts cannot be self-deleted. Contact another SysAdmin.")
)
try:
diff --git a/modules/routes/routeI18n.py b/modules/routes/routeI18n.py
new file mode 100644
index 00000000..a50739d7
--- /dev/null
+++ b/modules/routes/routeI18n.py
@@ -0,0 +1,1055 @@
+# Copyright (c) 2025 Patrick Motsch
+# All rights reserved.
+"""
+Public and authenticated routes for UI language sets (DB-backed i18n).
+
+Architecture:
+- xx = base set (meta): key = German plaintext, value = UI context for AI
+- All languages (incl. de) are AI-generated translations from xx
+- AI translation pipeline uses context from xx to disambiguate translations
+"""
+
+from __future__ import annotations
+
+import asyncio
+import json
+import logging
+import math
+import re
+from pathlib import Path
+from typing import Any, Dict, List, Optional, Set
+
+from fastapi import APIRouter, BackgroundTasks, Depends, File, HTTPException, Request, UploadFile, status
+from fastapi.responses import Response
+from pydantic import BaseModel, Field
+
+from modules.auth import getCurrentUser, requireSysAdminRole
+from modules.connectors.connectorDbPostgre import _get_cached_connector
+from modules.datamodels.datamodelAi import (
+ AiCallOptions,
+ AiCallRequest,
+ AiCallResponse,
+ OperationTypeEnum,
+ PriorityEnum,
+)
+from modules.datamodels.datamodelUiLanguage import I18nEntry, UiLanguageSet
+from modules.datamodels.datamodelUam import User
+from modules.datamodels.datamodelRbac import Role
+from modules.datamodels.datamodelFeatures import Feature
+from modules.datamodels.datamodelNotification import NotificationType
+from modules.interfaces.interfaceDbManagement import getInterface as getMgmtInterface
+from modules.routes.routeNotifications import _createNotification
+from modules.shared.configuration import APP_CONFIG
+from modules.shared.i18nRegistry import _loadCache as _reloadI18nCache, apiRouteContext
+from modules.shared.timeUtils import getUtcTimestamp
+
# Wrapper for API detail strings of this route module; presumably registers
# them with the gateway i18n registry for translation — see
# modules/shared/i18nRegistry (TODO confirm exact registration semantics).
routeApiMsg = apiRouteContext("routeI18n")

logger = logging.getLogger(__name__)

# All endpoints live under /api/i18n: public reads plus SysAdmin management.
router = APIRouter(
    prefix="/api/i18n",
    tags=["i18n"],
    responses={404: {"description": "Not found"}},
)
+
# Minimum CHF amount used for the pre-flight AI-pool balance check.
_MIN_AI_BILLING_ESTIMATE_CHF = 0.01
# Keys per AI translation request, pause between batches, and max retries
# when the provider rate-limits a batch.
_TRANSLATE_BATCH_SIZE = 80
_TRANSLATE_BATCH_PAUSE_S = 2.0
_TRANSLATE_RATE_LIMIT_MAX_RETRIES = 3

# Language codes that must never be deleted ("xx" is the base/meta set).
_PROTECTED_CODES = frozenset({"xx"})

# In-memory set of language codes currently being updated (sync / create).
# NOTE(review): per-process only — not shared across worker processes; confirm
# the deployment runs a single worker or accepts stale "updating" flags.
_UPDATING_CODES: Set[str] = set()
+
# ---------------------------------------------------------------------------
# ISO 639 label map (used when creating a language without explicit label)
# Values are endonyms (the language's own name for itself). Mostly ISO 639-1
# two-letter codes, plus a few three-letter ISO 639-2/3 codes (e.g. "gsw").
# ---------------------------------------------------------------------------

_ISO_LABELS: Dict[str, str] = {
    "de": "Deutsch", "gsw": "Schweizerdeutsch", "en": "English", "fr": "Français", "it": "Italiano",
    "es": "Español", "pt": "Português", "nl": "Nederlands", "pl": "Polski",
    "cs": "Čeština", "sk": "Slovenčina", "sv": "Svenska", "no": "Norsk",
    "da": "Dansk", "fi": "Suomi", "hu": "Magyar", "ro": "Română",
    "bg": "Български", "hr": "Hrvatski", "sl": "Slovenščina", "et": "Eesti",
    "lv": "Latviešu", "lt": "Lietuvių", "el": "Ελληνικά", "tr": "Türkçe",
    "ru": "Русский", "uk": "Українська", "ar": "العربية", "he": "עברית",
    "zh": "中文", "ja": "日本語", "ko": "한국어", "hi": "हिन्दी",
    "th": "ไทย", "vi": "Tiếng Việt", "id": "Bahasa Indonesia", "ms": "Bahasa Melayu",
    "tl": "Filipino", "sw": "Kiswahili", "af": "Afrikaans", "sq": "Shqip",
    "am": "አማርኛ", "hy": "Հայերեն", "az": "Azərbaycan", "eu": "Euskara",
    "be": "Беларуская", "bn": "বাংলা", "bs": "Bosanski", "ca": "Català",
    "cy": "Cymraeg", "eo": "Esperanto", "fa": "فارسی", "ga": "Gaeilge",
    "gl": "Galego", "gu": "ગુજરાતી", "ha": "Hausa", "is": "Íslenska",
    "jv": "Basa Jawa", "ka": "ქართული", "kk": "Қазақ", "km": "ខ្មែរ",
    "kn": "ಕನ್ನಡ", "ku": "Kurdî", "ky": "Кыргызча", "la": "Latina",
    "lb": "Lëtzebuergesch", "lo": "ລາວ", "mk": "Македонски", "ml": "മലയാളം",
    "mn": "Монгол", "mr": "मराठी", "mt": "Malti", "my": "မြန်မာ",
    "ne": "नेपाली", "or": "ଓଡ଼ିଆ", "pa": "ਪੰਜਾਬੀ", "ps": "پښتو",
    "si": "සිංහල", "so": "Soomaali", "sr": "Српски", "su": "Basa Sunda",
    "ta": "தமிழ்", "te": "తెలుగు", "tg": "Тоҷикӣ", "tk": "Türkmen",
    "ur": "اردو", "uz": "Oʻzbek", "yo": "Yorùbá", "zu": "isiZulu",
}
+
+
+# ---------------------------------------------------------------------------
+# DB helpers
+# ---------------------------------------------------------------------------
+
def _publicMgmtDb():
    """Return a cached connector to the management DB for public i18n reads.

    Uses a fixed pseudo user id so unauthenticated endpoints share one
    cached connection.
    """
    connectorArgs = {
        "dbHost": APP_CONFIG.get("DB_HOST", "localhost"),
        "dbDatabase": "poweron_management",
        "dbUser": APP_CONFIG.get("DB_USER"),
        "dbPassword": APP_CONFIG.get("DB_PASSWORD_SECRET"),
        "dbPort": int(APP_CONFIG.get("DB_PORT", 5432)),
        "userId": "__i18n_public__",
    }
    return _get_cached_connector(**connectorArgs)
+
+
+def _rowEntries(row: dict) -> List[dict]:
+ """Read entries from a DB row, supporting both new (entries) and legacy (keys) format."""
+ entries = row.get("entries")
+ if isinstance(entries, list) and entries:
+ return entries
+ keys = row.get("keys")
+ if isinstance(keys, dict) and keys:
+ return [{"context": "ui", "key": k, "value": v} for k, v in keys.items()]
+ return []
+
+
+def _entriesToKeyValueMap(entries: List[dict]) -> Dict[str, str]:
+ """Convert entries list to a flat key->value map (for frontend consumption)."""
+ return {e["key"]: e.get("value", "") for e in entries if e.get("key")}
+
+
def _row_to_public(row: dict) -> dict:
    """Shape a UiLanguageSet DB row into the public API payload."""
    return {
        "code": row["id"],
        "label": row.get("label"),
        "status": row.get("status"),
        "entries": _rowEntries(row),
    }
+
+
def _loadMasterXxEntries(db) -> List[dict]:
    """Fetch the entries of the xx base set; [] when the set does not exist."""
    xxRows = db.getRecordset(UiLanguageSet, recordFilter={"id": "xx"})
    return _rowEntries(xxRows[0]) if xxRows else []
+
+
def _userMemberMandateIds(currentUser: User) -> List[str]:
    """Return the distinct mandate ids the user is a member of, order preserved.

    Memberships may be objects (attribute access) or dicts; both are handled.
    """
    from modules.interfaces.interfaceDbApp import getRootInterface

    rootInterface = getRootInterface()
    orderedIds: List[str] = []
    for membership in rootInterface.getUserMandates(str(currentUser.id)):
        if isinstance(membership, dict):
            mandateId = membership.get("mandateId")
        else:
            mandateId = getattr(membership, "mandateId", None)
        if mandateId:
            mandateId = str(mandateId)
            if mandateId not in orderedIds:
                orderedIds.append(mandateId)
    return orderedIds
+
+
def _mandatePassesAiPoolBilling(currentUser: User, mandateId: str, userId: str) -> bool:
    """True when the mandate's AI pool can cover the minimum billing estimate."""
    from modules.interfaces.interfaceDbBilling import getInterface as getBillingInterface

    billingInterface = getBillingInterface(currentUser, mandateId)
    balanceCheck = billingInterface.checkBalance(mandateId, userId, _MIN_AI_BILLING_ESTIMATE_CHF)
    return bool(balanceCheck.allowed)
+
+
+# ---------------------------------------------------------------------------
+# AI Translation helpers
+# ---------------------------------------------------------------------------
+
# Lazily-created AiObjects instance shared by all translation calls in this process.
_aiObjectsSingleton = None


async def _getAiObjects():
    """Return the process-wide AiObjects singleton, creating it on first use.

    The import is deferred to avoid paying the AI-stack import cost at module
    load for requests that never translate anything.
    """
    global _aiObjectsSingleton
    if _aiObjectsSingleton is None:
        from modules.interfaces.interfaceAiObjects import AiObjects
        _aiObjectsSingleton = await AiObjects.create()
    return _aiObjectsSingleton
+
+
def _makeBillingCallback(currentUser: User, mandateId: str):
    """Build a callback that books successful AI-call costs onto the mandate.

    The returned callable receives an AiCallResponse. Responses with errors or
    a non-positive price are ignored; billing failures are logged but never
    raised, so translation cannot abort because accounting failed.
    """
    from modules.serviceCenter.services.serviceBilling.mainServiceBilling import getService as getBillingService

    billingService = getBillingService(currentUser, mandateId)

    def _recordAiUsage(response: AiCallResponse) -> None:
        if not response:
            return
        if getattr(response, "errorCount", 0) > 0:
            return
        priceCHF = getattr(response, "priceCHF", 0.0)
        if not priceCHF or priceCHF <= 0:
            return
        providerName = getattr(response, "provider", None) or "unknown"
        modelName = getattr(response, "modelName", None) or "unknown"
        try:
            billingService.recordUsage(
                priceCHF=priceCHF,
                aicoreProvider=providerName,
                aicoreModel=modelName,
                description=f"i18n translation ({modelName})",
                processingTime=getattr(response, "processingTime", None),
                bytesSent=getattr(response, "bytesSent", None),
                bytesReceived=getattr(response, "bytesReceived", None),
            )
        except Exception as e:
            logger.error("i18n billing callback failed: %s", e)

    return _recordAiUsage
+
+
async def _translateBatch(
    keysToTranslate: Dict[str, str],
    targetLanguageLabel: str,
    targetCode: str,
    billingCallback=None,
) -> Dict[str, str]:
    """Translate German UI keys into targetLanguageLabel via the AI pipeline.

    Args:
        keysToTranslate: { germanKey: uiContext } — the context tells the model
            where the text appears so it can disambiguate the translation.
        targetLanguageLabel: human-readable target language name for the prompt.
        targetCode: ISO code of the target language.
        billingCallback: optional hook receiving the AiCallResponse for billing.

    Returns:
        { germanKey: translatedValue }; keys the model failed to answer are absent.

    Requests are sent in batches of _TRANSLATE_BATCH_SIZE with a pause between
    batches; rate-limited batches retry with exponential backoff.
    """
    if not keysToTranslate:
        return {}

    aiObjects = await _getAiObjects()
    allKeys = list(keysToTranslate.items())
    totalBatches = math.ceil(len(allKeys) / _TRANSLATE_BATCH_SIZE)
    result: Dict[str, str] = {}

    for batchIdx in range(totalBatches):
        chunk = allKeys[batchIdx * _TRANSLATE_BATCH_SIZE : (batchIdx + 1) * _TRANSLATE_BATCH_SIZE]
        payload = [{"key": k, "context": v} for k, v in chunk]
        jsonPayload = json.dumps(payload, ensure_ascii=False)

        # Prompt is German by design: the source keys are German plaintext.
        systemPrompt = (
            f"Du bist ein professioneller Übersetzer für Software-UI-Texte. "
            f"Du erhältst ein JSON-Array mit Objekten: {{\"key\": \"deutscher Text\", \"context\": \"UI-Kontext\"}}. "
            f"Der Kontext beschreibt, wo der Text in der Anwendung verwendet wird (Datei, Komponente). "
            f"Übersetze jeden «key» ins {targetLanguageLabel} (ISO {targetCode}). "
            f"Behalte Platzhalter wie {{variable}} exakt bei. "
            f"Antworte NUR mit einem JSON-Objekt — Keys = deutsche Originaltexte, Values = Übersetzungen. "
            f"Kein Markdown, kein Kommentar."
        )

        aiRequest = AiCallRequest(
            prompt=f"Übersetze diese UI-Labels:\n{jsonPayload}",
            context=systemPrompt,
            options=AiCallOptions(
                operationType=OperationTypeEnum.DATA_GENERATE,
                priority=PriorityEnum.BALANCED,
                compressPrompt=False,
                compressContext=False,
                resultFormat="json",
                temperature=0.2,
            ),
        )

        batchDone = False
        for retryAttempt in range(_TRANSLATE_RATE_LIMIT_MAX_RETRIES):
            # BUGFIX: (re)attach the billing hook on EVERY attempt. The finally
            # block below clears it after each attempt, so attaching it only
            # once before the retry loop (as previously) left all rate-limit
            # retries unbilled.
            if billingCallback:
                aiObjects.billingCallback = billingCallback
            try:
                response = await aiObjects.callWithTextContext(aiRequest)
                if response and response.content:
                    raw = response.content.strip()
                    # Strip an optional ``` fence despite the "no markdown" instruction.
                    if raw.startswith("```"):
                        raw = re.sub(r"^```[a-z]*\n?", "", raw)
                        raw = re.sub(r"\n?```$", "", raw)
                    parsed = json.loads(raw)
                    if isinstance(parsed, dict):
                        result.update(parsed)
                    else:
                        logger.warning("i18n AI batch %d/%d returned non-dict", batchIdx + 1, totalBatches)
                else:
                    logger.warning("i18n AI batch %d/%d empty response", batchIdx + 1, totalBatches)
                batchDone = True
                break
            except json.JSONDecodeError as je:
                # Malformed JSON will not improve on retry — give up on this batch.
                logger.error("i18n AI batch %d/%d JSON parse error: %s", batchIdx + 1, totalBatches, je)
                batchDone = True
                break
            except Exception as e:
                errStr = str(e)
                if "rate_limit" in errStr.lower() or "429" in errStr or "Rate limit" in errStr:
                    waitSec = _TRANSLATE_BATCH_PAUSE_S * (2 ** retryAttempt)
                    logger.warning(
                        "i18n AI batch %d/%d rate-limited (attempt %d/%d), waiting %.1fs",
                        batchIdx + 1, totalBatches, retryAttempt + 1,
                        _TRANSLATE_RATE_LIMIT_MAX_RETRIES, waitSec,
                    )
                    await asyncio.sleep(waitSec)
                    continue
                logger.error("i18n AI batch %d/%d failed: %s", batchIdx + 1, totalBatches, e)
                batchDone = True
                break
            finally:
                # Always detach the hook so other users of the shared AiObjects
                # singleton are never billed against this caller's mandate.
                aiObjects.billingCallback = None

        if not batchDone:
            logger.error("i18n AI batch %d/%d exhausted rate-limit retries", batchIdx + 1, totalBatches)

        if batchIdx < totalBatches - 1:
            await asyncio.sleep(_TRANSLATE_BATCH_PAUSE_S)

    # Align leading-letter capitalisation of translations with their sources.
    _matchCapitalization(keysToTranslate, result)
    return result
+
+
+def _matchCapitalization(originals: Dict[str, str], translations: Dict[str, str]) -> None:
+ """Ensure translations preserve the capitalisation pattern of the original key."""
+ for key, translated in translations.items():
+ if not key or not translated:
+ continue
+ if key[0].isupper() and translated[0].islower():
+ translations[key] = translated[0].upper() + translated[1:]
+ elif key[0].islower() and translated[0].isupper():
+ translations[key] = translated[0].lower() + translated[1:]
+
+
def _resolveMandateIdForAiI18n(request: Request, currentUser: User) -> str:
    """Pick the mandate that pays for AI translation calls.

    Resolution order: an explicit X-Mandate-Id header (must be one of the
    caller's memberships), then the first membership whose AI pool passes the
    minimum balance check. Raises 400 (no memberships), 403 (foreign header
    mandate) or 402 (no pool with sufficient balance).
    """
    userId = str(currentUser.id)
    memberIds = _userMemberMandateIds(currentUser)
    if not memberIds:
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail=routeApiMsg("Mindestens eine Mandats-Mitgliedschaft ist für die AI-Nutzung erforderlich."),
        )

    requestedMandate = (
        request.headers.get("X-Mandate-Id") or request.headers.get("x-mandate-id") or ""
    ).strip()
    if requestedMandate:
        if requestedMandate not in memberIds:
            raise HTTPException(
                status_code=status.HTTP_403_FORBIDDEN,
                detail=routeApiMsg("X-Mandate-Id ist kein Mandat Ihrer Mitgliedschaft."),
            )
        if _mandatePassesAiPoolBilling(currentUser, requestedMandate, userId):
            return requestedMandate

    # Fall back to the first membership whose pool can pay.
    for candidateId in memberIds:
        if _mandatePassesAiPoolBilling(currentUser, candidateId, userId):
            return candidateId

    raise HTTPException(
        status_code=status.HTTP_402_PAYMENT_REQUIRED,
        detail=routeApiMsg("Nicht genügend AI-Guthaben (Mandats-Pool) für diese Aktion."),
    )
+
+
+# ---------------------------------------------------------------------------
+# xx-Master sync from frontend codebase (local dev fallback)
+# ---------------------------------------------------------------------------
+
# Root directory derived from this file's location (modules/routes/…).
# NOTE(review): parents[3] resolves to the directory ABOVE this backend's
# repo root — this assumes frontend_nyla is a sibling checkout next to the
# backend repository; confirm, otherwise parents[2] was intended.
_REPO_ROOT = Path(__file__).resolve().parents[3]
_FRONTEND_SRC = _REPO_ROOT / "frontend_nyla" / "src"

# Matches t('…') calls in the frontend sources, tolerating escaped quotes
# inside the single-quoted literal; group 1 is the raw key text.
_T_CALL_RE = re.compile(r"""\bt\(\s*'((?:\\.|[^'])+)'\s*(?:,|\))""")
+
+
def _scanCodebaseKeys() -> List[dict]:
    """Local dev fallback: collect t('…') keys from the frontend sources.

    Scans *.ts / *.tsx files under _FRONTEND_SRC and returns sorted entries
    with context "ui" and empty values. Returns [] (with a warning) when the
    frontend tree is not present.
    """
    if not _FRONTEND_SRC.is_dir():
        logger.warning("i18n codebase scan: %s not found", _FRONTEND_SRC)
        return []

    foundKeys: Set[str] = set()
    for globPattern in ("*.tsx", "*.ts"):
        for sourceFile in _FRONTEND_SRC.rglob(globPattern):
            try:
                text = sourceFile.read_text(encoding="utf-8", errors="replace")
            except OSError:
                continue
            for match in _T_CALL_RE.finditer(text):
                literal = match.group(1).replace("\\'", "'")
                if literal:
                    foundKeys.add(literal)
    return [{"context": "ui", "key": foundKey, "value": ""} for foundKey in sorted(foundKeys)]
+
+
async def _readOptionalEntriesFromBody(request: Request) -> Optional[List[dict]]:
    """Parse an optional {"entries": [...]} JSON body.

    Returns None for an empty/undecodable body, a non-dict body, or a body
    without "entries". Raises 400 when "entries" exists but is not an array.
    Entries missing a key are dropped; context defaults to "ui", value to "".
    Returns None (not []) when nothing usable remains.
    """
    rawBody = await request.body()
    if not rawBody or not rawBody.strip():
        return None
    try:
        parsed = json.loads(rawBody.decode("utf-8"))
    except (json.JSONDecodeError, UnicodeDecodeError):
        return None
    if not isinstance(parsed, dict) or "entries" not in parsed:
        return None
    rawEntries = parsed.get("entries")
    if not isinstance(rawEntries, list):
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail=routeApiMsg("Feld «entries» muss ein JSON-Array sein."),
        )
    normalised = [
        {
            "context": str(entry.get("context", "ui")),
            "key": str(entry["key"]),
            "value": str(entry.get("value", "")),
        }
        for entry in rawEntries
        if isinstance(entry, dict) and entry.get("key")
    ]
    return normalised if normalised else None
+
+
def _syncXxMaster(db, userId: Optional[str], incomingEntries: List[dict]) -> Dict[str, Any]:
    """Synchronise the xx base set with incoming UI entries.

    Only touches entries whose context is "ui". Gateway entries (api.*, table.*)
    written by _syncRegistryToDb at boot are preserved untouched.

    Args:
        db: management-DB connector.
        userId: id written into the sys audit fields (may be None).
        incomingEntries: full replacement list of "ui"-context entries.

    Returns:
        dict with "added"/"removed" key lists and the new "entriesCount";
        includes an "error" field when called with no entries.
    """
    # Guard: an empty scan/upload must never wipe the master set.
    if not incomingEntries:
        logger.warning("i18n xx-sync: no entries — aborting")
        return {"added": [], "removed": [], "entriesCount": 0, "error": "No entries to sync"}

    rows = db.getRecordset(UiLanguageSet, recordFilter={"id": "xx"})
    if not rows:
        # First-ever sync: create the base set from scratch.
        now = getUtcTimestamp()
        rec = {
            "id": "xx",
            "label": "Basisset (Meta)",
            "entries": incomingEntries,
            "status": "complete",
            "isDefault": True,
            "sysCreatedAt": now,
            "sysCreatedBy": userId,
            "sysModifiedAt": now,
            "sysModifiedBy": userId,
        }
        db.recordCreate(UiLanguageSet, rec)
        allKeys = [e["key"] for e in incomingEntries]
        logger.info("i18n xx-master created: %d entries", len(incomingEntries))
        return {"added": allKeys, "removed": [], "entriesCount": len(incomingEntries)}

    row = dict(rows[0])
    curEntries = _rowEntries(row)

    # Split current entries: non-"ui" (gateway) entries are kept verbatim,
    # "ui" entries are fully replaced by the incoming list.
    gatewayEntries = [e for e in curEntries if e.get("context", "ui") != "ui"]
    curUiByKey = {e["key"]: e for e in curEntries if e.get("context", "ui") == "ui"}
    incomingByKey = {e["key"]: e for e in incomingEntries}

    incomingKeys = set(incomingByKey.keys())
    dbUiKeys = set(curUiByKey.keys())

    added = sorted(incomingKeys - dbUiKeys)
    removed = sorted(dbUiKeys - incomingKeys)

    # Normalise incoming entries to the canonical three-field shape.
    newUiEntries = [
        {"context": e["context"], "key": e["key"], "value": e["value"]}
        for e in incomingEntries
    ]

    # No-op detection: identical key sets AND identical value/context per key
    # — skip the DB write entirely.
    if not added and not removed and all(
        curUiByKey.get(e["key"], {}).get("value") == e["value"]
        and curUiByKey.get(e["key"], {}).get("context") == e["context"]
        for e in incomingEntries
    ):
        total = len(newUiEntries) + len(gatewayEntries)
        return {"added": [], "removed": [], "entriesCount": total}

    mergedEntries = gatewayEntries + newUiEntries

    now = getUtcTimestamp()
    row["entries"] = mergedEntries
    # Drop the legacy "keys" column once migrated to the entries format.
    if "keys" in row:
        del row["keys"]
    row["sysModifiedAt"] = now
    row["sysModifiedBy"] = userId
    db.recordModify(UiLanguageSet, "xx", row)

    logger.info(
        "i18n xx-master sync: +%d added, -%d removed (ui=%d, gateway=%d, total=%d)",
        len(added), len(removed), len(newUiEntries), len(gatewayEntries), len(mergedEntries),
    )
    return {"added": added, "removed": removed, "entriesCount": len(mergedEntries)}
+
+
+# --- Public -----------------------------------------------------------------
+
+
@router.get("/codes")
async def list_language_codes():
    """Public: list all language sets with entry counts and update status.

    Default set(s) sort first, then alphabetically by code.
    """
    db = _publicMgmtDb()
    summaries = []
    for row in db.getRecordset(UiLanguageSet):
        rowEntries = _rowEntries(row)
        uiCount = sum(1 for entry in rowEntries if entry.get("context", "ui") == "ui")
        languageCode = row["id"]
        summaries.append(
            {
                "code": languageCode,
                "label": row.get("label"),
                "status": row.get("status"),
                "isDefault": bool(row.get("isDefault")),
                "entriesCount": len(rowEntries),
                "uiCount": uiCount,
                "gatewayCount": len(rowEntries) - uiCount,
                "updating": languageCode in _UPDATING_CODES,
            }
        )
    summaries.sort(key=lambda s: (not s["isDefault"], s["code"]))
    return summaries
+
+
@router.get("/sets/{code}")
async def get_language_set(code: str):
    """Public: return one language set (code, label, status, entries) or 404."""
    matching = _publicMgmtDb().getRecordset(UiLanguageSet, recordFilter={"id": code})
    if not matching:
        raise HTTPException(status_code=404, detail=routeApiMsg("Sprachset nicht gefunden"))
    return _row_to_public(matching[0])
+
+
+# --- Auth user --------------------------------------------------------------
+
+
class CreateLanguageBody(BaseModel):
    """Request body for creating a new language set."""
    # ISO-639 language code; further normalised/validated by _validate_iso2_code.
    code: str = Field(..., min_length=2, max_length=10)
    # Optional display label; when omitted, the _ISO_LABELS table supplies one.
    label: Optional[str] = Field(default=None, max_length=80)
+
+
+def _validate_iso2_code(code: str) -> str:
+ c = code.strip().lower()
+ if not re.fullmatch(r"[a-z]{2,3}", c):
+ raise HTTPException(
+ status_code=400, detail=routeApiMsg("Nur ISO-639 Sprachcodes (2–3 Buchstaben) erlaubt.")
+ )
+ return c
+
+
async def _translateTextMultilingualFields(db, langCode: str, langLabel: str, billingCb=None) -> int:
    """Batch-translate TextMultilingual fields (Role.description, Feature.label) for a new language.

    Collects every field whose "xx" base text exists but whose langCode entry
    is missing, translates the distinct texts in one AI batch, and writes the
    translations back per record.

    Args:
        db: management-DB connector.
        langCode / langLabel: target language ISO code and display name.
        billingCb: optional billing callback forwarded to _translateBatch.

    Returns:
        Number of record fields actually updated.
    """
    # Composite key "kind:id:field" -> German source text.
    textsToTranslate: Dict[str, str] = {}

    roles = db.getRecordset(Role)
    for r in roles:
        desc = r.get("description") if isinstance(r, dict) else getattr(r, "description", None)
        if isinstance(desc, dict):
            sourceText = desc.get("xx", "")
            if sourceText and not desc.get(langCode):
                textsToTranslate[f"role:{r.get('id') if isinstance(r, dict) else r.id}:description"] = sourceText

    features = db.getRecordset(Feature)
    for f in features:
        lbl = f.get("label") if isinstance(f, dict) else getattr(f, "label", None)
        if isinstance(lbl, dict):
            sourceText = lbl.get("xx", "")
            if sourceText and not lbl.get(langCode):
                textsToTranslate[f"feature:{f.get('code') if isinstance(f, dict) else f.code}:label"] = sourceText

    if not textsToTranslate:
        return 0

    # Dict keys are unique, so building the map from the values dedupes in one
    # step (the previous round-trip through a set was redundant).
    keysForAi = {text: "User-generated content field" for text in textsToTranslate.values()}
    translated = await _translateBatch(keysForAi, langLabel, langCode, billingCallback=billingCb)

    count = 0
    for compositeKey, deText in textsToTranslate.items():
        translatedText = translated.get(deText)
        if not translatedText:
            continue
        parts = compositeKey.split(":")
        if parts[0] == "role":
            roleId = parts[1]
            rows = db.getRecordset(Role, recordFilter={"id": roleId})
            if rows:
                rec = dict(rows[0]) if not isinstance(rows[0], dict) else rows[0]
                desc = rec.get("description", {})
                if isinstance(desc, dict):
                    desc[langCode] = translatedText
                    rec["description"] = desc
                    db.recordModify(Role, roleId, rec)
                    count += 1
        elif parts[0] == "feature":
            featureCode = parts[1]
            rows = db.getRecordset(Feature, recordFilter={"code": featureCode})
            if rows:
                rec = dict(rows[0]) if not isinstance(rows[0], dict) else rows[0]
                lbl = rec.get("label", {})
                if isinstance(lbl, dict):
                    lbl[langCode] = translatedText
                    rec["label"] = lbl
                    db.recordModify(Feature, featureCode, rec)
                    count += 1

    logger.info("TextMultilingual batch translate: %d fields translated to %s", count, langCode)
    return count
+
+
def _run_create_language_job(userId: str, code: str, label: str, currentUser: User, mandateId: str) -> None:
    """Sync wrapper for BackgroundTasks: run the async create job on a private event loop."""
    privateLoop = asyncio.new_event_loop()
    try:
        privateLoop.run_until_complete(
            _run_create_language_job_async(userId, code, label, currentUser, mandateId)
        )
    finally:
        privateLoop.close()
+
+
async def _run_create_language_job_async(userId: str, code: str, label: str, currentUser: User, mandateId: str) -> None:
    """Background job: AI-translate the full xx base set into a new language.

    Expects the placeholder UiLanguageSet record to already exist (created by
    create_language_set with status "generating"). On success the record is
    filled with translations, TextMultilingual content fields are translated,
    the i18n cache is reloaded and the user is notified. On any failure the
    user gets a failure notification instead. The language code is tracked in
    _UPDATING_CODES for the duration of the job.
    """
    _UPDATING_CODES.add(code)
    try:
        db = _publicMgmtDb()
        rows = db.getRecordset(UiLanguageSet, recordFilter={"id": code})
        if not rows:
            # Placeholder vanished (e.g. deleted meanwhile) — nothing to do.
            return
        xxEntries = _loadMasterXxEntries(db)
        if not xxEntries:
            logger.error("i18n create job: no xx master entries found")
            return

        # xx maps germanKey -> uiContext; that is exactly _translateBatch's input.
        toTranslate = {e["key"]: e.get("value", "") for e in xxEntries}
        billingCb = _makeBillingCallback(currentUser, mandateId)
        translated = await _translateBatch(toTranslate, label, code, billingCallback=billingCb)

        # Untranslated keys fall back to "[key]" so missing strings are visible in the UI.
        finalEntries = []
        for e in xxEntries:
            k = e["key"]
            finalEntries.append({
                "context": e["context"],
                "key": k,
                "value": translated.get(k, f"[{k}]"),
            })

        missingCount = sum(1 for e in xxEntries if e["key"] not in translated)
        finalStatus = "complete" if missingCount == 0 else "incomplete"

        now = getUtcTimestamp()
        merged = dict(rows[0])
        merged["entries"] = finalEntries
        # Drop the legacy "keys" column once migrated to the entries format.
        if "keys" in merged:
            del merged["keys"]
        merged["status"] = finalStatus
        merged["label"] = label
        merged["sysModifiedAt"] = now
        merged["sysModifiedBy"] = userId
        db.recordModify(UiLanguageSet, code, merged)

        statusHint = "" if finalStatus == "complete" else f" ({missingCount} Keys ohne Übersetzung)"

        # Also translate DB content fields (Role.description, Feature.label).
        tmCount = await _translateTextMultilingualFields(db, code, label, billingCb)

        _createNotification(
            userId,
            NotificationType.SYSTEM,
            title="Sprachset erstellt",
            message=f"Die Sprache «{label}» ({code}) wurde per KI übersetzt{statusHint}. {tmCount} Inhaltsfelder übersetzt.",
        )
        await _reloadI18nCache()
        logger.info("i18n create job done: code=%s, translated=%d/%d, tm_fields=%d", code, len(translated), len(xxEntries), tmCount)
    except Exception as e:
        logger.exception("create language job failed: %s", e)
        _createNotification(
            userId,
            NotificationType.SYSTEM,
            title="Sprachset fehlgeschlagen",
            message=f"Fehler bei «{code}»: {e}",
        )
    finally:
        _UPDATING_CODES.discard(code)
+
+
@router.post("/sets")
async def create_language_set(
    request: Request,
    body: CreateLanguageBody,
    background: BackgroundTasks,
    currentUser: User = Depends(getCurrentUser),
):
    """Create a new language set; AI translation runs as a background job.

    The record is stored immediately with status "generating"; the user is
    notified when the job finishes. Requires a mandate whose AI pool passes
    the minimum balance check.
    """
    mandateId = _resolveMandateIdForAiI18n(request, currentUser)
    code = _validate_iso2_code(body.code)
    if code == "xx":
        raise HTTPException(status_code=400, detail=routeApiMsg("Das Basisset «xx» kann nicht manuell angelegt werden."))

    db = _publicMgmtDb()
    if db.getRecordset(UiLanguageSet, recordFilter={"id": code}):
        raise HTTPException(status_code=409, detail=routeApiMsg("Dieses Sprachset existiert bereits."))
    if not _loadMasterXxEntries(db):
        raise HTTPException(status_code=503, detail=routeApiMsg("Basisset (xx) nicht vorhanden. Bitte zuerst UI-Keys einlesen."))

    # Label preference: explicit body label, then ISO table, then the raw code.
    resolvedLabel = ((body.label or "").strip() if body.label else "") or _ISO_LABELS.get(code, code)

    timestamp = getUtcTimestamp()
    creatorId = str(currentUser.id)
    placeholderRecord: dict = {
        "id": code,
        "label": resolvedLabel,
        "entries": [],
        "status": "generating",
        "isDefault": False,
        "sysCreatedAt": timestamp,
        "sysCreatedBy": creatorId,
        "sysModifiedAt": timestamp,
        "sysModifiedBy": creatorId,
    }
    db.recordCreate(UiLanguageSet, placeholderRecord)

    background.add_task(_run_create_language_job, creatorId, code, resolvedLabel, currentUser, mandateId)
    _createNotification(
        creatorId,
        NotificationType.SYSTEM,
        title="Sprachset wird erzeugt",
        message=f"Die Sprache «{code}» wird im Hintergrund per KI übersetzt.",
    )
    return {"status": "accepted", "code": code}
+
+
def _compute_language_sync_diff(db, code: str) -> dict:
    """Dry-run sync metrics for a language vs the xx base set (no DB writes).

    Raises 400 for the base set itself, 404 for an unknown language and 503
    when the base set is missing.
    """
    if code == "xx":
        raise HTTPException(status_code=400, detail=routeApiMsg("Das xx-Set wird separat synchronisiert."))
    rows = db.getRecordset(UiLanguageSet, recordFilter={"id": code})
    if not rows:
        raise HTTPException(status_code=404, detail=routeApiMsg("Sprachset nicht gefunden"))
    xxEntries = _loadMasterXxEntries(db)
    if not xxEntries:
        raise HTTPException(status_code=503, detail=routeApiMsg("Basisset (xx) nicht vorhanden."))
    masterIds = {_entryId(entry) for entry in xxEntries}
    currentIds = {_entryId(entry) for entry in _rowEntries(dict(rows[0]))}
    return {
        "code": code,
        "addedCount": len(masterIds - currentIds),
        "removedCount": len(currentIds - masterIds),
        "masterEntryCount": len(masterIds),
        "currentEntryCount": len(currentIds),
    }
+
+
+def _entryId(e: dict) -> tuple:
+ """Composite identifier for an i18n entry: (key, context)."""
+ return (e["key"], e.get("context", "ui"))
+
+
async def _syncLanguageWithXx(db, code: str, userId: Optional[str], adminUser: Optional[User] = None) -> dict:
    """Synchronise a language set (incl. de) against the xx base set via AI.

    Entries are identified by (key, context) — the same text can appear
    with different contexts (e.g. "ui" and "api.routeXyz").

    New (key, context) pairs are AI-translated (billed to the admin's first
    mandate when adminUser is given); pairs gone from xx are removed; contexts
    of surviving pairs are refreshed from xx. Marks the code as updating in
    _UPDATING_CODES for the duration. Raises 400/404/503 HTTPExceptions on
    invalid code, missing set or missing base set.

    Returns:
        dict with "added"/"removed" key lists, "translated" count and the
        resulting "entriesCount".
    """
    if code == "xx":
        raise HTTPException(status_code=400, detail=routeApiMsg("Das xx-Set wird über 'UI-Keys einlesen' aktualisiert."))
    _UPDATING_CODES.add(code)
    try:
        rows = db.getRecordset(UiLanguageSet, recordFilter={"id": code})
        if not rows:
            raise HTTPException(status_code=404, detail=routeApiMsg("Sprachset nicht gefunden"))
        xxEntries = _loadMasterXxEntries(db)
        if not xxEntries:
            raise HTTPException(status_code=503, detail=routeApiMsg("Basisset (xx) nicht vorhanden."))

        row = dict(rows[0])
        curEntries = _rowEntries(row)
        # Index both sides by (key, context) for set arithmetic.
        curById = {_entryId(e): e for e in curEntries}
        xxById = {_entryId(e): e for e in xxEntries}

        masterIds = set(xxById.keys())
        currentIds = set(curById.keys())
        removedIds = currentIds - masterIds
        addedIds = masterIds - currentIds

        translatedCount = 0
        if addedIds:
            # New pairs: AI-translate their German key text (value in xx is the
            # UI context used to disambiguate).
            toTranslate = {xxById[eid]["key"]: xxById[eid].get("value", "") for eid in addedIds}
            langLabel = row.get("label") or code
            billingCb = None
            if adminUser:
                # Bill against the admin's first mandate membership, if any.
                memberIds = _userMemberMandateIds(adminUser)
                if memberIds:
                    billingCb = _makeBillingCallback(adminUser, memberIds[0])
            try:
                translated = await _translateBatch(toTranslate, langLabel, code, billingCallback=billingCb)
                translatedCount = sum(1 for eid in addedIds if xxById[eid]["key"] in translated)
            except Exception as e:
                # Best-effort: keep syncing with "[key]" placeholders instead of failing.
                logger.error("AI translation during sync failed for %s: %s", code, e)
                translated = {}

            for eid in addedIds:
                xxEntry = xxById[eid]
                curById[eid] = {
                    "context": xxEntry["context"],
                    "key": xxEntry["key"],
                    "value": translated.get(xxEntry["key"], f"[{xxEntry['key']}]"),
                }

        for eid in removedIds:
            del curById[eid]

        # Refresh contexts of surviving entries from the base set.
        for eid in masterIds & currentIds:
            curById[eid]["context"] = xxById[eid]["context"]

        newEntries = sorted(curById.values(), key=lambda e: (e["key"].lower(), e.get("context", "")))

        now = getUtcTimestamp()
        untranslated = len(addedIds) - translatedCount
        row["entries"] = newEntries
        # Drop the legacy "keys" column once migrated to the entries format.
        if "keys" in row:
            del row["keys"]
        row["status"] = "complete" if untranslated == 0 else "incomplete"
        row["sysModifiedAt"] = now
        row["sysModifiedBy"] = userId
        db.recordModify(UiLanguageSet, code, row)
        return {
            "code": code,
            "added": sorted({xxById[eid]["key"] for eid in addedIds}),
            "removed": sorted({eid[0] for eid in removedIds}),
            "translated": translatedCount,
            "entriesCount": len(newEntries),
        }
    finally:
        _UPDATING_CODES.discard(code)
+
+
@router.put("/sets/sync-xx")
async def sync_xx_master(
    request: Request,
    adminUser: User = Depends(requireSysAdminRole),
):
    """SysAdmin: refresh the xx base set from the frontend build artefact.

    Body: {"entries": [{"context":"ui","key":"…","value":"…"}, …]}.
    Without a body, falls back to scanning the local frontend checkout (dev).
    Reloads the in-process i18n cache afterwards.
    """
    db = getMgmtInterface(adminUser, mandateId=None).db
    bodyEntries = await _readOptionalEntriesFromBody(request)
    effectiveEntries = bodyEntries if bodyEntries is not None else _scanCodebaseKeys()
    syncResult = _syncXxMaster(db, str(adminUser.id), effectiveEntries)
    await _reloadI18nCache()
    return syncResult
+
+
@router.get("/sets/{code}/sync-diff")
async def get_language_sync_diff(
    code: str,
    adminUser: User = Depends(requireSysAdminRole),
):
    """SysAdmin: preview how many keys a full sync against xx would add/remove."""
    normalisedCode = code.strip().lower()
    # Reserved path segments must not be treated as language codes.
    if normalisedCode in ("update-all", "sync-xx", "sync-de"):
        raise HTTPException(status_code=400, detail=routeApiMsg("Ungültiger Sprachcode."))
    db = getMgmtInterface(adminUser, mandateId=None).db
    return _compute_language_sync_diff(db, normalisedCode)
+
+
@router.put("/sets/{code}")
async def update_language_set(
    code: str,
    adminUser: User = Depends(requireSysAdminRole),
):
    """SysAdmin: sync one language set against xx (AI-translates new keys)."""
    normalisedCode = code.strip().lower()
    # Reserved path segments and the base set itself are not valid targets.
    if normalisedCode in ("update-all", "sync-xx", "sync-de"):
        raise HTTPException(status_code=400, detail=routeApiMsg("Ungültiger Sprachcode."))
    if normalisedCode == "xx":
        raise HTTPException(status_code=400, detail=routeApiMsg("Das xx-Set wird über 'UI-Keys einlesen' aktualisiert."))
    db = getMgmtInterface(adminUser, mandateId=None).db
    syncResult = await _syncLanguageWithXx(db, normalisedCode, str(adminUser.id), adminUser=adminUser)
    await _reloadI18nCache()
    return syncResult
+
+
+@router.delete("/sets/{code}")
+async def delete_language_set(
+ code: str,
+ adminUser: User = Depends(requireSysAdminRole),
+):
+ c = code.strip().lower()
+ if c in _PROTECTED_CODES:
+ raise HTTPException(status_code=400, detail=f"Das Set «{c}» darf nicht gelöscht werden.")
+ db = getMgmtInterface(adminUser, mandateId=None).db
+ ok = db.recordDelete(UiLanguageSet, c)
+ if not ok:
+ raise HTTPException(status_code=404, detail=routeApiMsg("Sprachset nicht gefunden"))
+ await _reloadI18nCache()
+ return {"deleted": c}
+
+
+@router.get("/sets/{code}/download", dependencies=[Depends(getCurrentUser)])
+async def download_language_set(
+ code: str,
+ currentUser: User = Depends(getCurrentUser),
+):
+ db = _publicMgmtDb()
+ rows = db.getRecordset(UiLanguageSet, recordFilter={"id": code.strip().lower()})
+ if not rows:
+ raise HTTPException(status_code=404, detail=routeApiMsg("Sprachset nicht gefunden"))
+ payload = _row_to_public(rows[0])
+ raw = json.dumps(payload, ensure_ascii=False, indent=2)
+ return Response(
+ content=raw,
+ media_type="application/json",
+ headers={
+ "Content-Disposition": f'attachment; filename="ui-language-{code}.json"'
+ },
+ )
+
+
+# --- Export / Import (full DB) -----------------------------------------------
+
+
+@router.get("/export")
+async def export_all_language_sets(
+ adminUser: User = Depends(requireSysAdminRole),
+):
+ db = getMgmtInterface(adminUser, mandateId=None).db
+ rows = db.getRecordset(UiLanguageSet)
+ payload = []
+ for r in rows:
+ entries = _rowEntries(r)
+ payload.append({
+ "id": r["id"],
+ "label": r.get("label", ""),
+ "entries": entries,
+ "status": r.get("status", "complete"),
+ "isDefault": bool(r.get("isDefault", False)),
+ })
+ payload.sort(key=lambda x: (not x.get("isDefault"), x["id"]))
+ raw = json.dumps(payload, ensure_ascii=False, indent=2)
+ return Response(
+ content=raw,
+ media_type="application/json",
+ headers={
+ "Content-Disposition": 'attachment; filename="ui-languages-export.json"'
+ },
+ )
+
+
+@router.post("/import")
+async def import_language_sets(
+ file: UploadFile = File(...),
+ adminUser: User = Depends(requireSysAdminRole),
+):
+ if not file.filename or not file.filename.endswith(".json"):
+ raise HTTPException(status_code=400, detail=routeApiMsg("Nur .json-Dateien erlaubt."))
+
+ try:
+ raw = await file.read()
+ data = json.loads(raw.decode("utf-8"))
+ except (json.JSONDecodeError, UnicodeDecodeError) as e:
+ raise HTTPException(status_code=400, detail=f"Ungültiges JSON: {e}")
+
+ if not isinstance(data, list):
+ raise HTTPException(status_code=400, detail=routeApiMsg("JSON muss ein Array von Sprachsets sein."))
+
+ db = getMgmtInterface(adminUser, mandateId=None).db
+ now = getUtcTimestamp()
+ uid = str(adminUser.id)
+ created = []
+ updated = []
+
+ for entry in data:
+ if not isinstance(entry, dict):
+ continue
+ code = str(entry.get("id", "")).strip().lower()
+ if not code or len(code) < 2:
+ continue
+
+ entries = entry.get("entries")
+ if not isinstance(entries, list):
+ keys = entry.get("keys")
+ if isinstance(keys, dict):
+ entries = [{"context": "ui", "key": k, "value": v} for k, v in keys.items()]
+ else:
+ continue
+
+ label = str(entry.get("label", code))
+ entryStatus = str(entry.get("status", "complete"))
+ isDefault = bool(entry.get("isDefault", False))
+
+ existing = db.getRecordset(UiLanguageSet, recordFilter={"id": code})
+ if existing:
+ row = dict(existing[0])
+ row["entries"] = entries
+ if "keys" in row:
+ del row["keys"]
+ row["label"] = label
+ row["status"] = entryStatus
+ row["isDefault"] = isDefault
+ row["sysModifiedAt"] = now
+ row["sysModifiedBy"] = uid
+ db.recordModify(UiLanguageSet, code, row)
+ updated.append(code)
+ else:
+ rec = {
+ "id": code,
+ "label": label,
+ "entries": entries,
+ "status": entryStatus,
+ "isDefault": isDefault,
+ "sysCreatedAt": now,
+ "sysCreatedBy": uid,
+ "sysModifiedAt": now,
+ "sysModifiedBy": uid,
+ }
+ db.recordCreate(UiLanguageSet, rec)
+ created.append(code)
+
+ logger.info("i18n import: created=%s, updated=%s", created, updated)
+ await _reloadI18nCache()
+ return {"created": created, "updated": updated, "totalProcessed": len(created) + len(updated)}
+
+
+# ---------------------------------------------------------------------------
+# Phase 7b: translate-field — on-demand translation for TextMultilingual fields
+# ---------------------------------------------------------------------------
+
+_TRANSLATE_FIELD_MAX_LEN = 2000
+
+
+class _TargetLang(BaseModel):
+ code: str = Field(..., min_length=2, max_length=10)
+ label: str = Field(default="")
+
+
+class TranslateFieldRequest(BaseModel):
+ sourceText: str = Field(..., min_length=1, max_length=_TRANSLATE_FIELD_MAX_LEN)
+ sourceLang: str = Field(default="de", min_length=2, max_length=5)
+ targetLangs: List[_TargetLang] = Field(..., min_length=1)
+
+
+@router.post("/translate-field")
+async def translateField(
+ body: TranslateFieldRequest,
+ request: Request,
+ currentUser: User = Depends(getCurrentUser),
+):
+ """Translate a single text into one or more target languages (for TextMultilingual fields)."""
+ targets = [t for t in body.targetLangs if t.code != body.sourceLang]
+ if not targets:
+ return {"translations": {}}
+
+ mandateId = _resolveMandateIdForAiI18n(request, currentUser)
+ billingCb = _makeBillingCallback(currentUser, mandateId)
+
+ results: Dict[str, str] = {}
+ for target in targets:
+ targetLabel = target.label or _ISO_LABELS.get(target.code, target.code)
+ keysToTranslate = {body.sourceText: "TextMultilingual field"}
+ translated = await _translateBatch(keysToTranslate, targetLabel, target.code, billingCb)
+ val = translated.get(body.sourceText, "")
+ if val:
+ results[target.code] = val
+
+ return {"translations": results}
diff --git a/modules/routes/routeInvitations.py b/modules/routes/routeInvitations.py
index 6e34eb88..9354c31c 100644
--- a/modules/routes/routeInvitations.py
+++ b/modules/routes/routeInvitations.py
@@ -14,13 +14,19 @@ from fastapi import APIRouter, HTTPException, Depends, Request, Query
from typing import List, Dict, Any, Optional
from fastapi import status
import logging
+import json
+import math
from pydantic import BaseModel, Field, model_validator
from modules.auth import limiter, getRequestContext, RequestContext, getCurrentUser
from modules.datamodels.datamodelUam import User
+from modules.datamodels.datamodelPagination import PaginationParams, PaginationMetadata, normalize_pagination_dict
+from modules.routes.routeDataUsers import _applyFiltersAndSort
from modules.datamodels.datamodelInvitation import Invitation
from modules.interfaces.interfaceDbApp import getRootInterface
from modules.shared.timeUtils import getUtcTimestamp
+from modules.shared.i18nRegistry import apiRouteContext
+routeApiMsg = apiRouteContext("routeInvitations")
logger = logging.getLogger(__name__)
@@ -157,7 +163,7 @@ def create_invitation(
if not context.mandateId:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
- detail="X-Mandate-Id header is required for mandate-level invitations"
+ detail=routeApiMsg("X-Mandate-Id header is required for mandate-level invitations")
)
mandateId = str(context.mandateId)
# Validate roles are mandate-level (no featureInstanceId)
@@ -184,12 +190,12 @@ def create_invitation(
if str(context.mandateId) != mandateId:
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
- detail="Access denied to this mandate"
+ detail=routeApiMsg("Access denied to this mandate")
)
if not _hasMandateAdminRole(context):
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
- detail="Mandate-Admin role required to create invitations"
+ detail=routeApiMsg("Mandate-Admin role required to create invitations")
)
# Calculate expiration time
@@ -394,15 +400,16 @@ def create_invitation(
)
-@router.get("/", response_model=List[Dict[str, Any]])
+@router.get("/")
@limiter.limit("60/minute")
def list_invitations(
request: Request,
frontendUrl: str = Query(..., description="Frontend URL for building invite links (provided by frontend)"),
includeUsed: bool = Query(False, description="Include already used invitations"),
includeExpired: bool = Query(False, description="Include expired invitations"),
+ pagination: Optional[str] = Query(None, description="JSON-encoded PaginationParams object"),
context: RequestContext = Depends(getRequestContext)
-) -> List[Dict[str, Any]]:
+):
"""
List invitations for the current mandate.
@@ -422,14 +429,14 @@ def list_invitations(
if not context.mandateId:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
- detail="X-Mandate-Id header is required"
+ detail=routeApiMsg("X-Mandate-Id header is required")
)
# Check mandate admin permission
if not _hasMandateAdminRole(context):
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
- detail="Mandate-Admin role required to list invitations"
+ detail=routeApiMsg("Mandate-Admin role required to list invitations")
)
try:
@@ -468,6 +475,30 @@ def list_invitations(
"isUsedUp": currentUses >= maxUses
})
+ paginationParams = None
+ if pagination:
+ try:
+ paginationDict = json.loads(pagination)
+ if paginationDict:
+ paginationDict = normalize_pagination_dict(paginationDict)
+ paginationParams = PaginationParams(**paginationDict)
+ except (json.JSONDecodeError, ValueError) as e:
+ raise HTTPException(status_code=400, detail=f"Invalid pagination parameter: {str(e)}")
+
+ if paginationParams:
+ filtered = _applyFiltersAndSort(result, paginationParams)
+ totalItems = len(filtered)
+ totalPages = math.ceil(totalItems / paginationParams.pageSize) if totalItems > 0 else 0
+ startIdx = (paginationParams.page - 1) * paginationParams.pageSize
+ endIdx = startIdx + paginationParams.pageSize
+ return {
+ "items": filtered[startIdx:endIdx],
+ "pagination": PaginationMetadata(
+ currentPage=paginationParams.page, pageSize=paginationParams.pageSize,
+ totalItems=totalItems, totalPages=totalPages,
+ sort=paginationParams.sort, filters=paginationParams.filters,
+ ).model_dump(),
+ }
return result
except HTTPException:
@@ -493,9 +524,9 @@ def get_invitation_filter_values(
) -> list:
"""Return distinct filter values for a column in invitations."""
if not context.mandateId:
- raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="X-Mandate-Id header is required")
+ raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=routeApiMsg("X-Mandate-Id header is required"))
if not _hasMandateAdminRole(context):
- raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Mandate-Admin role required")
+ raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail=routeApiMsg("Mandate-Admin role required"))
try:
from modules.routes.routeDataUsers import _handleFilterValuesRequest
rootInterface = getRootInterface()
@@ -546,14 +577,14 @@ def revoke_invitation(
if not context.mandateId:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
- detail="X-Mandate-Id header is required"
+ detail=routeApiMsg("X-Mandate-Id header is required")
)
# Check mandate admin permission
if not _hasMandateAdminRole(context):
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
- detail="Mandate-Admin role required to revoke invitations"
+ detail=routeApiMsg("Mandate-Admin role required to revoke invitations")
)
try:
@@ -572,14 +603,14 @@ def revoke_invitation(
if str(invitation.mandateId) != str(context.mandateId):
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
- detail="Access denied to this invitation"
+ detail=routeApiMsg("Access denied to this invitation")
)
# Already revoked?
if invitation.revokedAt:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
- detail="Invitation is already revoked"
+ detail=routeApiMsg("Invitation is already revoked")
)
# Revoke invitation
@@ -752,14 +783,14 @@ def accept_invitation(
if not invitation:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
- detail="Invitation not found"
+ detail=routeApiMsg("Invitation not found")
)
# Validate invitation
if invitation.revokedAt:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
- detail="Invitation has been revoked"
+ detail=routeApiMsg("Invitation has been revoked")
)
currentTime = getUtcTimestamp()
@@ -767,7 +798,7 @@ def accept_invitation(
if expiresAt < currentTime:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
- detail="Invitation has expired"
+ detail=routeApiMsg("Invitation has expired")
)
currentUses = invitation.currentUses or 0
@@ -775,7 +806,7 @@ def accept_invitation(
if currentUses >= maxUses:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
- detail="Invitation has reached maximum uses"
+ detail=routeApiMsg("Invitation has reached maximum uses")
)
# Validate user matches - invitation is bound by username or email
@@ -804,7 +835,7 @@ def accept_invitation(
else:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
- detail="Invitation has no target user or email"
+ detail=routeApiMsg("Invitation has no target user or email")
)
mandateId = str(invitation.mandateId) if invitation.mandateId else None
diff --git a/modules/routes/routeMessaging.py b/modules/routes/routeMessaging.py
deleted file mode 100644
index 42e15f0e..00000000
--- a/modules/routes/routeMessaging.py
+++ /dev/null
@@ -1,512 +0,0 @@
-# Copyright (c) 2025 Patrick Motsch
-# All rights reserved.
-from fastapi import APIRouter, HTTPException, Depends, Body, Path, Request, Query
-from typing import List, Dict, Any, Optional
-from fastapi import status
-import logging
-import json
-
-# Import auth module
-from modules.auth import limiter, getCurrentUser, getRequestContext, RequestContext
-from modules.datamodels.datamodelRbac import Role
-
-# Import interfaces
-import modules.interfaces.interfaceDbManagement as interfaceDbManagement
-from modules.datamodels.datamodelMessaging import (
- MessagingSubscription,
- MessagingSubscriptionRegistration,
- MessagingDelivery,
- MessagingChannel,
- MessagingEventParameters,
- MessagingSubscriptionExecutionResult
-)
-from modules.datamodels.datamodelUam import User
-from modules.datamodels.datamodelPagination import PaginationParams, PaginatedResponse, PaginationMetadata, normalize_pagination_dict
-
-# Configure logger
-logger = logging.getLogger(__name__)
-
-# Create router for messaging endpoints
-router = APIRouter(
- prefix="/api/messaging",
- tags=["Messaging"],
- responses={404: {"description": "Not found"}}
-)
-
-
-# Subscription Endpoints
-
-@router.get("/subscriptions", response_model=PaginatedResponse[MessagingSubscription])
-@limiter.limit("60/minute")
-def get_subscriptions(
- request: Request,
- pagination: Optional[str] = Query(None, description="JSON-encoded PaginationParams object"),
- currentUser: User = Depends(getCurrentUser)
-) -> PaginatedResponse[MessagingSubscription]:
- """Get subscriptions with optional pagination, sorting, and filtering."""
- paginationParams = None
- if pagination:
- try:
- paginationDict = json.loads(pagination)
- if paginationDict:
- paginationDict = normalize_pagination_dict(paginationDict)
- paginationParams = PaginationParams(**paginationDict) if paginationDict else None
- except (json.JSONDecodeError, ValueError) as e:
- raise HTTPException(
- status_code=400,
- detail=f"Invalid pagination parameter: {str(e)}"
- )
-
- managementInterface = interfaceDbManagement.getInterface(currentUser)
- result = managementInterface.getAllSubscriptions(pagination=paginationParams)
-
- if paginationParams:
- return PaginatedResponse(
- items=result.items,
- pagination=PaginationMetadata(
- currentPage=paginationParams.page,
- pageSize=paginationParams.pageSize,
- totalItems=result.totalItems,
- totalPages=result.totalPages,
- sort=paginationParams.sort,
- filters=paginationParams.filters
- )
- )
- else:
- return PaginatedResponse(
- items=result,
- pagination=None
- )
-
-
-@router.post("/subscriptions", response_model=MessagingSubscription)
-@limiter.limit("60/minute")
-def create_subscription(
- request: Request,
- subscription: MessagingSubscription,
- currentUser: User = Depends(getCurrentUser)
-) -> MessagingSubscription:
- """Create a new subscription"""
- managementInterface = interfaceDbManagement.getInterface(currentUser)
-
- subscriptionData = subscription.model_dump(exclude={"id"})
- newSubscription = managementInterface.createSubscription(subscriptionData)
-
- return MessagingSubscription(**newSubscription)
-
-
-@router.get("/subscriptions/{subscriptionId}", response_model=MessagingSubscription)
-@limiter.limit("60/minute")
-def get_subscription(
- request: Request,
- subscriptionId: str = Path(..., description="ID of the subscription"),
- currentUser: User = Depends(getCurrentUser)
-) -> MessagingSubscription:
- """Get a specific subscription"""
- managementInterface = interfaceDbManagement.getInterface(currentUser)
-
- subscription = managementInterface.getSubscription(subscriptionId)
- if not subscription:
- raise HTTPException(
- status_code=status.HTTP_404_NOT_FOUND,
- detail=f"Subscription with ID {subscriptionId} not found"
- )
-
- return subscription
-
-
-@router.put("/subscriptions/{subscriptionId}", response_model=MessagingSubscription)
-@limiter.limit("60/minute")
-def update_subscription(
- request: Request,
- subscriptionId: str = Path(..., description="ID of the subscription to update"),
- subscriptionData: MessagingSubscription = Body(...),
- currentUser: User = Depends(getCurrentUser)
-) -> MessagingSubscription:
- """Update an existing subscription"""
- managementInterface = interfaceDbManagement.getInterface(currentUser)
-
- existingSubscription = managementInterface.getSubscription(subscriptionId)
- if not existingSubscription:
- raise HTTPException(
- status_code=status.HTTP_404_NOT_FOUND,
- detail=f"Subscription with ID {subscriptionId} not found"
- )
-
- updateData = subscriptionData.model_dump(exclude={"id", "subscriptionId"})
- updatedSubscription = managementInterface.updateSubscription(subscriptionId, updateData)
-
- if not updatedSubscription:
- raise HTTPException(
- status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
- detail="Error updating the subscription"
- )
-
- return MessagingSubscription(**updatedSubscription)
-
-
-@router.delete("/subscriptions/{subscriptionId}", response_model=Dict[str, Any])
-@limiter.limit("60/minute")
-def delete_subscription(
- request: Request,
- subscriptionId: str = Path(..., description="ID of the subscription to delete"),
- currentUser: User = Depends(getCurrentUser)
-) -> Dict[str, Any]:
- """Delete a subscription"""
- managementInterface = interfaceDbManagement.getInterface(currentUser)
-
- existingSubscription = managementInterface.getSubscription(subscriptionId)
- if not existingSubscription:
- raise HTTPException(
- status_code=status.HTTP_404_NOT_FOUND,
- detail=f"Subscription with ID {subscriptionId} not found"
- )
-
- success = managementInterface.deleteSubscription(subscriptionId)
- if not success:
- raise HTTPException(
- status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
- detail="Error deleting the subscription"
- )
-
- return {"message": f"Subscription with ID {subscriptionId} successfully deleted"}
-
-
-# Registration Endpoints
-
-@router.get("/subscriptions/{subscriptionId}/registrations", response_model=PaginatedResponse[MessagingSubscriptionRegistration])
-@limiter.limit("60/minute")
-def get_subscription_registrations(
- request: Request,
- subscriptionId: str = Path(..., description="ID of the subscription"),
- pagination: Optional[str] = Query(None, description="JSON-encoded PaginationParams object"),
- currentUser: User = Depends(getCurrentUser)
-) -> PaginatedResponse[MessagingSubscriptionRegistration]:
- """Get registrations for a subscription"""
- paginationParams = None
- if pagination:
- try:
- paginationDict = json.loads(pagination)
- if paginationDict:
- paginationDict = normalize_pagination_dict(paginationDict)
- paginationParams = PaginationParams(**paginationDict) if paginationDict else None
- except (json.JSONDecodeError, ValueError) as e:
- raise HTTPException(
- status_code=400,
- detail=f"Invalid pagination parameter: {str(e)}"
- )
-
- managementInterface = interfaceDbManagement.getInterface(currentUser)
- result = managementInterface.getAllRegistrations(
- subscriptionId=subscriptionId,
- pagination=paginationParams
- )
-
- if paginationParams:
- return PaginatedResponse(
- items=result.items,
- pagination=PaginationMetadata(
- currentPage=paginationParams.page,
- pageSize=paginationParams.pageSize,
- totalItems=result.totalItems,
- totalPages=result.totalPages,
- sort=paginationParams.sort,
- filters=paginationParams.filters
- )
- )
- else:
- return PaginatedResponse(
- items=result,
- pagination=None
- )
-
-
-@router.post("/subscriptions/{subscriptionId}/subscribe", response_model=MessagingSubscriptionRegistration)
-@limiter.limit("60/minute")
-def subscribe_user(
- request: Request,
- subscriptionId: str = Path(..., description="ID of the subscription"),
- channel: MessagingChannel = Body(..., embed=True),
- channelConfig: str = Body(..., embed=True),
- currentUser: User = Depends(getCurrentUser)
-) -> MessagingSubscriptionRegistration:
- """Subscribe user to a subscription with a specific channel"""
- managementInterface = interfaceDbManagement.getInterface(currentUser)
-
- registration = managementInterface.subscribeUser(
- subscriptionId=subscriptionId,
- userId=currentUser.id,
- channel=channel,
- channelConfig=channelConfig
- )
-
- return MessagingSubscriptionRegistration(**registration)
-
-
-@router.delete("/subscriptions/{subscriptionId}/unsubscribe", response_model=Dict[str, Any])
-@limiter.limit("60/minute")
-def unsubscribe_user(
- request: Request,
- subscriptionId: str = Path(..., description="ID of the subscription"),
- channel: MessagingChannel = Body(..., embed=True),
- currentUser: User = Depends(getCurrentUser)
-) -> Dict[str, Any]:
- """Unsubscribe user from a subscription for a specific channel"""
- managementInterface = interfaceDbManagement.getInterface(currentUser)
-
- success = managementInterface.unsubscribeUser(
- subscriptionId=subscriptionId,
- userId=currentUser.id,
- channel=channel
- )
-
- if not success:
- raise HTTPException(
- status_code=status.HTTP_404_NOT_FOUND,
- detail="Registration not found"
- )
-
- return {"message": f"Successfully unsubscribed from {subscriptionId} for channel {channel.value}"}
-
-
-@router.get("/registrations", response_model=PaginatedResponse[MessagingSubscriptionRegistration])
-@limiter.limit("60/minute")
-def get_my_registrations(
- request: Request,
- pagination: Optional[str] = Query(None, description="JSON-encoded PaginationParams object"),
- currentUser: User = Depends(getCurrentUser)
-) -> PaginatedResponse[MessagingSubscriptionRegistration]:
- """Get own registrations"""
- paginationParams = None
- if pagination:
- try:
- paginationDict = json.loads(pagination)
- if paginationDict:
- paginationDict = normalize_pagination_dict(paginationDict)
- paginationParams = PaginationParams(**paginationDict) if paginationDict else None
- except (json.JSONDecodeError, ValueError) as e:
- raise HTTPException(
- status_code=400,
- detail=f"Invalid pagination parameter: {str(e)}"
- )
-
- managementInterface = interfaceDbManagement.getInterface(currentUser)
- result = managementInterface.getAllRegistrations(
- userId=currentUser.id,
- pagination=paginationParams
- )
-
- if paginationParams:
- return PaginatedResponse(
- items=result.items,
- pagination=PaginationMetadata(
- currentPage=paginationParams.page,
- pageSize=paginationParams.pageSize,
- totalItems=result.totalItems,
- totalPages=result.totalPages,
- sort=paginationParams.sort,
- filters=paginationParams.filters
- )
- )
- else:
- return PaginatedResponse(
- items=result,
- pagination=None
- )
-
-
-@router.put("/registrations/{registrationId}", response_model=MessagingSubscriptionRegistration)
-@limiter.limit("60/minute")
-def update_registration(
- request: Request,
- registrationId: str = Path(..., description="ID of the registration to update"),
- registrationData: MessagingSubscriptionRegistration = Body(...),
- currentUser: User = Depends(getCurrentUser)
-) -> MessagingSubscriptionRegistration:
- """Update a registration"""
- managementInterface = interfaceDbManagement.getInterface(currentUser)
-
- existingRegistration = managementInterface.getRegistration(registrationId)
- if not existingRegistration:
- raise HTTPException(
- status_code=status.HTTP_404_NOT_FOUND,
- detail=f"Registration with ID {registrationId} not found"
- )
-
- updateData = registrationData.model_dump(exclude={"id", "subscriptionId", "userId"})
- updatedRegistration = managementInterface.updateRegistration(registrationId, updateData)
-
- if not updatedRegistration:
- raise HTTPException(
- status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
- detail="Error updating the registration"
- )
-
- return MessagingSubscriptionRegistration(**updatedRegistration)
-
-
-@router.delete("/registrations/{registrationId}", response_model=Dict[str, Any])
-@limiter.limit("60/minute")
-def delete_registration(
- request: Request,
- registrationId: str = Path(..., description="ID of the registration to delete"),
- currentUser: User = Depends(getCurrentUser)
-) -> Dict[str, Any]:
- """Delete a registration"""
- managementInterface = interfaceDbManagement.getInterface(currentUser)
-
- existingRegistration = managementInterface.getRegistration(registrationId)
- if not existingRegistration:
- raise HTTPException(
- status_code=status.HTTP_404_NOT_FOUND,
- detail=f"Registration with ID {registrationId} not found"
- )
-
- success = managementInterface.deleteRegistration(registrationId)
- if not success:
- raise HTTPException(
- status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
- detail="Error deleting the registration"
- )
-
- return {"message": f"Registration with ID {registrationId} successfully deleted"}
-
-
-# Trigger Endpoints
-
-def _getTriggerKey(request: Request) -> str:
- """Custom key function for trigger rate limiting per subscriptionId"""
- subscriptionId = request.path_params.get("subscriptionId", "unknown")
- return f"{request.client.host}:{subscriptionId}"
-
-
-@router.post("/trigger/{subscriptionId}", response_model=MessagingSubscriptionExecutionResult)
-@limiter.limit("60/minute", key_func=_getTriggerKey)
-def trigger_subscription(
- request: Request,
- subscriptionId: str = Path(..., description="ID of the subscription to trigger"),
- eventParameters: Dict[str, Any] = Body(...),
- context: RequestContext = Depends(getRequestContext)
-) -> MessagingSubscriptionExecutionResult:
- """
- Trigger a subscription with event parameters.
-
- Requires Mandate-Admin role or SysAdmin.
- """
- # RBAC-Check: Admin or Mandate-Admin can trigger
- if not _hasTriggerPermission(context):
- raise HTTPException(
- status_code=status.HTTP_403_FORBIDDEN,
- detail="Admin or Mandate-Admin role required to trigger subscriptions"
- )
-
- # Get messaging service from request app state
- from modules.serviceHub import getInterface as getServicesInterface
- services = getServicesInterface(context.user, None, mandateId=str(context.mandateId))
-
- # Konvertiere Dict zu Pydantic Model
- eventParams = MessagingEventParameters(triggerData=eventParameters)
-
- executionResult = services.messaging.executeSubscription(subscriptionId, eventParams)
- return executionResult
-
-
-def _hasTriggerPermission(context: RequestContext) -> bool:
- """
- Check if user has permission to trigger subscriptions.
- Requires admin or mandate-admin role.
- """
- if context.hasSysAdminRole:
- return True
-
- if not context.roleIds:
- return False
-
- try:
- from modules.interfaces.interfaceDbApp import getRootInterface
- rootInterface = getRootInterface()
-
- for roleId in context.roleIds:
- role = rootInterface.getRole(roleId)
- if role:
- roleLabel = role.roleLabel
- # Admin role at mandate level or system admin
- if roleLabel in ("admin", "sysadmin"):
- return True
-
- return False
-
- except Exception as e:
- logger.error(f"Error checking trigger permission: {e}")
- return False
-
-
-# Delivery Endpoints
-
-@router.get("/deliveries", response_model=PaginatedResponse[MessagingDelivery])
-@limiter.limit("60/minute")
-def get_deliveries(
- request: Request,
- subscriptionId: Optional[str] = Query(None, description="Filter by subscription ID"),
- pagination: Optional[str] = Query(None, description="JSON-encoded PaginationParams object"),
- currentUser: User = Depends(getCurrentUser)
-) -> PaginatedResponse[MessagingDelivery]:
- """Get delivery history"""
- paginationParams = None
- if pagination:
- try:
- paginationDict = json.loads(pagination)
- if paginationDict:
- paginationDict = normalize_pagination_dict(paginationDict)
- paginationParams = PaginationParams(**paginationDict) if paginationDict else None
- except (json.JSONDecodeError, ValueError) as e:
- raise HTTPException(
- status_code=400,
- detail=f"Invalid pagination parameter: {str(e)}"
- )
-
- managementInterface = interfaceDbManagement.getInterface(currentUser)
- result = managementInterface.getDeliveries(
- subscriptionId=subscriptionId,
- userId=currentUser.id, # Users can only see their own deliveries
- pagination=paginationParams
- )
-
- if paginationParams:
- return PaginatedResponse(
- items=result.items,
- pagination=PaginationMetadata(
- currentPage=paginationParams.page,
- pageSize=paginationParams.pageSize,
- totalItems=result.totalItems,
- totalPages=result.totalPages,
- sort=paginationParams.sort,
- filters=paginationParams.filters
- )
- )
- else:
- return PaginatedResponse(
- items=result,
- pagination=None
- )
-
-
-@router.get("/deliveries/{deliveryId}", response_model=MessagingDelivery)
-@limiter.limit("60/minute")
-def get_delivery(
- request: Request,
- deliveryId: str = Path(..., description="ID of the delivery"),
- currentUser: User = Depends(getCurrentUser)
-) -> MessagingDelivery:
- """Get a specific delivery"""
- managementInterface = interfaceDbManagement.getInterface(currentUser)
-
- delivery = managementInterface.getDelivery(deliveryId)
- if not delivery:
- raise HTTPException(
- status_code=status.HTTP_404_NOT_FOUND,
- detail=f"Delivery with ID {deliveryId} not found"
- )
-
- return delivery
-
diff --git a/modules/routes/routeNotifications.py b/modules/routes/routeNotifications.py
index a533a535..41d7fe26 100644
--- a/modules/routes/routeNotifications.py
+++ b/modules/routes/routeNotifications.py
@@ -22,6 +22,8 @@ from modules.datamodels.datamodelNotification import (
from modules.datamodels.datamodelRbac import Role
from modules.interfaces.interfaceDbApp import getRootInterface
from modules.shared.timeUtils import getUtcTimestamp
+from modules.shared.i18nRegistry import apiRouteContext
+routeApiMsg = apiRouteContext("routeNotifications")
logger = logging.getLogger(__name__)
@@ -238,14 +240,14 @@ def markAsRead(
if not notification:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
- detail="Notification not found"
+ detail=routeApiMsg("Notification not found")
)
# Verify ownership
if str(notification.userId) != str(currentUser.id):
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
- detail="Not authorized to access this notification"
+ detail=routeApiMsg("Not authorized to access this notification")
)
# Update status
@@ -332,21 +334,21 @@ def executeAction(
if not notification:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
- detail="Notification not found"
+ detail=routeApiMsg("Notification not found")
)
# Verify ownership
if str(notification.userId) != str(currentUser.id):
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
- detail="Not authorized to access this notification"
+ detail=routeApiMsg("Not authorized to access this notification")
)
# Check if already actioned
if notification.status == NotificationStatus.ACTIONED.value:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
- detail="Notification has already been actioned"
+ detail=routeApiMsg("Notification has already been actioned")
)
# Validate action exists
@@ -416,7 +418,7 @@ def _handleInvitationAction(
if not invitationId:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
- detail="No invitation reference found"
+ detail=routeApiMsg("No invitation reference found")
)
# Get the invitation (Pydantic model)
@@ -425,7 +427,7 @@ def _handleInvitationAction(
if not invitation:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
- detail="Invitation not found"
+ detail=routeApiMsg("Invitation not found")
)
# Verify user matches (username or email)
@@ -436,18 +438,18 @@ def _handleInvitationAction(
if currentUser.username != targetUsername:
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
- detail="This invitation is for a different user"
+ detail=routeApiMsg("This invitation is for a different user")
)
elif invitationEmail:
if not currentUserEmail or currentUserEmail != invitationEmail:
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
- detail="This invitation is for a different user"
+ detail=routeApiMsg("This invitation is for a different user")
)
else:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
- detail="Invitation has no target user or email"
+ detail=routeApiMsg("Invitation has no target user or email")
)
# Check if invitation is still valid
@@ -456,13 +458,13 @@ def _handleInvitationAction(
if expiresAt < currentTime:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
- detail="Invitation has expired"
+ detail=routeApiMsg("Invitation has expired")
)
if invitation.revokedAt:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
- detail="Invitation has been revoked"
+ detail=routeApiMsg("Invitation has been revoked")
)
currentUses = invitation.currentUses or 0
@@ -470,7 +472,7 @@ def _handleInvitationAction(
if currentUses >= maxUses:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
- detail="Invitation has reached maximum uses"
+ detail=routeApiMsg("Invitation has reached maximum uses")
)
if actionId == "accept":
@@ -565,14 +567,14 @@ def deleteNotification(
if not notification:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
- detail="Notification not found"
+ detail=routeApiMsg("Notification not found")
)
# Verify ownership
if str(notification.userId) != str(currentUser.id):
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
- detail="Not authorized to delete this notification"
+ detail=routeApiMsg("Not authorized to delete this notification")
)
# Mark as dismissed (soft delete)
diff --git a/modules/routes/routeRealEstate.py b/modules/routes/routeRealEstate.py
index a3466aca..aa3d98f4 100644
--- a/modules/routes/routeRealEstate.py
+++ b/modules/routes/routeRealEstate.py
@@ -64,6 +64,8 @@ from modules.routes.routeRealEstateScraping import (
# Import attribute utilities for model schema
from modules.shared.attributeUtils import getModelAttributeDefinitions
+from modules.shared.i18nRegistry import apiRouteContext
+routeApiMsg = apiRouteContext("routeRealEstate")
# Configure logger
logger = logging.getLogger(__name__)
@@ -308,7 +310,7 @@ async def update_project(
raise HTTPException(status_code=404, detail=f"Project '{projectId}' not found")
updated = interface.updateProjekt(projectId, data)
if not updated:
- raise HTTPException(status_code=500, detail="Update failed")
+ raise HTTPException(status_code=500, detail=routeApiMsg("Update failed"))
return updated
@@ -329,7 +331,7 @@ async def delete_project(
if not projekt or str(getattr(projekt, "featureInstanceId", None)) != instanceId:
raise HTTPException(status_code=404, detail=f"Project '{projectId}' not found")
if not interface.deleteProjekt(projectId):
- raise HTTPException(status_code=500, detail="Delete failed")
+ raise HTTPException(status_code=500, detail=routeApiMsg("Delete failed"))
# ----- Parcels CRUD -----
@@ -429,7 +431,7 @@ async def update_parcel(
raise HTTPException(status_code=404, detail=f"Parcel '{parcelId}' not found")
updated = interface.updateParzelle(parcelId, data)
if not updated:
- raise HTTPException(status_code=500, detail="Update failed")
+ raise HTTPException(status_code=500, detail=routeApiMsg("Update failed"))
return updated
@@ -450,7 +452,7 @@ async def delete_parcel(
if not parzelle or str(getattr(parzelle, "featureInstanceId", None)) != instanceId:
raise HTTPException(status_code=404, detail=f"Parcel '{parcelId}' not found")
if not interface.deleteParzelle(parcelId):
- raise HTTPException(status_code=500, detail="Delete failed")
+ raise HTTPException(status_code=500, detail=routeApiMsg("Delete failed"))
# ============================================================================
@@ -495,7 +497,7 @@ async def process_command(
logger.warning(f"CSRF token missing for POST /api/realestate/command from user {currentUser.id}")
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
- detail="CSRF token missing. Please include X-CSRF-Token header."
+ detail=routeApiMsg("CSRF token missing. Please include X-CSRF-Token header.")
)
# Basic CSRF token format validation
@@ -503,7 +505,7 @@ async def process_command(
logger.warning(f"Invalid CSRF token format for POST /api/realestate/command from user {currentUser.id}")
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
- detail="Invalid CSRF token format"
+ detail=routeApiMsg("Invalid CSRF token format")
)
# Validate token is hex string
@@ -513,7 +515,7 @@ async def process_command(
logger.warning(f"CSRF token is not a valid hex string for POST /api/realestate/command from user {currentUser.id}")
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
- detail="Invalid CSRF token format"
+ detail=routeApiMsg("Invalid CSRF token format")
)
logger.info(f"Processing command request from user {currentUser.id} (mandate: {currentUser.mandateId})")
@@ -566,7 +568,7 @@ async def get_available_tables(
logger.warning(f"CSRF token missing for GET /api/realestate/tables from user {currentUser.id}")
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
- detail="CSRF token missing. Please include X-CSRF-Token header."
+ detail=routeApiMsg("CSRF token missing. Please include X-CSRF-Token header.")
)
# Basic CSRF token format validation
@@ -574,7 +576,7 @@ async def get_available_tables(
logger.warning(f"Invalid CSRF token format for GET /api/realestate/tables from user {currentUser.id}")
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
- detail="Invalid CSRF token format"
+ detail=routeApiMsg("Invalid CSRF token format")
)
# Validate token is hex string
@@ -584,7 +586,7 @@ async def get_available_tables(
logger.warning(f"CSRF token is not a valid hex string for GET /api/realestate/tables from user {currentUser.id}")
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
- detail="Invalid CSRF token format"
+ detail=routeApiMsg("Invalid CSRF token format")
)
logger.info(f"Getting available tables for user {currentUser.id} (mandate: {currentUser.mandateId})")
@@ -675,7 +677,7 @@ async def get_table_data(
logger.warning(f"CSRF token missing for GET /api/realestate/table/{table} from user {currentUser.id}")
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
- detail="CSRF token missing. Please include X-CSRF-Token header."
+ detail=routeApiMsg("CSRF token missing. Please include X-CSRF-Token header.")
)
# Basic CSRF token format validation
@@ -683,7 +685,7 @@ async def get_table_data(
logger.warning(f"Invalid CSRF token format for GET /api/realestate/table/{table} from user {currentUser.id}")
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
- detail="Invalid CSRF token format"
+ detail=routeApiMsg("Invalid CSRF token format")
)
# Validate token is hex string
@@ -693,7 +695,7 @@ async def get_table_data(
logger.warning(f"CSRF token is not a valid hex string for GET /api/realestate/table/{table} from user {currentUser.id}")
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
- detail="Invalid CSRF token format"
+ detail=routeApiMsg("Invalid CSRF token format")
)
logger.info(f"Getting table data for '{table}' from user {currentUser.id} (mandate: {currentUser.mandateId})")
@@ -844,7 +846,7 @@ async def create_table_record(
logger.warning(f"CSRF token missing for POST /api/realestate/table/{table} from user {currentUser.id}")
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
- detail="CSRF token missing. Please include X-CSRF-Token header."
+ detail=routeApiMsg("CSRF token missing. Please include X-CSRF-Token header.")
)
# Basic CSRF token format validation
@@ -852,7 +854,7 @@ async def create_table_record(
logger.warning(f"Invalid CSRF token format for POST /api/realestate/table/{table} from user {currentUser.id}")
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
- detail="Invalid CSRF token format"
+ detail=routeApiMsg("Invalid CSRF token format")
)
# Validate token is hex string
@@ -862,7 +864,7 @@ async def create_table_record(
logger.warning(f"CSRF token is not a valid hex string for POST /api/realestate/table/{table} from user {currentUser.id}")
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
- detail="Invalid CSRF token format"
+ detail=routeApiMsg("Invalid CSRF token format")
)
# Special handling for Projekt with parcel data
@@ -874,7 +876,7 @@ async def create_table_record(
if not label:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
- detail="label is required"
+ detail=routeApiMsg("label is required")
)
status_prozess = data.get("statusProzess", "Eingang")
@@ -887,7 +889,7 @@ async def create_table_record(
if not isinstance(parzellen_data, list):
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
- detail="parzellen must be an array"
+ detail=routeApiMsg("parzellen must be an array")
)
elif "parzelle" in data:
# Single parcel (backward compatibility)
@@ -898,7 +900,7 @@ async def create_table_record(
if not parzellen_data:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
- detail="parzelle or parzellen data is required"
+ detail=routeApiMsg("parzelle or parzellen data is required")
)
# Use helper function to create project with parcel data
@@ -1073,7 +1075,7 @@ async def search_parcel(
logger.warning(f"CSRF token missing for GET /api/realestate/parcel/search from user {currentUser.id}")
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
- detail="CSRF token missing. Please include X-CSRF-Token header."
+ detail=routeApiMsg("CSRF token missing. Please include X-CSRF-Token header.")
)
logger.info(f"Searching parcel for user {currentUser.id} (mandate: {currentUser.mandateId}) with location: {location}")
@@ -2059,21 +2061,21 @@ async def add_parcel_to_project(
logger.warning(f"CSRF token missing for POST /api/realestate/projekt/{projekt_id}/add-parcel from user {currentUser.id}")
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
- detail="CSRF token missing. Please include X-CSRF-Token header."
+ detail=routeApiMsg("CSRF token missing. Please include X-CSRF-Token header.")
)
# Validate CSRF token format
if not isinstance(csrf_token, str) or len(csrf_token) < 16 or len(csrf_token) > 64:
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
- detail="Invalid CSRF token format"
+ detail=routeApiMsg("Invalid CSRF token format")
)
try:
int(csrf_token, 16)
except ValueError:
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
- detail="Invalid CSRF token format"
+ detail=routeApiMsg("Invalid CSRF token format")
)
logger.info(f"Adding parcel to project {projekt_id} for user {currentUser.id} (mandate: {currentUser.mandateId})")
@@ -2294,7 +2296,7 @@ async def get_bzo_information(
logger.warning(f"CSRF token missing for GET /api/realestate/bzo-information from user {currentUser.id}")
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
- detail="CSRF token missing. Please include X-CSRF-Token header."
+ detail=routeApiMsg("CSRF token missing. Please include X-CSRF-Token header.")
)
# Basic CSRF token format validation
@@ -2302,7 +2304,7 @@ async def get_bzo_information(
logger.warning(f"Invalid CSRF token format for GET /api/realestate/bzo-information from user {currentUser.id}")
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
- detail="Invalid CSRF token format"
+ detail=routeApiMsg("Invalid CSRF token format")
)
# Validate token is hex string
@@ -2312,7 +2314,7 @@ async def get_bzo_information(
logger.warning(f"CSRF token is not a valid hex string for GET /api/realestate/bzo-information from user {currentUser.id}")
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
- detail="Invalid CSRF token format"
+ detail=routeApiMsg("Invalid CSRF token format")
)
logger.info(f"Extracting BZO information for Gemeinde '{gemeinde}', Bauzone '{bauzone}' (user: {currentUser.id}, mandate: {currentUser.mandateId})")
diff --git a/modules/routes/routeRealEstateScraping.py b/modules/routes/routeRealEstateScraping.py
index 4b8d2d0d..abb54299 100644
--- a/modules/routes/routeRealEstateScraping.py
+++ b/modules/routes/routeRealEstateScraping.py
@@ -36,6 +36,8 @@ from modules.connectors.connectorOerebWfs import OerebWfsConnector
# Import Tavily connector for BZO document search
from modules.aicore.aicorePluginTavily import AiTavily
+from modules.shared.i18nRegistry import apiRouteContext
+routeApiMsg = apiRouteContext("routeRealEstateScraping")
# Configure logger
logger = logging.getLogger(__name__)
@@ -107,7 +109,7 @@ async def scrape_switzerland_route(
logger.warning(f"CSRF token missing for POST /api/realestate/scrape-switzerland from user {currentUser.id}")
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
- detail="CSRF token missing. Please include X-CSRF-Token header."
+ detail=routeApiMsg("CSRF token missing. Please include X-CSRF-Token header.")
)
# Basic CSRF token format validation
@@ -115,7 +117,7 @@ async def scrape_switzerland_route(
logger.warning(f"Invalid CSRF token format for POST /api/realestate/scrape-switzerland from user {currentUser.id}")
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
- detail="Invalid CSRF token format"
+ detail=routeApiMsg("Invalid CSRF token format")
)
# Validate token is hex string
@@ -125,7 +127,7 @@ async def scrape_switzerland_route(
logger.warning(f"CSRF token is not a valid hex string for POST /api/realestate/scrape-switzerland from user {currentUser.id}")
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
- detail="Invalid CSRF token format"
+ detail=routeApiMsg("Invalid CSRF token format")
)
# Extract parameters from body with defaults
@@ -137,19 +139,19 @@ async def scrape_switzerland_route(
if grid_size <= 0 or grid_size > 10000:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
- detail="grid_size must be between 0 and 10000 meters"
+ detail=routeApiMsg("grid_size must be between 0 and 10000 meters")
)
if max_concurrent <= 0 or max_concurrent > 200:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
- detail="max_concurrent must be between 1 and 200"
+ detail=routeApiMsg("max_concurrent must be between 1 and 200")
)
if batch_size <= 0 or batch_size > 1000:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
- detail="batch_size must be between 1 and 1000"
+ detail=routeApiMsg("batch_size must be between 1 and 1000")
)
logger.info(
@@ -246,7 +248,7 @@ async def get_all_gemeinden(
logger.warning(f"CSRF token missing for GET /api/realestate/gemeinden from user {currentUser.id}")
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
- detail="CSRF token missing. Please include X-CSRF-Token header."
+ detail=routeApiMsg("CSRF token missing. Please include X-CSRF-Token header.")
)
# Basic CSRF token format validation
@@ -254,7 +256,7 @@ async def get_all_gemeinden(
logger.warning(f"Invalid CSRF token format for GET /api/realestate/gemeinden from user {currentUser.id}")
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
- detail="Invalid CSRF token format"
+ detail=routeApiMsg("Invalid CSRF token format")
)
# Validate token is hex string
@@ -264,7 +266,7 @@ async def get_all_gemeinden(
logger.warning(f"CSRF token is not a valid hex string for GET /api/realestate/gemeinden from user {currentUser.id}")
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
- detail="Invalid CSRF token format"
+ detail=routeApiMsg("Invalid CSRF token format")
)
logger.info(f"Fetching all Gemeinden for user {currentUser.id} (mandate: {currentUser.mandateId}), only_current={only_current}")
@@ -548,7 +550,7 @@ async def fetch_bzo_documents(
logger.warning(f"CSRF token missing for POST /api/realestate/gemeinden/fetch-bzo-documents from user {currentUser.id}")
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
- detail="CSRF token missing. Please include X-CSRF-Token header."
+ detail=routeApiMsg("CSRF token missing. Please include X-CSRF-Token header.")
)
# Basic CSRF token format validation
@@ -556,7 +558,7 @@ async def fetch_bzo_documents(
logger.warning(f"Invalid CSRF token format for POST /api/realestate/gemeinden/fetch-bzo-documents from user {currentUser.id}")
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
- detail="Invalid CSRF token format"
+ detail=routeApiMsg("Invalid CSRF token format")
)
# Validate token is hex string
@@ -566,7 +568,7 @@ async def fetch_bzo_documents(
logger.warning(f"CSRF token is not a valid hex string for POST /api/realestate/gemeinden/fetch-bzo-documents from user {currentUser.id}")
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
- detail="Invalid CSRF token format"
+ detail=routeApiMsg("Invalid CSRF token format")
)
logger.info(f"Starting BZO document fetch for user {currentUser.id} (mandate: {currentUser.mandateId})")
diff --git a/modules/routes/routeSecurityAdmin.py b/modules/routes/routeSecurityAdmin.py
deleted file mode 100644
index acba83b4..00000000
--- a/modules/routes/routeSecurityAdmin.py
+++ /dev/null
@@ -1,433 +0,0 @@
-# Copyright (c) 2025 Patrick Motsch
-# All rights reserved.
-"""
-Security Administration routes.
-MULTI-TENANT: These are SYSTEM-LEVEL operations requiring isSysAdmin=true.
-No mandate context - SysAdmin manages infrastructure, not data.
-"""
-from fastapi import APIRouter, HTTPException, Depends, status, Request, Body
-from fastapi.responses import FileResponse, JSONResponse
-from typing import Optional, Dict, Any, List
-import os
-import logging
-
-from modules.auth import getCurrentUser, limiter, requireSysAdmin
-from modules.connectors.connectorDbPostgre import DatabaseConnector
-from modules.interfaces.interfaceDbApp import getRootInterface
-from modules.datamodels.datamodelUam import User, UserInDB, AuthAuthority
-from modules.datamodels.datamodelSecurity import Token
-from modules.shared.configuration import APP_CONFIG
-
-logger = logging.getLogger(__name__)
-
-router = APIRouter(
- prefix="/api/admin",
- tags=["Security Administration"],
- responses={
- 404: {"description": "Not found"},
- 400: {"description": "Bad request"},
- 401: {"description": "Unauthorized"},
- 403: {"description": "Forbidden"},
- 500: {"description": "Internal server error"}
- }
-)
-
-
-def _getPoweronDatabases() -> List[str]:
- """Load databases from PostgreSQL host matching poweron_%."""
- dbHost = APP_CONFIG.get("DB_HOST")
- dbUser = APP_CONFIG.get("DB_USER")
- dbPassword = APP_CONFIG.get("DB_PASSWORD_SECRET")
- dbPort = int(APP_CONFIG.get("DB_PORT", 5432))
-
- # Connect to 'postgres' system database to query all databases
- connector = DatabaseConnector(
- dbHost=dbHost,
- dbDatabase="postgres",
- dbUser=dbUser,
- dbPassword=dbPassword,
- dbPort=dbPort,
- userId=None
- )
-
- try:
- with connector.connection.cursor() as cursor:
- cursor.execute(
- """
- SELECT datname
- FROM pg_database
- WHERE datname LIKE 'poweron_%'
- AND datistemplate = false
- ORDER BY datname
- """
- )
- rows = cursor.fetchall()
- return [row["datname"] for row in rows if row.get("datname")]
- finally:
- connector.close()
-
-
-def _getDatabaseConnector(databaseName: str, userId: str = None) -> DatabaseConnector:
- """
- Create a generic DatabaseConnector for any poweron_* database.
- Fully dynamic - no interface mapping needed.
- """
- if not databaseName.startswith("poweron_"):
- raise ValueError(f"Invalid database name: {databaseName}")
-
- dbHost = APP_CONFIG.get("DB_HOST")
- dbUser = APP_CONFIG.get("DB_USER")
- dbPassword = APP_CONFIG.get("DB_PASSWORD_SECRET")
- dbPort = int(APP_CONFIG.get("DB_PORT", 5432))
-
- connector = DatabaseConnector(
- dbHost=dbHost,
- dbDatabase=databaseName,
- dbUser=dbUser,
- dbPassword=dbPassword,
- dbPort=dbPort,
- userId=userId
- )
- return connector
-
-
-# ----------------------
-# Token listing and revocation
-# ----------------------
-
-@router.get("/tokens")
-@limiter.limit("30/minute")
-def list_tokens(
- request: Request,
- currentUser: User = Depends(requireSysAdmin),
- userId: Optional[str] = None,
- authority: Optional[str] = None,
- sessionId: Optional[str] = None,
- statusFilter: Optional[str] = None,
- connectionId: Optional[str] = None,
-) -> List[Dict[str, Any]]:
- """
- List all tokens in the system.
- MULTI-TENANT: SysAdmin-only, no mandate filter (system-level view).
- """
- try:
- appInterface = getRootInterface()
-
- recordFilter: Dict[str, Any] = {}
- if userId:
- recordFilter["userId"] = userId
- if authority:
- recordFilter["authority"] = authority
- if sessionId:
- recordFilter["sessionId"] = sessionId
- if connectionId:
- recordFilter["connectionId"] = connectionId
- if statusFilter:
- recordFilter["status"] = statusFilter
- # MULTI-TENANT: SysAdmin sees ALL tokens (no mandate filter)
- # Use interface method to get tokens with flexible filtering
- tokens = appInterface.getAllTokens(recordFilter=recordFilter)
- return tokens
- except HTTPException:
- raise
- except Exception as e:
- logger.error(f"Error listing tokens: {str(e)}")
- raise HTTPException(status_code=500, detail="Failed to list tokens")
-
-
-@router.post("/tokens/revoke/user")
-@limiter.limit("30/minute")
-def revoke_tokens_by_user(
- request: Request,
- currentUser: User = Depends(requireSysAdmin),
- payload: Dict[str, Any] = Body(...)
-) -> Dict[str, Any]:
- """
- Revoke all tokens for a user.
- MULTI-TENANT: SysAdmin-only, can revoke across all mandates.
- """
- try:
- userId = payload.get("userId")
- authority = payload.get("authority")
- reason = payload.get("reason", "sysadmin revoke")
- if not userId:
- raise HTTPException(status_code=400, detail="userId is required")
-
- appInterface = getRootInterface()
- # MULTI-TENANT: SysAdmin can revoke any user's tokens (no mandate restriction)
- count = appInterface.revokeTokensByUser(
- userId=userId,
- authority=AuthAuthority(authority) if authority else None,
- mandateId=None, # SysAdmin: no mandate filter
- revokedBy=currentUser.id,
- reason=reason
- )
- return {"revoked": count}
- except HTTPException:
- raise
- except Exception as e:
- logger.error(f"Error revoking tokens by user: {str(e)}")
- raise HTTPException(status_code=500, detail="Failed to revoke tokens")
-
-
-@router.post("/tokens/revoke/session")
-@limiter.limit("30/minute")
-def revoke_tokens_by_session(
- request: Request,
- currentUser: User = Depends(requireSysAdmin),
- payload: Dict[str, Any] = Body(...)
-) -> Dict[str, Any]:
- """
- Revoke all tokens for a specific session.
- MULTI-TENANT: SysAdmin-only.
- """
- try:
- userId = payload.get("userId")
- sessionId = payload.get("sessionId")
- authority = payload.get("authority", "local")
- reason = payload.get("reason", "sysadmin session revoke")
- if not userId or not sessionId:
- raise HTTPException(status_code=400, detail="userId and sessionId are required")
-
- appInterface = getRootInterface()
- # MULTI-TENANT: SysAdmin can revoke any session (no mandate check)
- count = appInterface.revokeTokensBySessionId(
- sessionId=sessionId,
- userId=userId,
- authority=AuthAuthority(authority),
- revokedBy=currentUser.id,
- reason=reason
- )
- return {"revoked": count}
- except HTTPException:
- raise
- except Exception as e:
- logger.error(f"Error revoking tokens by session: {str(e)}")
- raise HTTPException(status_code=500, detail="Failed to revoke session tokens")
-
-
-@router.post("/tokens/revoke/id")
-@limiter.limit("30/minute")
-def revoke_token_by_id(
- request: Request,
- currentUser: User = Depends(requireSysAdmin),
- payload: Dict[str, Any] = Body(...)
-) -> Dict[str, Any]:
- """
- Revoke a specific token by ID.
- MULTI-TENANT: SysAdmin-only.
- """
- try:
- tokenId = payload.get("tokenId")
- reason = payload.get("reason", "sysadmin revoke")
- if not tokenId:
- raise HTTPException(status_code=400, detail="tokenId is required")
- appInterface = getRootInterface()
- # MULTI-TENANT: SysAdmin can revoke any token (no mandate check)
- ok = appInterface.revokeTokenById(tokenId, revokedBy=currentUser.id, reason=reason)
- return {"revoked": 1 if ok else 0}
- except HTTPException:
- raise
- except Exception as e:
- logger.error(f"Error revoking token by id: {str(e)}")
- raise HTTPException(status_code=500, detail="Failed to revoke token")
-
-
-@router.post("/tokens/revoke/mandate")
-@limiter.limit("10/minute")
-def revoke_tokens_by_mandate(
- request: Request,
- currentUser: User = Depends(requireSysAdmin),
- payload: Dict[str, Any] = Body(...)
-) -> Dict[str, Any]:
- """
- Revoke all tokens for users in a mandate.
- MULTI-TENANT: SysAdmin-only, can revoke tokens for any mandate.
- """
- try:
- mandateId = payload.get("mandateId")
- authority = payload.get("authority", "local")
- reason = payload.get("reason", "sysadmin mandate revoke")
- if not mandateId:
- raise HTTPException(status_code=400, detail="mandateId is required")
-
- # MULTI-TENANT: SysAdmin can revoke tokens for any mandate
- appInterface = getRootInterface()
-
- # Get all UserMandate entries for this mandate to find users using interface method
- userMandates = appInterface.getUserMandatesByMandate(mandateId)
-
- total = 0
- for um in userMandates:
- total += appInterface.revokeTokensByUser(
- userId=um.userId,
- authority=AuthAuthority(authority) if authority else None,
- mandateId=None, # Revoke all tokens for user
- revokedBy=currentUser.id,
- reason=reason
- )
- return {"revoked": total}
- except HTTPException:
- raise
- except Exception as e:
- logger.error(f"Error revoking tokens by mandate: {str(e)}")
- raise HTTPException(status_code=500, detail="Failed to revoke mandate tokens")
-
-
-
-
-# ----------------------
-# Database admin
-# ----------------------
-
-@router.get("/databases")
-@limiter.limit("10/minute")
-def list_databases(
- request: Request,
- currentUser: User = Depends(requireSysAdmin)
-) -> Dict[str, Any]:
- """
- List all poweron_* databases.
- MULTI-TENANT: SysAdmin-only (infrastructure management).
- """
- try:
- databases = _getPoweronDatabases()
- return {"databases": databases}
- except Exception as e:
- logger.error(f"Failed to load databases from host: {e}")
- raise HTTPException(status_code=500, detail="Failed to load databases from host")
-
-
-@router.get("/databases/{database_name}/tables")
-@limiter.limit("30/minute")
-def get_database_tables(
- request: Request,
- database_name: str,
- currentUser: User = Depends(requireSysAdmin)
-) -> Dict[str, Any]:
- """
- List tables in a database.
- MULTI-TENANT: SysAdmin-only (infrastructure management).
- """
- if not database_name.startswith("poweron_"):
- raise HTTPException(status_code=400, detail="Invalid database name format")
-
- connector = None
- try:
- connector = _getDatabaseConnector(database_name, currentUser.id)
- tables = connector.getTables()
- return {"tables": tables}
- except ValueError as e:
- raise HTTPException(status_code=400, detail=str(e))
- except Exception as e:
- logger.error(f"Error getting database tables: {str(e)}")
- raise HTTPException(status_code=500, detail=f"Failed to get database tables: {str(e)}")
- finally:
- if connector:
- connector.close()
-
-
-@router.post("/databases/{database_name}/tables/{table_name}/drop")
-@limiter.limit("10/minute")
-def drop_table(
- request: Request,
- database_name: str,
- table_name: str,
- currentUser: User = Depends(requireSysAdmin),
- payload: Dict[str, Any] = Body(...)
-) -> Dict[str, Any]:
- """
- Drop a table from a database.
- MULTI-TENANT: SysAdmin-only (infrastructure management).
- """
- if not database_name.startswith("poweron_"):
- raise HTTPException(status_code=400, detail="Invalid database name format")
-
- connector = None
- try:
- connector = _getDatabaseConnector(database_name, currentUser.id)
- conn = connector.connection
- with conn.cursor() as cursor:
- # Check if table exists
- cursor.execute("""
- SELECT table_name FROM information_schema.tables
- WHERE table_schema = 'public' AND table_name = %s
- """, (table_name,))
- if not cursor.fetchone():
- raise HTTPException(status_code=404, detail="Table not found")
-
- # Drop the table
- cursor.execute(f'DROP TABLE IF EXISTS "{table_name}" CASCADE')
- conn.commit()
- logger.warning(f"Admin drop_table executed by {currentUser.id}: dropped table '{table_name}' from database '{database_name}'")
- return {"message": f"Table '{table_name}' dropped successfully from database '{database_name}'"}
- except HTTPException:
- raise
- except ValueError as e:
- raise HTTPException(status_code=400, detail=str(e))
- except Exception as e:
- logger.error(f"Error dropping table: {str(e)}")
- if connector and connector.connection:
- connector.connection.rollback()
- raise HTTPException(status_code=500, detail="Failed to drop table")
- finally:
- if connector:
- connector.close()
-
-
-@router.post("/databases/drop")
-@limiter.limit("5/minute")
-def drop_database(
- request: Request,
- currentUser: User = Depends(requireSysAdmin),
- payload: Dict[str, Any] = Body(...)
-) -> Dict[str, Any]:
- """
- Drop all tables in a database.
- MULTI-TENANT: SysAdmin-only (infrastructure management).
- """
- dbName = payload.get("database")
-
- if not dbName or not dbName.startswith("poweron_"):
- raise HTTPException(status_code=400, detail="Invalid database name")
-
- # Validate database exists
- try:
- configuredDbs = _getPoweronDatabases()
- except Exception as e:
- logger.warning(f"Failed to load databases from host: {e}")
- configuredDbs = []
-
- if configuredDbs and dbName not in configuredDbs:
- raise HTTPException(status_code=400, detail=f"Database not found. Available: {configuredDbs}")
-
- connector = None
- try:
- connector = _getDatabaseConnector(dbName, currentUser.id)
- conn = connector.connection
- with conn.cursor() as cursor:
- # Drop all user tables (public schema)
- cursor.execute("""
- SELECT table_name FROM information_schema.tables
- WHERE table_schema = 'public' AND table_type = 'BASE TABLE'
- """)
- tables = [row['table_name'] for row in cursor.fetchall()]
- dropped = []
- for tbl in tables:
- cursor.execute(f'DROP TABLE IF EXISTS "{tbl}" CASCADE')
- dropped.append(tbl)
- conn.commit()
- logger.warning(f"Admin drop_database executed by {currentUser.id}: dropped tables from '{dbName}': {dropped}")
- return {"droppedTables": dropped}
- except ValueError as e:
- raise HTTPException(status_code=400, detail=str(e))
- except Exception as e:
- logger.error(f"Error dropping database tables: {str(e)}")
- if connector and connector.connection:
- connector.connection.rollback()
- raise HTTPException(status_code=500, detail="Failed to drop database tables")
- finally:
- if connector:
- connector.close()
-
-
diff --git a/modules/routes/routeSecurityClickup.py b/modules/routes/routeSecurityClickup.py
index 3d1aeed5..ca787391 100644
--- a/modules/routes/routeSecurityClickup.py
+++ b/modules/routes/routeSecurityClickup.py
@@ -19,6 +19,8 @@ from modules.datamodels.datamodelUam import AuthAuthority, User, ConnectionStatu
from modules.datamodels.datamodelSecurity import Token, TokenPurpose
from modules.auth import getCurrentUser, limiter, SECRET_KEY, ALGORITHM
from modules.shared.timeUtils import createExpirationTimestamp, getUtcTimestamp
+from modules.shared.i18nRegistry import apiRouteContext
+routeApiMsg = apiRouteContext("routeSecurityClickup")
logger = logging.getLogger(__name__)
@@ -53,7 +55,7 @@ def _require_clickup_config():
if not CLIENT_ID or not CLIENT_SECRET or not REDIRECT_URI:
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
- detail="ClickUp OAuth is not configured (Service_CLICKUP_CLIENT_ID, Service_CLICKUP_CLIENT_SECRET, Service_CLICKUP_OAUTH_REDIRECT_URI)",
+ detail=routeApiMsg("ClickUp OAuth is not configured (Service_CLICKUP_CLIENT_ID, Service_CLICKUP_CLIENT_SECRET, Service_CLICKUP_OAUTH_REDIRECT_URI)"),
)
@@ -87,7 +89,7 @@ def auth_connect(
connection = conn
break
if not connection:
- raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="ClickUp connection not found")
+ raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=routeApiMsg("ClickUp connection not found"))
state_jwt = _issue_oauth_state(
{
@@ -123,11 +125,11 @@ async def auth_connect_callback(
"""OAuth callback for ClickUp data connection."""
state_data = _parse_oauth_state(state)
if state_data.get("flow") != _FLOW_CONNECT:
- raise HTTPException(status_code=400, detail="Invalid OAuth flow for this callback")
+ raise HTTPException(status_code=400, detail=routeApiMsg("Invalid OAuth flow for this callback"))
connection_id = state_data.get("connectionId")
user_id = state_data.get("userId")
if not connection_id or not user_id:
- raise HTTPException(status_code=400, detail="Missing connection or user in OAuth state")
+ raise HTTPException(status_code=400, detail=routeApiMsg("Missing connection or user in OAuth state"))
_require_clickup_config()
diff --git a/modules/routes/routeSecurityGoogle.py b/modules/routes/routeSecurityGoogle.py
index 2b380db0..96b84203 100644
--- a/modules/routes/routeSecurityGoogle.py
+++ b/modules/routes/routeSecurityGoogle.py
@@ -33,6 +33,8 @@ from modules.auth import (
from modules.auth.tokenManager import TokenManager
from modules.auth.oauthProviderConfig import googleAuthScopes, googleDataScopes
from modules.shared.timeUtils import createExpirationTimestamp, getUtcTimestamp, parseTimestamp
+from modules.shared.i18nRegistry import apiRouteContext
+routeApiMsg = apiRouteContext("routeSecurityGoogle")
logger = logging.getLogger(__name__)
@@ -109,29 +111,11 @@ DATA_CLIENT_SECRET = APP_CONFIG.get("Service_GOOGLE_DATA_CLIENT_SECRET")
DATA_REDIRECT_URI = APP_CONFIG.get("Service_GOOGLE_DATA_REDIRECT_URI")
-@router.get("/config")
-def get_config():
- """Debug: OAuth configuration (Auth vs Data apps)."""
- return {
- "auth_client_id": AUTH_CLIENT_ID,
- "auth_client_secret": "***" if AUTH_CLIENT_SECRET else None,
- "auth_redirect_uri": AUTH_REDIRECT_URI,
- "auth_scopes": googleAuthScopes,
- "data_client_id": DATA_CLIENT_ID,
- "data_client_secret": "***" if DATA_CLIENT_SECRET else None,
- "data_redirect_uri": DATA_REDIRECT_URI,
- "data_scopes": googleDataScopes,
- "config_loaded": bool(
- AUTH_CLIENT_ID and AUTH_CLIENT_SECRET and AUTH_REDIRECT_URI and DATA_CLIENT_ID and DATA_CLIENT_SECRET and DATA_REDIRECT_URI
- ),
- }
-
-
def _require_google_auth_config():
if not AUTH_CLIENT_ID or not AUTH_CLIENT_SECRET or not AUTH_REDIRECT_URI:
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
- detail="Google Auth OAuth is not configured (Service_GOOGLE_AUTH_*)",
+ detail=routeApiMsg("Google Auth OAuth is not configured (Service_GOOGLE_AUTH_*)"),
)
@@ -139,7 +123,7 @@ def _require_google_data_config():
if not DATA_CLIENT_ID or not DATA_CLIENT_SECRET or not DATA_REDIRECT_URI:
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
- detail="Google Data OAuth is not configured (Service_GOOGLE_DATA_*)",
+ detail=routeApiMsg("Google Data OAuth is not configured (Service_GOOGLE_DATA_*)"),
)
@@ -179,7 +163,7 @@ async def auth_login_callback(
"""OAuth callback for Google Auth app (login only)."""
state_data = _parse_oauth_state(state)
if state_data.get("flow") != _FLOW_LOGIN:
- raise HTTPException(status_code=400, detail="Invalid OAuth flow for this callback")
+ raise HTTPException(status_code=400, detail=routeApiMsg("Invalid OAuth flow for this callback"))
_require_google_auth_config()
oauth = OAuth2Session(client_id=AUTH_CLIENT_ID, redirect_uri=AUTH_REDIRECT_URI)
@@ -214,7 +198,7 @@ async def auth_login_callback(
if user_info_response.status_code != 200:
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
- detail="Failed to get user info from Google",
+ detail=routeApiMsg("Failed to get user info from Google"),
)
user_info = user_info_response.json()
@@ -310,7 +294,7 @@ def auth_connect(
connection = conn
break
if not connection:
- raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Google connection not found")
+ raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=routeApiMsg("Google connection not found"))
state_jwt = _issue_oauth_state(
{
@@ -359,11 +343,11 @@ async def auth_connect_callback(
"""OAuth callback for Google Data app (UserConnection)."""
state_data = _parse_oauth_state(state)
if state_data.get("flow") != _FLOW_CONNECT:
- raise HTTPException(status_code=400, detail="Invalid OAuth flow for this callback")
+ raise HTTPException(status_code=400, detail=routeApiMsg("Invalid OAuth flow for this callback"))
connection_id = state_data.get("connectionId")
user_id = state_data.get("userId")
if not connection_id or not user_id:
- raise HTTPException(status_code=400, detail="Missing connection or user in OAuth state")
+ raise HTTPException(status_code=400, detail=routeApiMsg("Missing connection or user in OAuth state"))
_require_google_data_config()
oauth = OAuth2Session(client_id=DATA_CLIENT_ID, redirect_uri=DATA_REDIRECT_URI)
@@ -419,7 +403,7 @@ async def auth_connect_callback(
if user_info_response.status_code != 200:
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
- detail="Failed to get user info from Google",
+ detail=routeApiMsg("Failed to get user info from Google"),
)
user_info = user_info_response.json()
@@ -557,7 +541,7 @@ def logout(
if not token:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
- detail="No token found",
+ detail=routeApiMsg("No token found"),
)
try:
@@ -568,7 +552,7 @@ def logout(
logger.error(f"Failed to decode JWT on Google logout: {str(e)}")
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
- detail="Invalid token",
+ detail=routeApiMsg("Invalid token"),
)
revoked = 0
@@ -618,52 +602,6 @@ def logout(
)
-@router.post("/verify")
-@limiter.limit("30/minute")
-async def verify_token(
- request: Request,
- currentUser: User = Depends(getCurrentUser),
-) -> Dict[str, Any]:
- try:
- appInterface = getInterface(currentUser)
- connections = appInterface.getUserConnections(currentUser.id)
- google_connection = None
- for conn in connections:
- if conn.authority == AuthAuthority.GOOGLE:
- google_connection = conn
- break
- if not google_connection:
- raise HTTPException(
- status_code=status.HTTP_404_NOT_FOUND,
- detail="No Google connection found for current user",
- )
- current_token = TokenManager().getFreshToken(google_connection.id)
- if not current_token:
- raise HTTPException(
- status_code=status.HTTP_404_NOT_FOUND,
- detail="No Google token found for this connection",
- )
- token_verification = await verify_google_token(current_token.tokenAccess)
- return {
- "valid": token_verification.get("valid", False),
- "scopes": token_verification.get("scopes", []),
- "expires_in": token_verification.get("expires_in", 0),
- "email": token_verification.get("email"),
- "user_id": token_verification.get("user_id"),
- "error": token_verification.get("error")
- if not token_verification.get("valid")
- else None,
- }
- except HTTPException:
- raise
- except Exception as e:
- logger.error(f"Error verifying Google token: {str(e)}")
- raise HTTPException(
- status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
- detail=f"Failed to verify token: {str(e)}",
- )
-
-
@router.post("/refresh")
@limiter.limit("10/minute")
async def refresh_token(
@@ -690,7 +628,7 @@ async def refresh_token(
if not google_connection:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
- detail="Requested Google connection not found for current user",
+ detail=routeApiMsg("Requested Google connection not found for current user"),
)
else:
for conn in connections:
@@ -700,13 +638,13 @@ async def refresh_token(
if not google_connection:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
- detail="No Google connection found for current user",
+ detail=routeApiMsg("No Google connection found for current user"),
)
current_token = TokenManager().getFreshToken(google_connection.id)
if not current_token:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
- detail="No Google token found for this connection",
+ detail=routeApiMsg("No Google token found for this connection"),
)
expiresAtValue = parseTimestamp(current_token.expiresAt)
google_connection.expiresAt = (
diff --git a/modules/routes/routeSecurityLocal.py b/modules/routes/routeSecurityLocal.py
index 9ec4fc38..fa68b5b9 100644
--- a/modules/routes/routeSecurityLocal.py
+++ b/modules/routes/routeSecurityLocal.py
@@ -21,6 +21,8 @@ from modules.datamodels.datamodelUam import User, UserInDB, AuthAuthority, Manda
from modules.datamodels.datamodelSecurity import Token, TokenPurpose
from modules.shared.configuration import APP_CONFIG
from modules.shared.timeUtils import getUtcTimestamp
+from modules.shared.i18nRegistry import apiRouteContext
+routeApiMsg = apiRouteContext("routeSecurityLocal")
# Configure logger
logger = logging.getLogger(__name__)
@@ -212,7 +214,7 @@ def _ensureHomeMandate(rootInterface, user) -> None:
rootInterface._provisionMandateForUser(
userId=userId,
mandateName=homeMandateName,
- planKey="TRIAL_7D",
+ planKey="TRIAL_14D",
)
logger.info(f"Created Home mandate '{homeMandateName}' for user {user.username}")
@@ -231,7 +233,7 @@ def login(
if not csrf_token:
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
- detail="CSRF token missing"
+ detail=routeApiMsg("CSRF token missing")
)
# Get gateway interface with root privileges for authentication
@@ -248,7 +250,7 @@ def login(
if not user:
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
- detail="Invalid username or password",
+ detail=routeApiMsg("Invalid username or password"),
headers={"WWW-Authenticate": "Bearer"},
)
@@ -280,7 +282,7 @@ def login(
expires_at = datetime.fromtimestamp(payload.get("exp"))
except Exception as e:
logger.error(f"Failed to decode access token: {str(e)}")
- raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Failed to finalize token")
+ raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=routeApiMsg("Failed to finalize token"))
# Get user-specific interface for token operations
userInterface = getInterface(user)
@@ -399,7 +401,7 @@ def register_user(
Unified registration path: invited users skip Home mandate provisioning
(they join the inviting mandate instead). Non-invited users get a Home
- mandate with TRIAL_7D. Company mandate creation is deferred to onboarding.
+ mandate with TRIAL_14D. Company mandate creation is deferred to onboarding.
Args:
userData: User data (username, email, fullName, language)
@@ -425,7 +427,7 @@ def register_user(
if not user:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
- detail="Failed to register user"
+ detail=routeApiMsg("Failed to register user")
)
# Check for pending invitations BEFORE provisioning.
@@ -466,7 +468,7 @@ def register_user(
provisionResult = appInterface._provisionMandateForUser(
userId=str(user.id),
mandateName=homeMandateName,
- planKey="TRIAL_7D",
+ planKey="TRIAL_14D",
)
logger.info(f"Provisioned Home mandate for user {user.id}: {provisionResult}")
except Exception as provErr:
@@ -581,32 +583,32 @@ def refresh_token(
# Get refresh token from cookie
refresh_token = request.cookies.get('refresh_token')
if not refresh_token:
- raise HTTPException(status_code=401, detail="No refresh token found")
+ raise HTTPException(status_code=401, detail=routeApiMsg("No refresh token found"))
# Validate refresh token
try:
payload = jwt.decode(refresh_token, SECRET_KEY, algorithms=[ALGORITHM])
if payload.get("type") != "refresh":
- raise HTTPException(status_code=401, detail="Invalid refresh token type")
+ raise HTTPException(status_code=401, detail=routeApiMsg("Invalid refresh token type"))
except jwt.ExpiredSignatureError:
- raise HTTPException(status_code=401, detail="Refresh token expired")
+ raise HTTPException(status_code=401, detail=routeApiMsg("Refresh token expired"))
except jwt.JWTError:
- raise HTTPException(status_code=401, detail="Invalid refresh token")
+ raise HTTPException(status_code=401, detail=routeApiMsg("Invalid refresh token"))
# Get user information from refresh token payload
user_id = payload.get("userId")
if not user_id:
- raise HTTPException(status_code=401, detail="Invalid refresh token - missing user ID")
+ raise HTTPException(status_code=401, detail=routeApiMsg("Invalid refresh token - missing user ID"))
# Get user from database using the user ID from refresh token
try:
app_interface = getRootInterface()
current_user = app_interface.getUser(user_id)
if not current_user:
- raise HTTPException(status_code=401, detail="User not found")
+ raise HTTPException(status_code=401, detail=routeApiMsg("User not found"))
except Exception as e:
logger.error(f"Failed to get user from database: {str(e)}")
- raise HTTPException(status_code=500, detail="Failed to validate user")
+ raise HTTPException(status_code=500, detail=routeApiMsg("Failed to validate user"))
# Create new token data
# MULTI-TENANT: Token does NOT contain mandateId anymore
@@ -627,7 +629,7 @@ def refresh_token(
expires_at = datetime.fromtimestamp(payload.get("exp"))
except Exception as e:
logger.error(f"Failed to decode new access token: {str(e)}")
- raise HTTPException(status_code=500, detail="Failed to create new token")
+ raise HTTPException(status_code=500, detail=routeApiMsg("Failed to create new token"))
return {
"type": "token_refresh_success",
@@ -643,7 +645,7 @@ def refresh_token(
raise
except Exception as e:
logger.error(f"Token refresh error: {str(e)}")
- raise HTTPException(status_code=500, detail="Token refresh failed")
+ raise HTTPException(status_code=500, detail=routeApiMsg("Token refresh failed"))
@router.post("/logout")
@limiter.limit("30/minute")
@@ -661,7 +663,7 @@ def logout(request: Request, response: Response, currentUser: User = Depends(get
token = auth_header.split(" ", 1)[1].strip()
if not token:
- raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="No token found")
+ raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=routeApiMsg("No token found"))
try:
payload = jwt.decode(token, SECRET_KEY, algorithms=[ALGORITHM])
@@ -669,7 +671,7 @@ def logout(request: Request, response: Response, currentUser: User = Depends(get
jti = payload.get("jti")
except Exception as e:
logger.error(f"Failed to decode JWT on logout: {str(e)}")
- raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Invalid token")
+ raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=routeApiMsg("Invalid token"))
revoked = 0
if session_id:
@@ -834,7 +836,7 @@ def onboarding_provision(
request: Request,
currentUser: User = Depends(getCurrentUser),
companyName: str = Body(None, embed=True),
- planKey: str = Body("TRIAL_7D", embed=True),
+ planKey: str = Body("TRIAL_14D", embed=True),
) -> Dict[str, Any]:
"""Post-login onboarding: create a mandate for the user.
@@ -882,8 +884,8 @@ def onboarding_provision(
mandateName = (companyName.strip() if companyName and companyName.strip()
else f"Home {currentUser.username}")
- if planKey not in ("TRIAL_7D", "STANDARD_MONTHLY", "STANDARD_YEARLY"):
- planKey = "TRIAL_7D"
+ if planKey not in ("TRIAL_14D", "STARTER_MONTHLY", "STARTER_YEARLY", "PROFESSIONAL_MONTHLY", "PROFESSIONAL_YEARLY", "MAX_MONTHLY", "MAX_YEARLY"):
+ planKey = "TRIAL_14D"
result = appInterface._provisionMandateForUser(
userId=userId,
@@ -927,14 +929,14 @@ def password_reset(
except ValueError:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
- detail="Ungültiger oder abgelaufener Reset-Link"
+ detail=routeApiMsg("Ungültiger oder abgelaufener Reset-Link")
)
# Validate password strength
if len(password) < 8:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
- detail="Passwort muss mindestens 8 Zeichen lang sein"
+ detail=routeApiMsg("Passwort muss mindestens 8 Zeichen lang sein")
)
rootInterface = getRootInterface()
@@ -945,7 +947,7 @@ def password_reset(
if not success:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
- detail="Ungültiger oder abgelaufener Reset-Link"
+ detail=routeApiMsg("Ungültiger oder abgelaufener Reset-Link")
)
# Log success
@@ -968,7 +970,7 @@ def password_reset(
logger.error(f"Error in password reset: {str(e)}")
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
- detail="Passwort-Zurücksetzung fehlgeschlagen"
+ detail=routeApiMsg("Passwort-Zurücksetzung fehlgeschlagen")
)
@@ -1005,10 +1007,10 @@ def _deleteNeutralizationMapping(
rootIf = getRootInterface()
records = rootIf.db.getRecordset(DataNeutralizerAttributes, recordFilter={"id": mappingId})
if not records:
- raise HTTPException(status_code=404, detail="Mapping not found")
+ raise HTTPException(status_code=404, detail=routeApiMsg("Mapping not found"))
rec = records[0]
recUserId = rec.get("userId") if isinstance(rec, dict) else getattr(rec, "userId", None)
if recUserId != userId:
- raise HTTPException(status_code=403, detail="Not your mapping")
+ raise HTTPException(status_code=403, detail=routeApiMsg("Not your mapping"))
rootIf.db.recordDelete(DataNeutralizerAttributes, mappingId)
return {"deleted": True, "id": mappingId}
diff --git a/modules/routes/routeSecurityMsft.py b/modules/routes/routeSecurityMsft.py
index d7fac372..88575881 100644
--- a/modules/routes/routeSecurityMsft.py
+++ b/modules/routes/routeSecurityMsft.py
@@ -34,6 +34,8 @@ from modules.auth import (
from modules.auth.tokenManager import TokenManager
from modules.auth.oauthProviderConfig import msftAuthScopes, msftDataScopes, msftDataScopesForRefresh
from modules.shared.timeUtils import createExpirationTimestamp, getUtcTimestamp, parseTimestamp
+from modules.shared.i18nRegistry import apiRouteContext
+routeApiMsg = apiRouteContext("routeSecurityMsft")
logger = logging.getLogger(__name__)
@@ -80,7 +82,7 @@ def _require_msft_auth_config():
if not AUTH_CLIENT_ID or not AUTH_CLIENT_SECRET or not AUTH_REDIRECT_URI:
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
- detail="Microsoft Auth OAuth is not configured (Service_MSFT_AUTH_*)",
+ detail=routeApiMsg("Microsoft Auth OAuth is not configured (Service_MSFT_AUTH_*)"),
)
@@ -88,7 +90,7 @@ def _require_msft_data_config():
if not DATA_CLIENT_ID or not DATA_CLIENT_SECRET or not DATA_REDIRECT_URI:
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
- detail="Microsoft Data OAuth is not configured (Service_MSFT_DATA_*)",
+ detail=routeApiMsg("Microsoft Data OAuth is not configured (Service_MSFT_DATA_*)"),
)
@@ -140,7 +142,7 @@ async def auth_login_callback(
) -> HTMLResponse:
state_data = _parse_oauth_state(state)
if state_data.get("flow") != _FLOW_LOGIN:
- raise HTTPException(status_code=400, detail="Invalid OAuth flow for this callback")
+ raise HTTPException(status_code=400, detail=routeApiMsg("Invalid OAuth flow for this callback"))
_require_msft_auth_config()
msal_app = msal.ConfidentialClientApplication(
@@ -171,7 +173,7 @@ async def auth_login_callback(
if user_info_response.status_code != 200:
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
- detail="Failed to get user info from Microsoft",
+ detail=routeApiMsg("Failed to get user info from Microsoft"),
)
user_info = user_info_response.json()
@@ -256,7 +258,7 @@ def auth_connect(
break
if not connection:
raise HTTPException(
- status_code=status.HTTP_404_NOT_FOUND, detail="Microsoft connection not found"
+ status_code=status.HTTP_404_NOT_FOUND, detail=routeApiMsg("Microsoft connection not found")
)
msal_app = msal.ConfidentialClientApplication(
@@ -301,11 +303,11 @@ async def auth_connect_callback(
) -> HTMLResponse:
state_data = _parse_oauth_state(state)
if state_data.get("flow") != _FLOW_CONNECT:
- raise HTTPException(status_code=400, detail="Invalid OAuth flow for this callback")
+ raise HTTPException(status_code=400, detail=routeApiMsg("Invalid OAuth flow for this callback"))
connection_id = state_data.get("connectionId")
user_id = state_data.get("userId")
if not connection_id or not user_id:
- raise HTTPException(status_code=400, detail="Missing connection or user in OAuth state")
+ raise HTTPException(status_code=400, detail=routeApiMsg("Missing connection or user in OAuth state"))
_require_msft_data_config()
msal_app = msal.ConfidentialClientApplication(
@@ -343,7 +345,7 @@ async def auth_connect_callback(
if user_info_response.status_code != 200:
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
- detail="Failed to get user info from Microsoft",
+ detail=routeApiMsg("Failed to get user info from Microsoft"),
)
user_info = user_info_response.json()
@@ -465,7 +467,7 @@ def adminconsent(request: Request) -> RedirectResponse:
if not redirect_uri:
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
- detail="Could not derive admin consent redirect URI from Service_MSFT_DATA_REDIRECT_URI",
+ detail=routeApiMsg("Could not derive admin consent redirect URI from Service_MSFT_DATA_REDIRECT_URI"),
)
state_jwt = _issue_oauth_state({"flow": "admin_consent"})
scope_param = _msft_data_admin_consent_scope_param()
@@ -528,7 +530,7 @@ def adminconsent_callback(
state_data = _parse_oauth_state(state)
if state_data.get("flow") != "admin_consent":
- raise HTTPException(status_code=400, detail="Invalid OAuth flow for this callback")
+ raise HTTPException(status_code=400, detail=routeApiMsg("Invalid OAuth flow for this callback"))
granted = str(admin_consent or "").strip().lower() in ("true", "1", "yes")
if not granted:
@@ -615,7 +617,7 @@ def logout(
if not token:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
- detail="No token found",
+ detail=routeApiMsg("No token found"),
)
try:
@@ -626,7 +628,7 @@ def logout(
logger.error(f"Failed to decode JWT on Microsoft logout: {str(e)}")
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
- detail="Invalid token",
+ detail=routeApiMsg("Invalid token"),
)
revoked = 0
@@ -676,24 +678,6 @@ def logout(
)
-@router.post("/cleanup")
-@limiter.limit("5/minute")
-def cleanup_expired_tokens(
- request: Request,
- currentUser: User = Depends(getCurrentUser),
-) -> Dict[str, Any]:
- try:
- appInterface = getInterface(currentUser)
- cleaned_count = appInterface.cleanupExpiredTokens()
- return {"message": "Cleanup completed successfully", "tokens_cleaned": cleaned_count}
- except Exception as e:
- logger.error(f"Error cleaning up expired tokens: {str(e)}")
- raise HTTPException(
- status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
- detail=f"Failed to cleanup expired tokens: {str(e)}",
- )
-
-
@router.post("/refresh")
@limiter.limit("10/minute")
async def refresh_token(
@@ -720,7 +704,7 @@ async def refresh_token(
if not msft_connection:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
- detail="Requested Microsoft connection not found for current user",
+ detail=routeApiMsg("Requested Microsoft connection not found for current user"),
)
else:
for conn in connections:
@@ -730,13 +714,13 @@ async def refresh_token(
if not msft_connection:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
- detail="No Microsoft connection found for current user",
+ detail=routeApiMsg("No Microsoft connection found for current user"),
)
current_token = TokenManager().getFreshToken(msft_connection.id)
if not current_token:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
- detail="No Microsoft token found for this connection",
+ detail=routeApiMsg("No Microsoft token found for this connection"),
)
token_manager = TokenManager()
refreshed_token = token_manager.refreshToken(current_token)
@@ -760,7 +744,7 @@ async def refresh_token(
}
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
- detail="Failed to refresh token",
+ detail=routeApiMsg("Failed to refresh token"),
)
except HTTPException:
raise
diff --git a/modules/routes/routeSharepoint.py b/modules/routes/routeSharepoint.py
index 9bf5b633..4ae38112 100644
--- a/modules/routes/routeSharepoint.py
+++ b/modules/routes/routeSharepoint.py
@@ -13,6 +13,8 @@ from modules.auth import limiter, getCurrentUser
from modules.datamodels.datamodelUam import User, UserConnection
from modules.interfaces.interfaceDbApp import getInterface
from modules.serviceHub import getInterface as getServices
+from modules.shared.i18nRegistry import apiRouteContext
+routeApiMsg = apiRouteContext("routeSharepoint")
logger = logging.getLogger(__name__)
@@ -27,18 +29,6 @@ router = APIRouter(
}
)
-def _getUserConnection(interface, connectionId: str, userId: str) -> Optional[UserConnection]:
- """Get a user connection by ID, ensuring it belongs to the user"""
- try:
- connections = interface.getUserConnections(userId)
- for conn in connections:
- if conn.id == connectionId:
- return conn
- return None
- except Exception as e:
- logger.error(f"Error getting user connection: {str(e)}")
- return None
-
def _getUserConnectionByReference(interface, connectionReference: str, userId: str) -> Optional[UserConnection]:
"""
Get a user connection by reference string (format: connection:authority:username).
@@ -77,211 +67,6 @@ def _getUserConnectionByReference(interface, connectionReference: str, userId: s
logger.error(f"Error getting user connection by reference: {str(e)}")
return None
-@router.get("/{connectionId}/sites", response_model=List[Dict[str, Any]])
-@limiter.limit("30/minute")
-async def get_sharepoint_sites(
- request: Request,
- connectionId: str = Path(..., description="Microsoft connection ID"),
- currentUser: User = Depends(getCurrentUser)
-) -> List[Dict[str, Any]]:
- """Get all SharePoint sites accessible via a Microsoft connection"""
- try:
- interface = getInterface(currentUser)
-
- # Get the connection and verify it belongs to the user
- connection = _getUserConnection(interface, connectionId, currentUser.id)
- if not connection:
- raise HTTPException(
- status_code=status.HTTP_404_NOT_FOUND,
- detail=f"Connection {connectionId} not found or does not belong to user"
- )
-
- # Verify it's a Microsoft connection
- authority = connection.authority.value if hasattr(connection.authority, 'value') else str(connection.authority)
- if authority.lower() != 'msft':
- raise HTTPException(
- status_code=status.HTTP_400_BAD_REQUEST,
- detail=f"Connection {connectionId} is not a Microsoft connection"
- )
-
- # Initialize services
- services = getServices(currentUser, None)
-
- # Set access token on SharePoint service
- if not services.sharepoint.setAccessTokenFromConnection(connection):
- raise HTTPException(
- status_code=status.HTTP_401_UNAUTHORIZED,
- detail="Failed to set SharePoint access token. Connection may be expired or invalid."
- )
-
- # Discover SharePoint sites
- sites = await services.sharepoint.discoverSites()
-
- return sites
-
- except HTTPException:
- raise
- except Exception as e:
- logger.error(f"Error getting SharePoint sites: {str(e)}")
- raise HTTPException(
- status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
- detail=f"Error getting SharePoint sites: {str(e)}"
- )
-
-@router.get("/{connectionId}/sites/{siteId}/folders", response_model=List[Dict[str, Any]])
-@limiter.limit("60/minute")
-async def list_sharepoint_folders(
- request: Request,
- connectionId: str = Path(..., description="Microsoft connection ID"),
- siteId: str = Path(..., description="SharePoint site ID"),
- path: Optional[str] = Query(None, description="Folder path (empty for root)"),
- currentUser: User = Depends(getCurrentUser)
-) -> List[Dict[str, Any]]:
- """List folder contents for a SharePoint site and folder path"""
- try:
- interface = getInterface(currentUser)
-
- # Get the connection and verify it belongs to the user
- connection = _getUserConnection(interface, connectionId, currentUser.id)
- if not connection:
- raise HTTPException(
- status_code=status.HTTP_404_NOT_FOUND,
- detail=f"Connection {connectionId} not found or does not belong to user"
- )
-
- # Verify it's a Microsoft connection
- authority = connection.authority.value if hasattr(connection.authority, 'value') else str(connection.authority)
- if authority.lower() != 'msft':
- raise HTTPException(
- status_code=status.HTTP_400_BAD_REQUEST,
- detail=f"Connection {connectionId} is not a Microsoft connection"
- )
-
- # Initialize services
- services = getServices(currentUser, None)
-
- # Set access token on SharePoint service
- if not services.sharepoint.setAccessTokenFromConnection(connection):
- raise HTTPException(
- status_code=status.HTTP_401_UNAUTHORIZED,
- detail="Failed to set SharePoint access token. Connection may be expired or invalid."
- )
-
- # Normalize folder path (empty string for root)
- folderPath = path or ''
-
- # List folder contents
- items = await services.sharepoint.listFolderContents(siteId, folderPath)
-
- return items or []
-
- except HTTPException:
- raise
- except Exception as e:
- logger.error(f"Error listing SharePoint folders: {str(e)}")
- raise HTTPException(
- status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
- detail=f"Error listing SharePoint folders: {str(e)}"
- )
-
-
-@router.get("/{connectionId}/folder-options", response_model=List[Dict[str, Any]])
-@limiter.limit("30/minute")
-async def getSharepointFolderOptions(
- request: Request,
- connectionId: str = Path(..., description="Microsoft connection ID"),
- siteId: Optional[str] = Query(None, description="Specific site ID to browse (if omitted, returns sites only)"),
- path: Optional[str] = Query(None, description="Folder path within site to browse"),
- currentUser: User = Depends(getCurrentUser)
-) -> List[Dict[str, Any]]:
- """
- Get SharePoint folders formatted as dropdown options.
-
- Two modes:
- 1. If siteId is not provided: Returns list of sites (for site selection)
- 2. If siteId is provided: Returns folders within that site (optionally at specific path)
-
- This avoids expensive iteration through all sites and folders.
- """
- try:
- interface = getInterface(currentUser)
-
- # Get the connection and verify it belongs to the user
- connection = _getUserConnection(interface, connectionId, currentUser.id)
- if not connection:
- raise HTTPException(
- status_code=status.HTTP_404_NOT_FOUND,
- detail=f"Connection {connectionId} not found or does not belong to user"
- )
-
- # Verify it's a Microsoft connection
- authority = connection.authority.value if hasattr(connection.authority, 'value') else str(connection.authority)
- if authority.lower() != 'msft':
- raise HTTPException(
- status_code=status.HTTP_400_BAD_REQUEST,
- detail=f"Connection {connectionId} is not a Microsoft connection"
- )
-
- # Initialize services
- services = getServices(currentUser, None)
-
- # Set access token on SharePoint service
- if not services.sharepoint.setAccessTokenFromConnection(connection):
- raise HTTPException(
- status_code=status.HTTP_401_UNAUTHORIZED,
- detail="Failed to set SharePoint access token. Connection may be expired or invalid."
- )
-
- # Mode 1: Return sites list if no siteId specified
- if not siteId:
- sites = await services.sharepoint.discoverSites()
- return [
- {
- "type": "site",
- "value": site.get("id"),
- "label": site.get("displayName", "Unknown Site"),
- "siteId": site.get("id"),
- "siteName": site.get("displayName", "Unknown Site"),
- "webUrl": site.get("webUrl", ""),
- "path": _extractSitePath(site.get("webUrl", ""))
- }
- for site in sites
- ]
-
- # Mode 2: Return folders within specific site
- folderPath = path or ""
- items = await services.sharepoint.listFolderContents(siteId, folderPath)
-
- if not items:
- return []
-
- folderOptions = []
- for item in items:
- if item.get("type") == "folder":
- folderName = item.get("name", "")
- itemPath = f"{folderPath}/{folderName}" if folderPath else folderName
-
- folderOptions.append({
- "type": "folder",
- "value": itemPath,
- "label": folderName,
- "siteId": siteId,
- "folderName": folderName,
- "path": itemPath,
- "hasChildren": True # Assume folders may have children
- })
-
- return folderOptions
-
- except HTTPException:
- raise
- except Exception as e:
- logger.error(f"Error getting SharePoint folder options: {str(e)}")
- raise HTTPException(
- status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
- detail=f"Error getting SharePoint folder options: {str(e)}"
- )
-
def _extractSitePath(webUrl: str) -> str:
"""Extract site path from webUrl (e.g., https://company.sharepoint.com/sites/MySite -> /sites/MySite)"""
@@ -343,7 +128,7 @@ async def getSharepointFolderOptionsByReference(
if not services.sharepoint.setAccessTokenFromConnection(connection):
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
- detail="Failed to set SharePoint access token. Connection may be expired or invalid."
+ detail=routeApiMsg("Failed to set SharePoint access token. Connection may be expired or invalid.")
)
# Mode 1: Return sites list if no siteId specified
diff --git a/modules/routes/routeStore.py b/modules/routes/routeStore.py
index ab50087c..04517d9b 100644
--- a/modules/routes/routeStore.py
+++ b/modules/routes/routeStore.py
@@ -7,7 +7,7 @@ in the user's explicit mandate. Supports Orphan Control.
"""
from fastapi import APIRouter, HTTPException, Depends, Request
-from typing import List, Dict, Any, Optional
+from typing import List, Dict, Any, Optional, Union
from fastapi import status
import logging
from pydantic import BaseModel, Field
@@ -23,6 +23,8 @@ from modules.interfaces.interfaceFeatures import getFeatureInterface
from modules.security.rbacCatalog import getCatalogService
from modules.security.rbac import RbacClass
from modules.security.rootAccess import getRootDbAppConnector
+from modules.shared.i18nRegistry import apiRouteContext, resolveText
+routeApiMsg = apiRouteContext("routeStore")
logger = logging.getLogger(__name__)
@@ -49,9 +51,9 @@ class StoreDeactivateRequest(BaseModel):
class StoreFeatureResponse(BaseModel):
"""Response model for a store feature."""
featureCode: str
- label: Dict[str, str]
+ label: str
icon: str
- description: Dict[str, str] = {}
+ description: str = ""
instances: List[Dict[str, Any]] = []
canActivate: bool
@@ -242,7 +244,9 @@ def getSubscriptionInfo(
"operative": operative is not None,
"maxDataVolumeMB": plan.maxDataVolumeMB if plan else None,
"maxFeatureInstances": plan.maxFeatureInstances if plan else None,
+ "includedModules": plan.includedModules if plan else 0,
"budgetAiCHF": plan.budgetAiCHF if plan else None,
+ "budgetAiPerUserCHF": plan.budgetAiPerUserCHF if plan else None,
"currentFeatureInstances": len(currentInstances),
"trialEndsAt": sub.get("trialEndsAt"),
}
@@ -285,8 +289,9 @@ def listStoreFeatures(
instances = _getUserInstancesForFeature(db, userId, featureCode, userMandateIds)
result.append(StoreFeatureResponse(
featureCode=featureCode,
- label=featureDef.get("label", {}),
+ label=resolveText(featureDef.get("label")),
icon=featureDef.get("icon", "mdi-puzzle"),
+ description=resolveText(featureDef.get("description")),
instances=instances,
canActivate=True,
))
@@ -327,7 +332,7 @@ def activateStoreFeature(
mandateId = data.mandateId
if not _isUserAdminInMandate(db, userId, mandateId):
- raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Not admin in target mandate")
+ raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail=routeApiMsg("Not admin in target mandate"))
# ── 1. Resolve subscription & plan ──────────────────────────────
from modules.datamodels.datamodelSubscription import MandateSubscription, BUILTIN_PLANS, SubscriptionStatusEnum
@@ -353,13 +358,15 @@ def activateStoreFeature(
)
raise HTTPException(
status_code=status.HTTP_402_PAYMENT_REQUIRED,
- detail="Kein aktives Abonnement. Bitte zuerst ein Abo abschliessen.",
+ detail=routeApiMsg("Kein aktives Abonnement. Bitte zuerst ein Abo abschliessen."),
)
planKey = operative.get("planKey", "")
plan = BUILTIN_PLANS.get(planKey)
hasStripeIds = bool(operative.get("stripeSubscriptionId") and operative.get("stripeItemIdInstances"))
- isBillable = hasStripeIds and plan is not None and (plan.pricePerFeatureInstanceCHF or 0) > 0
+ currentInstances = db.getRecordset(FeatureInstance, recordFilter={"mandateId": mandateId})
+ willExceedIncluded = len(currentInstances) >= (plan.includedModules if plan else 0)
+ isBillable = hasStripeIds and plan is not None and (plan.pricePerFeatureInstanceCHF or 0) > 0 and willExceedIncluded
# ── 2. Capacity check ───────────────────────────────────────────
if plan and plan.maxFeatureInstances is not None:
@@ -367,12 +374,12 @@ def activateStoreFeature(
if len(currentInstances) >= plan.maxFeatureInstances:
raise HTTPException(
status_code=status.HTTP_402_PAYMENT_REQUIRED,
- detail=f"Feature-Instanz-Limit erreicht ({plan.maxFeatureInstances}). Bitte Plan upgraden.",
+ detail=f"Modul-Limit erreicht ({plan.maxFeatureInstances}). Bitte Plan upgraden.",
)
# ── 3. Provision instance ───────────────────────────────────────
featureInterface = getFeatureInterface(db)
- featureLabel = featureDef.get("label", {}).get("en", featureCode)
+ featureLabel = resolveText(featureDef.get("label"))
instance = featureInterface.createFeatureInstance(
featureCode=featureCode,
mandateId=mandateId,
@@ -382,7 +389,7 @@ def activateStoreFeature(
)
if not instance:
- raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Failed to create feature instance")
+ raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=routeApiMsg("Failed to create feature instance"))
instanceId = instance.get("id") if isinstance(instance, dict) else instance.id
@@ -398,7 +405,7 @@ def activateStoreFeature(
_rollbackInstance(db, instanceId)
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
- detail=f"Keine Feature-Admin-Rolle für {featureCode} gefunden — Rollback.",
+ detail=f"Keine Admin-Rolle für Modul {featureCode} gefunden — Rollback.",
)
rootInterface.createFeatureAccess(userId, instanceId, roleIds=[adminRoleId])
@@ -460,12 +467,12 @@ def deactivateStoreFeature(
# Verify instance exists in mandate
instances = db.getRecordset(FeatureInstance, recordFilter={"id": instanceId, "mandateId": mandateId})
if not instances:
- raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Feature instance not found in mandate")
+ raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=routeApiMsg("Feature instance not found in mandate"))
# Find user's FeatureAccess
accesses = db.getRecordset(FeatureAccess, recordFilter={"userId": userId, "featureInstanceId": instanceId})
if not accesses:
- raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="No active access found")
+ raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=routeApiMsg("No active access found"))
featureAccessId = accesses[0].get("id")
db.recordDelete(FeatureAccess, featureAccessId)
@@ -533,7 +540,7 @@ def _notifyFeatureActivation(
priceLine = f"Kosten: CHF {plan.pricePerFeatureInstanceCHF:.2f} / {plan.billingPeriod.value} (anteilig via Stripe-Proration)."
bodyParagraphs = [
- f"Die Feature-Instanz «{featureLabel}» ({featureCode}) wurde soeben für Ihren Mandanten aktiviert.",
+ f"Das Modul «{featureLabel}» ({featureCode}) wurde soeben für Ihren Mandanten aktiviert.",
]
if priceLine:
bodyParagraphs.append(priceLine)
@@ -541,8 +548,8 @@ def _notifyFeatureActivation(
notifyMandateAdmins(
mandateId=mandateId,
- subject=f"Feature aktiviert: {featureLabel}",
- headline="Neue Feature-Instanz aktiviert",
+ subject=f"Modul aktiviert: {featureLabel}",
+ headline="Neues Modul aktiviert",
bodyParagraphs=bodyParagraphs,
)
except Exception as e:
diff --git a/modules/routes/routeSubscription.py b/modules/routes/routeSubscription.py
index 97a7f23b..9d8fbfd3 100644
--- a/modules/routes/routeSubscription.py
+++ b/modules/routes/routeSubscription.py
@@ -23,10 +23,22 @@ from pydantic import BaseModel, Field
from modules.auth import limiter, getRequestContext, RequestContext
from modules.datamodels.datamodelPagination import PaginationParams, PaginatedResponse, PaginationMetadata, normalize_pagination_dict
from modules.routes.routeDataUsers import _applyFiltersAndSort, _extractDistinctValues
+from modules.shared.i18nRegistry import apiRouteContext, resolveText
+routeApiMsg = apiRouteContext("routeSubscription")
logger = logging.getLogger(__name__)
+def _planToDict(plan) -> Optional[Dict[str, Any]]:
+ """Serialize a SubscriptionPlan with resolved i18n title/description."""
+ if not plan:
+ return None
+ d = plan.model_dump()
+ d["title"] = resolveText(plan.title)
+ d["description"] = resolveText(plan.description)
+ return d
+
+
def _resolveMandateId(context: RequestContext) -> str:
if context.mandateId:
return str(context.mandateId)
@@ -53,7 +65,7 @@ def _assertMandateAdmin(context: RequestContext, mandateId: str) -> None:
return
except Exception:
pass
- raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Mandate admin role required")
+ raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail=routeApiMsg("Mandate admin role required"))
# =============================================================================
@@ -76,11 +88,56 @@ class ForceCancelRequest(BaseModel):
class VerifyCheckoutRequest(BaseModel):
sessionId: str = Field(..., description="Stripe Checkout Session ID to verify")
+class SubscriptionUsage(BaseModel):
+ activeUsers: int = 0
+ activeInstances: int = 0
+ usedStorageMB: float = 0
+ maxStorageMB: Optional[float] = None
+ storagePercent: Optional[float] = None
+
class SubscriptionStatusResponse(BaseModel):
active: bool
subscription: Optional[Dict[str, Any]] = None
plan: Optional[Dict[str, Any]] = None
scheduled: Optional[Dict[str, Any]] = None
+ usage: Optional[SubscriptionUsage] = None
+
+
+def _computeUsage(mandateId: str, plan) -> SubscriptionUsage:
+ """Compute current usage metrics for a mandate's subscription."""
+ try:
+ from modules.interfaces.interfaceDbApp import getRootInterface
+ from modules.datamodels.datamodelMembership import UserMandate
+ from modules.datamodels.datamodelFeatures import FeatureInstance
+ from modules.interfaces.interfaceDbKnowledge import aggregateMandateRagTotalBytes
+
+ rootIf = getRootInterface()
+
+ allUM = rootIf.db.getRecordset(UserMandate, recordFilter={"mandateId": mandateId})
+ activeUsers = len(allUM) if allUM else 0
+
+ allFI = rootIf.db.getRecordset(FeatureInstance, recordFilter={"mandateId": mandateId})
+ activeInstances = sum(
+ 1 for fi in (allFI or [])
+ if (fi.get("enabled") if isinstance(fi, dict) else getattr(fi, "enabled", False))
+ )
+
+ ragBytes = aggregateMandateRagTotalBytes(mandateId)
+ usedMB = round(ragBytes / (1024 * 1024), 2)
+
+ maxMB = plan.maxDataVolumeMB if plan else None
+ storagePercent = round((usedMB / maxMB) * 100, 1) if maxMB else None
+
+ return SubscriptionUsage(
+ activeUsers=activeUsers,
+ activeInstances=activeInstances,
+ usedStorageMB=usedMB,
+ maxStorageMB=maxMB,
+ storagePercent=storagePercent,
+ )
+ except Exception as e:
+ logger.warning("Failed to compute subscription usage: %s", e)
+ return SubscriptionUsage()
# =============================================================================
@@ -108,7 +165,7 @@ def getPlans(request: Request, context: RequestContext = Depends(getRequestConte
mandateId = _resolveMandateId(context)
subService = getSubscriptionService(context.user, mandateId)
plans = subService.getSelectablePlans()
- return [p.model_dump() for p in plans]
+ return [_planToDict(p) for p in plans]
except Exception as e:
logger.error("Error fetching plans: %s", e)
raise HTTPException(status_code=500, detail=str(e))
@@ -140,17 +197,21 @@ def getStatus(request: Request, context: RequestContext = Depends(getRequestCont
return SubscriptionStatusResponse(
active=False,
subscription=sub,
- plan=plan.model_dump() if plan else None,
+ plan=_planToDict(plan),
scheduled=scheduled,
)
return SubscriptionStatusResponse(active=False, scheduled=scheduled)
plan = subService.getPlan(operative.get("planKey", ""))
+
+ usage = _computeUsage(mandateId, plan)
+
return SubscriptionStatusResponse(
active=True,
subscription=operative,
- plan=plan.model_dump() if plan else None,
+ plan=_planToDict(plan),
scheduled=scheduled,
+ usage=usage,
)
except Exception as e:
logger.error("Error fetching status: %s", e)
@@ -169,7 +230,7 @@ def activatePlan(
)
mandateId = _resolveMandateId(context)
if not mandateId:
- raise HTTPException(status_code=400, detail="X-Mandate-Id header required")
+ raise HTTPException(status_code=400, detail=routeApiMsg("X-Mandate-Id header required"))
_assertMandateAdmin(context, mandateId)
try:
@@ -195,7 +256,7 @@ def cancelSubscription(
)
mandateId = _resolveMandateId(context)
if not mandateId:
- raise HTTPException(status_code=400, detail="X-Mandate-Id header required")
+ raise HTTPException(status_code=400, detail=routeApiMsg("X-Mandate-Id header required"))
_assertMandateAdmin(context, mandateId)
try:
@@ -221,7 +282,7 @@ def reactivateSubscription(
)
mandateId = _resolveMandateId(context)
if not mandateId:
- raise HTTPException(status_code=400, detail="X-Mandate-Id header required")
+ raise HTTPException(status_code=400, detail=routeApiMsg("X-Mandate-Id header required"))
_assertMandateAdmin(context, mandateId)
try:
@@ -243,7 +304,7 @@ def forceCancel(
):
"""Sysadmin: immediately expire any non-terminal subscription."""
if not context.hasSysAdminRole:
- raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Sysadmin role required")
+ raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail=routeApiMsg("Sysadmin role required"))
from modules.serviceCenter.services.serviceSubscription.mainServiceSubscription import (
getService as getSubscriptionService,
@@ -251,7 +312,7 @@ def forceCancel(
from modules.interfaces.interfaceDbSubscription import _getRootInterface as getSubRootInterface
sub = getSubRootInterface().getById(data.subscriptionId)
if not sub:
- raise HTTPException(status_code=404, detail="Subscription not found")
+ raise HTTPException(status_code=404, detail=routeApiMsg("Subscription not found"))
mandateId = sub["mandateId"]
try:
@@ -278,7 +339,7 @@ def verifyCheckout(
"""
mandateId = _resolveMandateId(context)
if not mandateId:
- raise HTTPException(status_code=400, detail="X-Mandate-Id header required")
+ raise HTTPException(status_code=400, detail=routeApiMsg("X-Mandate-Id header required"))
_assertMandateAdmin(context, mandateId)
try:
@@ -288,7 +349,7 @@ def verifyCheckout(
session = stripeToDict(rawSession)
except Exception as e:
logger.error("Failed to retrieve checkout session %s: %s", data.sessionId, e)
- raise HTTPException(status_code=400, detail="Invalid session ID")
+ raise HTTPException(status_code=400, detail=routeApiMsg("Invalid session ID"))
payStatus = session.get("payment_status")
if session.get("status") != "complete":
@@ -297,7 +358,7 @@ def verifyCheckout(
return {"status": "pending", "message": "Checkout not yet completed"}
if session.get("mode") != "subscription":
- raise HTTPException(status_code=400, detail="Not a subscription checkout session")
+ raise HTTPException(status_code=400, detail=routeApiMsg("Not a subscription checkout session"))
from modules.routes.routeBilling import _handleSubscriptionCheckoutCompleted
@@ -357,6 +418,34 @@ def _buildEnrichedSubscriptions() -> List[Dict[str, Any]]:
operativeValues = {s.value for s in OPERATIVE_STATUSES}
+ operativeMandateIds = list({
+ sub.get("mandateId") for sub in allSubs
+ if sub.get("mandateId") and sub.get("status") in operativeValues
+ })
+
+ userCountMap: Dict[str, int] = {}
+ instanceCountMap: Dict[str, int] = {}
+ if operativeMandateIds:
+ try:
+ from modules.datamodels.datamodelMembership import UserMandate
+ from modules.datamodels.datamodelFeatures import FeatureInstance
+ from modules.security.rootAccess import getRootDbAppConnector
+ appDb = getRootDbAppConnector()
+ allUM = appDb.getRecordset(UserMandate, recordFilter={"mandateId": operativeMandateIds})
+ for um in (allUM or []):
+ mid = um.get("mandateId") if isinstance(um, dict) else getattr(um, "mandateId", None)
+ if mid:
+ userCountMap[mid] = userCountMap.get(mid, 0) + 1
+ allFI = appDb.getRecordset(FeatureInstance, recordFilter={"mandateId": operativeMandateIds})
+ for fi in (allFI or []):
+ fid = fi if isinstance(fi, dict) else fi.__dict__
+ if fid.get("enabled"):
+ mid = fid.get("mandateId")
+ if mid:
+ instanceCountMap[mid] = instanceCountMap.get(mid, 0) + 1
+ except Exception as e:
+ logger.warning("Batch count for subscriptions failed: %s", e)
+
enriched = []
for sub in allSubs:
mid = sub.get("mandateId", "")
@@ -364,18 +453,16 @@ def _buildEnrichedSubscriptions() -> List[Dict[str, Any]]:
plan = BUILTIN_PLANS.get(planKey)
sub["mandateName"] = mandateNames.get(mid, mid[:8])
- sub["planTitle"] = (plan.title.get("de") or plan.title.get("en") or planKey) if plan else planKey
+ sub["planTitle"] = resolveText(plan.title) if plan else planKey
if sub.get("status") in operativeValues:
userPrice = sub.get("snapshotPricePerUserCHF", 0) or 0
instPrice = sub.get("snapshotPricePerInstanceCHF", 0) or 0
- try:
- userCount = subInterface.countActiveUsers(mid)
- instanceCount = subInterface.countActiveFeatureInstances(mid)
- except Exception:
- userCount = 0
- instanceCount = 0
- sub["monthlyRevenueCHF"] = round(userPrice * userCount + instPrice * instanceCount, 2)
+ userCount = userCountMap.get(mid, 0)
+ instanceCount = instanceCountMap.get(mid, 0)
+ includedModules = plan.includedModules if plan else 0
+ billableModules = max(0, instanceCount - includedModules)
+ sub["monthlyRevenueCHF"] = round(userPrice * userCount + instPrice * billableModules, 2)
sub["activeUsers"] = userCount
sub["activeInstances"] = instanceCount
else:
@@ -397,7 +484,7 @@ def getAllSubscriptions(
):
"""SysAdmin: list ALL subscriptions across all mandates with enriched metadata."""
if not context.hasSysAdminRole:
- raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Sysadmin role required")
+ raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail=routeApiMsg("Sysadmin role required"))
paginationParams: Optional[PaginationParams] = None
if pagination:
@@ -443,7 +530,7 @@ def getFilterValues(
):
"""Return distinct values for a column, respecting all active filters except the requested one."""
if not context.hasSysAdminRole:
- raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Sysadmin role required")
+ raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail=routeApiMsg("Sysadmin role required"))
crossFilterParams: Optional[PaginationParams] = None
if pagination:
@@ -492,13 +579,13 @@ def _getDataVolumeUsage(
mgmtDb = getMgmtInterface().db
totalFileBytes = 0
- for instId in instIds:
- files = mgmtDb.getRecordset(FileItem, recordFilter={"featureInstanceId": instId})
- for f in files:
+ if instIds:
+ files = mgmtDb.getRecordset(FileItem, recordFilter={"featureInstanceId": instIds})
+ for f in (files or []):
size = f.get("fileSize") if isinstance(f, dict) else getattr(f, "fileSize", 0)
totalFileBytes += (size or 0)
mandateFiles = mgmtDb.getRecordset(FileItem, recordFilter={"mandateId": mandateId})
- for f in mandateFiles:
+ for f in (mandateFiles or []):
size = f.get("fileSize") if isinstance(f, dict) else getattr(f, "fileSize", 0)
totalFileBytes += (size or 0)
filesMB = round(totalFileBytes / (1024 * 1024), 2)
diff --git a/modules/routes/routeSystem.py b/modules/routes/routeSystem.py
index f287d908..51ad8d1b 100644
--- a/modules/routes/routeSystem.py
+++ b/modules/routes/routeSystem.py
@@ -11,13 +11,16 @@ Navigation API Konzept:
"""
import logging
-from typing import Dict, List, Any, Optional
-from fastapi import APIRouter, Depends, Request, Query
+import time
+from collections import Counter
+from typing import Dict, List, Any, Optional, Set
+from fastapi import APIRouter, Depends, Request
from slowapi import Limiter
from slowapi.util import get_remote_address
from modules.auth.authentication import getRequestContext, RequestContext
from modules.system.mainSystem import NAVIGATION_SECTIONS, _objectKeyToUiComponent
+from modules.shared.i18nRegistry import resolveText
from modules.interfaces.interfaceDbApp import getRootInterface
from modules.interfaces.interfaceFeatures import getFeatureInterface
from modules.datamodels.datamodelRbac import AccessRule, AccessRuleContext
@@ -102,11 +105,8 @@ def _getFeatureUiObjects(featureCode: str) -> List[Dict[str, Any]]:
elif featureCode == "realestate":
from modules.features.realEstate.mainRealEstate import UI_OBJECTS
return UI_OBJECTS
- elif featureCode == "automation":
- from modules.features.automation.mainAutomation import UI_OBJECTS
- return UI_OBJECTS
- elif featureCode == "automation2":
- from modules.features.automation2.mainAutomation2 import UI_OBJECTS
+ elif featureCode == "graphicalEditor":
+ from modules.features.graphicalEditor.mainGraphicalEditor import UI_OBJECTS
return UI_OBJECTS
elif featureCode == "teamsbot":
from modules.features.teamsbot.mainTeamsbot import UI_OBJECTS
@@ -133,11 +133,13 @@ def _getFeatureUiObjects(featureCode: str) -> List[Dict[str, Any]]:
def _buildDynamicBlock(
userId: str,
- language: str,
isSysAdmin: bool
) -> Optional[Dict[str, Any]]:
"""
Build the dynamic features block with mandates, features, and instances.
+ View and feature labels use resolveText() for the current request language
+ (same contract as static navigation items in _formatBlockItem).
+ Instance and mandate display names are user-defined and passed through as-is.
Returns None if user has no feature instances.
"""
@@ -184,23 +186,26 @@ def _buildDynamicBlock(
if featureKey not in featuresMap:
feature = featureInterface.getFeature(instance.featureCode)
- # Handle featureLabel - could be a dict or a Pydantic model (TextMultilingual)
+ # Handle featureLabel — TextMultilingual dict, plain str (German key), or legacy object
if feature and hasattr(feature, 'label'):
featureLabel = feature.label
- # Convert Pydantic model to dict if needed
if hasattr(featureLabel, 'model_dump'):
featureLabel = featureLabel.model_dump()
- elif hasattr(featureLabel, 'dict'):
- featureLabel = featureLabel.dict()
+ elif isinstance(featureLabel, str):
+ pass
elif not isinstance(featureLabel, dict):
- # Fallback: try to access as attributes
- featureLabel = {"de": getattr(featureLabel, 'de', instance.featureCode), "en": getattr(featureLabel, 'en', instance.featureCode)}
+ featureLabel = {
+ "de": getattr(featureLabel, 'de', instance.featureCode),
+ "en": getattr(featureLabel, 'en', instance.featureCode),
+ }
else:
featureLabel = {"de": instance.featureCode, "en": instance.featureCode}
+ resolvedFeatureLabel = resolveText(featureLabel)
+
featuresMap[featureKey] = {
"uiComponent": f"feature.{instance.featureCode}",
- "uiLabel": featureLabel.get(language, featureLabel.get("en", instance.featureCode)),
+ "uiLabel": resolvedFeatureLabel,
"order": 10,
"instances": [],
"_mandateId": mandateId,
@@ -233,9 +238,8 @@ def _buildDynamicBlock(
# Build path for this view
viewPath = f"/mandates/{mandateId}/{instance.featureCode}/{instance.id}/{viewName}"
- # Get label in requested language
- label = uiObj.get("label", {})
- uiLabel = label.get(language, label.get("en", viewName))
+ rawViewLabel = uiObj.get("label")
+ uiLabel = resolveText(rawViewLabel) if rawViewLabel not in (None, "") else ""
views.append({
"uiComponent": f"page.feature.{instance.featureCode}.{viewName}",
@@ -252,6 +256,7 @@ def _buildDynamicBlock(
featuresMap[featureKey]["instances"].append({
"id": str(instance.id),
"uiLabel": instance.label,
+ "featureCode": instance.featureCode,
"order": 10,
"views": views,
"isAdmin": permissions.get("isAdmin", False),
@@ -352,7 +357,6 @@ def _getInstanceViewPermissions(
def _filterItems(
items: List[Dict[str, Any]],
- language: str,
isSysAdmin: bool,
roleIds: List[str],
hasGlobalPermission: bool
@@ -366,19 +370,18 @@ def _filterItems(
if item.get("sysAdminOnly") and not isSysAdmin:
continue
if item.get("public"):
- filteredItems.append(_formatBlockItem(item, language))
+ filteredItems.append(_formatBlockItem(item))
continue
if isSysAdmin:
- filteredItems.append(_formatBlockItem(item, language))
+ filteredItems.append(_formatBlockItem(item))
continue
if hasGlobalPermission or _checkUiPermission(roleIds, item["objectKey"]):
- filteredItems.append(_formatBlockItem(item, language))
+ filteredItems.append(_formatBlockItem(item))
filteredItems.sort(key=lambda i: i["order"])
return filteredItems
def _buildStaticBlocks(
- language: str,
isSysAdmin: bool,
roleIds: List[str],
hasGlobalPermission: bool
@@ -386,8 +389,8 @@ def _buildStaticBlocks(
"""
Build static navigation blocks from NAVIGATION_SECTIONS.
- Returns list of blocks with items filtered by permissions.
- Supports subgroups within sections.
+ Keys are registered at import time via t() in mainSystem.py.
+ At request time, resolveText() translates them to the current language.
"""
blocks = []
@@ -395,43 +398,50 @@ def _buildStaticBlocks(
if section.get("adminOnly") and not isSysAdmin:
continue
- # Handle sections with subgroups
- if "subgroups" in section:
+ hasSubgroups = "subgroups" in section
+ hasItems = "items" in section and len(section["items"]) > 0
+
+ if hasSubgroups:
filteredSubgroups = []
for subgroup in section["subgroups"]:
subItems = _filterItems(
- subgroup.get("items", []), language, isSysAdmin, roleIds, hasGlobalPermission
+ subgroup.get("items", []), isSysAdmin, roleIds, hasGlobalPermission
)
if subItems:
filteredSubgroups.append({
"id": subgroup["id"],
- "title": subgroup["title"].get(language, subgroup["title"].get("en", subgroup["id"])),
+ "title": resolveText(subgroup["title"]),
"order": subgroup.get("order", 50),
"items": subItems,
})
-
+
filteredSubgroups.sort(key=lambda s: s["order"])
-
- if filteredSubgroups:
+
+ topLevelItems = []
+ if hasItems:
+ topLevelItems = _filterItems(
+ section["items"], isSysAdmin, roleIds, hasGlobalPermission
+ )
+
+ if filteredSubgroups or topLevelItems:
blocks.append({
"type": "static",
"id": section["id"],
- "title": section["title"].get(language, section["title"].get("en", section["id"])),
+ "title": resolveText(section["title"]),
"order": section.get("order", 50),
- "items": [],
+ "items": topLevelItems,
"subgroups": filteredSubgroups,
})
else:
- # Standard flat section
filteredItems = _filterItems(
- section.get("items", []), language, isSysAdmin, roleIds, hasGlobalPermission
+ section.get("items", []), isSysAdmin, roleIds, hasGlobalPermission
)
-
+
if filteredItems:
blocks.append({
"type": "static",
"id": section["id"],
- "title": section["title"].get(language, section["title"].get("en", section["id"])),
+ "title": resolveText(section["title"]),
"order": section.get("order", 50),
"items": filteredItems,
})
@@ -439,19 +449,14 @@ def _buildStaticBlocks(
return blocks
-def _formatBlockItem(item: Dict[str, Any], language: str) -> Dict[str, Any]:
- """
- Format a navigation item for the new API response.
-
- Uses new field names: uiComponent, uiLabel, uiPath
- Does NOT include icon (UI maps via uiComponent)
- """
+def _formatBlockItem(item: Dict[str, Any]) -> Dict[str, Any]:
+ """Format a navigation item for the API response."""
objectKey = item["objectKey"]
uiComponent = _objectKeyToUiComponent(objectKey)
return {
"uiComponent": uiComponent,
- "uiLabel": item["label"].get(language, item["label"].get("en", item["id"])),
+ "uiLabel": resolveText(item["label"]),
"uiPath": item["path"],
"order": item.get("order", 50),
"objectKey": objectKey,
@@ -462,52 +467,15 @@ def _formatBlockItem(item: Dict[str, Any], language: str) -> Dict[str, Any]:
@limiter.limit("60/minute")
def get_navigation(
request: Request,
- language: str = Query("de", description="Language for labels (en, de, fr)"),
reqContext: RequestContext = Depends(getRequestContext)
) -> Dict[str, Any]:
"""
Get unified navigation structure with blocks.
- Single Source of Truth für Navigation - UI rendert nur was es erhält.
+ Static items and dynamic feature/view labels are resolved with resolveText()
+ for the current request language. User-defined instance/mandate names are raw.
Endpoint: GET /api/navigation
-
- Block order:
- - System (10)
- - Dynamic/Features (15) - only if user has feature instances
- - Workflows (20)
- - Basisdaten (30)
- - Migrate (40)
- - Administration (200)
-
- Response format:
- {
- "language": "de",
- "blocks": [
- {
- "type": "static",
- "id": "system",
- "title": "SYSTEM",
- "order": 10,
- "items": [
- {
- "uiComponent": "page.system.home",
- "uiLabel": "Übersicht",
- "uiPath": "/",
- "order": 10,
- "objectKey": "ui.system.home"
- }
- ]
- },
- {
- "type": "dynamic",
- "id": "features",
- "title": "MEINE FEATURES",
- "order": 15,
- "mandates": [...]
- }
- ]
- }
"""
try:
isSysAdmin = reqContext.hasSysAdminRole
@@ -524,11 +492,11 @@ def get_navigation(
hasGlobalPermission = _checkUiPermission(roleIds, "_global_check")
# Build static blocks from NAVIGATION_SECTIONS
- blocks = _buildStaticBlocks(language, isSysAdmin, roleIds, hasGlobalPermission)
+ blocks = _buildStaticBlocks(isSysAdmin, roleIds, hasGlobalPermission)
# Build dynamic block (features) if user has feature instances
if userId:
- dynamicBlock = _buildDynamicBlock(userId, language, isSysAdmin)
+ dynamicBlock = _buildDynamicBlock(userId, isSysAdmin)
if dynamicBlock:
blocks.append(dynamicBlock)
@@ -536,14 +504,467 @@ def get_navigation(
blocks.sort(key=lambda b: b["order"])
return {
- "language": language,
"blocks": blocks,
}
except Exception as e:
logger.error(f"Error getting navigation: {e}")
return {
- "language": language,
"blocks": [],
"error": str(e),
+ }
+
+
+# =============================================================================
+# AI models (integrations overview)
+# =============================================================================
+
+
+def _buildIntegrationsOverviewPayload(userId: str, user=None) -> Dict[str, Any]:
+ """
+ Single payload for the Integrations architecture page: actually-configured
+ UserConnections, DataSource / FeatureDataSource rows, trustee accounting bindings, AICore
+ connector modules (not individual models), extractor extensions and renderer
+ formats from registries, platform infra tools, and live KPI stats.
+ """
+ root = getRootInterface()
+ out: Dict[str, Any] = {
+ "aicoreModules": [],
+ "infraTools": [],
+ "extractorExtensions": [],
+ "extractorClasses": [],
+ "rendererFormats": [],
+ "rendererClasses": [],
+ "dataLayerItems": [],
+ "liveStats": {},
+ "errors": [],
}
+
+ _PROVIDER_LABELS = {
+ "anthropic": "Anthropic (Claude)",
+ "openai": "OpenAI (GPT)",
+ "mistral": "Mistral (Le Chat)",
+ "perplexity": "Perplexity",
+ "tavily": "Tavily (Websuche)",
+ "privatellm": "Private LLM",
+ "internal": "Intern",
+ }
+
+ # --- AICore: one entry per connector module + model counts ---
+ try:
+ from modules.aicore.aicoreModelRegistry import modelRegistry
+
+ modelRegistry.ensureConnectorsRegistered()
+ modelRegistry.refreshModels(force=False)
+ counts = Counter()
+ for m in modelRegistry.getModels():
+ if not getattr(m, "isAvailable", True):
+ continue
+ counts[str(getattr(m, "connectorType", "") or "")] += 1
+ modules: List[Dict[str, Any]] = []
+ for conn in modelRegistry.discoverConnectors():
+ ct = conn.getConnectorType()
+ modules.append(
+ {
+ "connectorType": ct,
+ "label": _PROVIDER_LABELS.get(ct, ct),
+ "modelCount": int(counts.get(ct, 0)),
+ }
+ )
+ out["aicoreModules"] = modules
+ except Exception as e:
+ logger.error(f"integrations-overview aicore: {e}")
+ out["errors"].append(f"aicore: {e}")
+
+ # --- Extractors (registered extensions, unique + per-class rows) ---
+ try:
+ from modules.serviceCenter.services.serviceExtraction.mainServiceExtraction import ExtractionService
+ from modules.serviceCenter.services.serviceExtraction.subRegistry import ExtractorRegistry
+
+ if ExtractionService._sharedExtractorRegistry is None:
+ ExtractionService._sharedExtractorRegistry = ExtractorRegistry()
+ reg = ExtractionService._sharedExtractorRegistry
+ ext_map = reg.getExtensionToMimeMap()
+ uniq = sorted({str(k).upper() for k in ext_map.keys() if k and "." not in str(k)})
+ out["extractorExtensions"] = uniq
+
+ seen_ext: Set[int] = set()
+ class_rows: List[Dict[str, Any]] = []
+ for extractor in reg._map.values():
+ eid = id(extractor)
+ if eid in seen_ext:
+ continue
+ seen_ext.add(eid)
+ if not hasattr(extractor, "getSupportedExtensions"):
+ continue
+ raw_exts = extractor.getSupportedExtensions()
+ if not raw_exts:
+ continue
+ norm = sorted({str(x).lstrip(".").lower() for x in raw_exts if x})
+ if norm:
+ class_rows.append({"className": extractor.__class__.__name__, "extensions": norm})
+ class_rows.sort(key=lambda r: r["className"])
+ out["extractorClasses"] = class_rows
+ fb = getattr(reg, "_fallback", None)
+ if fb and hasattr(fb, "getSupportedExtensions") and id(fb) not in seen_ext:
+ raw_exts = fb.getSupportedExtensions()
+ if raw_exts:
+ norm = sorted({str(x).lstrip(".").lower() for x in raw_exts if x})
+ if norm:
+ out["extractorClasses"].append({"className": fb.__class__.__name__, "extensions": norm})
+ out["extractorClasses"].sort(key=lambda r: r["className"])
+ except Exception as e:
+ logger.error(f"integrations-overview extractors: {e}")
+ out["errors"].append(f"extractors: {e}")
+
+ # --- Renderers (registered output formats + per-class rows) ---
+ try:
+ from modules.serviceCenter.services.serviceGeneration.renderers.registry import getSupportedFormats, getRendererInfo
+
+ out["rendererFormats"] = sorted(getSupportedFormats())
+
+ by_renderer_class: Dict[str, Dict[str, Any]] = {}
+ for composite_key, meta in getRendererInfo().items():
+ cn = meta.get("class_name") or ""
+ if not cn:
+ continue
+ fmt = composite_key.split(":")[0] if ":" in composite_key else composite_key
+ if cn not in by_renderer_class:
+ by_renderer_class[cn] = {"className": cn, "formats": set()}
+ by_renderer_class[cn]["formats"].add(fmt)
+ renderer_rows = [
+ {"className": d["className"], "formats": sorted(d["formats"])}
+ for _, d in sorted(by_renderer_class.items(), key=lambda x: x[0])
+ ]
+ out["rendererClasses"] = renderer_rows
+ except Exception as e:
+ logger.error(f"integrations-overview renderers: {e}")
+ out["errors"].append(f"renderers: {e}")
+
+ # --- Platform infra tools (only routes that exist in this deployment) ---
+ out["infraTools"] = [
+ {"id": "voice", "label": "Voice / STT"},
+ ]
+
+ accessible_instance_ids: Set[str] = set()
+ try:
+ for access in root.getFeatureAccessesForUser(userId):
+ if not getattr(access, "enabled", True):
+ continue
+ accessible_instance_ids.add(str(access.featureInstanceId))
+ except Exception as e:
+ logger.debug(f"integrations-overview feature accesses: {e}")
+
+ # --- UserConnection (active only) ---
+ try:
+ from modules.datamodels.datamodelUam import ConnectionStatus
+
+ for c in root.getUserConnections(userId):
+ st = c.status
+ st_val = st.value if hasattr(st, "value") else str(st)
+ if st_val != ConnectionStatus.ACTIVE.value:
+ continue
+ dumped = c.model_dump(mode="json")
+ dumped["kind"] = "userConnection"
+ out["dataLayerItems"].append(dumped)
+ except Exception as e:
+ logger.error(f"integrations-overview connections: {e}")
+ out["errors"].append(f"connections: {e}")
+
+ # --- instance label lookup (shared by DataSource & Trustee blocks) ---
+ _instLabelCache: Dict[str, str] = {}
+ def _getInstanceLabel(iid: str) -> str:
+ if iid in _instLabelCache:
+ return _instLabelCache[iid]
+ try:
+ _fi = getFeatureInterface(root.db)
+ inst = _fi.getFeatureInstance(iid)
+ lbl = getattr(inst, "label", None) or getattr(inst, "uiLabel", None) or ""
+ _instLabelCache[iid] = lbl
+ except Exception:
+ _instLabelCache[iid] = ""
+ return _instLabelCache[iid]
+
+ # --- DataSource & FeatureDataSource ---
+ try:
+ from modules.datamodels.datamodelDataSource import DataSource
+ from modules.datamodels.datamodelFeatureDataSource import FeatureDataSource
+
+ seen_ds: Set[str] = set()
+
+ for row in root.db.getRecordset(DataSource, recordFilter={"userId": userId}) or []:
+ rid = str(row.get("id", ""))
+ if not rid or rid in seen_ds:
+ continue
+ seen_ds.add(rid)
+ out["dataLayerItems"].append(
+ {
+ "kind": "dataSource",
+ "id": rid,
+ "label": row.get("label") or row.get("displayPath") or rid,
+ "sourceType": row.get("sourceType") or "",
+ "featureInstanceId": row.get("featureInstanceId"),
+ "mandateId": row.get("mandateId"),
+ "connectionId": row.get("connectionId"),
+ }
+ )
+
+ for iid in accessible_instance_ids:
+ for row in root.db.getRecordset(DataSource, recordFilter={"featureInstanceId": iid}) or []:
+ rid = str(row.get("id", ""))
+ if not rid or rid in seen_ds:
+ continue
+ seen_ds.add(rid)
+ out["dataLayerItems"].append(
+ {
+ "kind": "dataSource",
+ "id": rid,
+ "label": row.get("label") or row.get("displayPath") or rid,
+ "sourceType": row.get("sourceType") or "",
+ "featureInstanceId": row.get("featureInstanceId"),
+ "mandateId": row.get("mandateId"),
+ "connectionId": row.get("connectionId"),
+ }
+ )
+
+ seen_fds: Set[str] = set()
+ for row in root.db.getRecordset(FeatureDataSource, recordFilter={"userId": userId}) or []:
+ rid = str(row.get("id", ""))
+ if not rid or rid in seen_fds:
+ continue
+ seen_fds.add(rid)
+ fds_iid = row.get("featureInstanceId") or ""
+ out["dataLayerItems"].append(
+ {
+ "kind": "featureDataSource",
+ "id": rid,
+ "label": row.get("label") or rid,
+ "featureCode": row.get("featureCode") or "",
+ "tableName": row.get("tableName") or "",
+ "featureInstanceId": fds_iid,
+ "mandateId": row.get("mandateId"),
+ "instanceLabel": _getInstanceLabel(fds_iid) if fds_iid else "",
+ }
+ )
+
+ for iid in accessible_instance_ids:
+ for row in root.db.getRecordset(FeatureDataSource, recordFilter={"featureInstanceId": iid}) or []:
+ rid = str(row.get("id", ""))
+ if not rid or rid in seen_fds:
+ continue
+ seen_fds.add(rid)
+ out["dataLayerItems"].append(
+ {
+ "kind": "featureDataSource",
+ "id": rid,
+ "label": row.get("label") or rid,
+ "featureCode": row.get("featureCode") or "",
+ "tableName": row.get("tableName") or "",
+ "featureInstanceId": iid,
+ "mandateId": row.get("mandateId"),
+ "instanceLabel": _getInstanceLabel(iid),
+ }
+ )
+ except Exception as e:
+ logger.error(f"integrations-overview datasources: {e}")
+ out["errors"].append(f"datasources: {e}")
+
+ # --- Trustee accounting systems (configured integrations per instance) ---
+ try:
+ from modules.features.trustee.datamodelFeatureTrustee import TrusteeAccountingConfig
+
+ fi = getFeatureInterface(root.db)
+ seen_acc: Set[str] = set()
+ for iid in accessible_instance_ids:
+ inst = fi.getFeatureInstance(iid)
+ if not inst or inst.featureCode != "trustee":
+ continue
+ for row in root.db.getRecordset(
+ TrusteeAccountingConfig,
+ recordFilter={"featureInstanceId": iid, "isActive": True},
+ ) or []:
+ rid = str(row.get("id", ""))
+ if not rid or rid in seen_acc:
+ continue
+ seen_acc.add(rid)
+ out["dataLayerItems"].append(
+ {
+ "kind": "trusteeAccounting",
+ "id": rid,
+ "featureInstanceId": iid,
+ "instanceLabel": getattr(inst, "label", None) or "",
+ "mandateId": str(getattr(inst, "mandateId", "") or ""),
+ "connectorType": row.get("connectorType") or "",
+ "displayLabel": row.get("displayLabel") or row.get("connectorType") or rid,
+ }
+ )
+ except Exception as e:
+ logger.error(f"integrations-overview trustee accounting: {e}")
+ out["errors"].append(f"trusteeAccounting: {e}")
+
+ # --- Live stats (billing AI calls + workflow metrics) ---
+ liveStats: Dict[str, Any] = {
+ "aiCallCount": 0,
+ "aiCallPeriodDays": 30,
+ "totalWorkflows": 0,
+ "activeWorkflows": 0,
+ "totalRuns": 0,
+ "totalTokens": 0,
+ }
+
+ # Billing: count AI transactions in the last 30 days
+ if user is not None:
+ try:
+ from modules.interfaces.interfaceDbBilling import getInterface as getBillingInterface
+
+ mandateIds: List[str] = []
+ for um in root.getUserMandates(userId):
+ mid = getattr(um, "mandateId", None)
+ if mid and getattr(um, "enabled", True):
+ mandateIds.append(str(mid))
+
+ if mandateIds:
+ bi = getBillingInterface(user, mandateIds[0])
+ now = time.time()
+ startTs = now - 30 * 86400
+ stats = bi.getTransactionStatisticsAggregated(
+ mandateIds=mandateIds,
+ scope="all",
+ userId=userId,
+ startTs=startTs,
+ endTs=now,
+ period="month",
+ )
+ liveStats["aiCallCount"] = stats.get("transactionCount", 0)
+ except Exception as e:
+ logger.debug(f"integrations-overview billing stats: {e}")
+
+ # Workflow metrics (same logic as routeWorkflowDashboard.get_workflow_metrics)
+ try:
+ from modules.shared.configuration import APP_CONFIG
+ from modules.connectors.connectorDbPostgre import DatabaseConnector
+ from modules.datamodels.datamodelPagination import PaginationParams
+ from modules.features.graphicalEditor.datamodelFeatureGraphicalEditor import (
+ AutoWorkflow, AutoRun,
+ )
+
+ wfDb = DatabaseConnector(
+ dbHost=APP_CONFIG.get("DB_HOST", "localhost"),
+ dbDatabase="poweron_graphicaleditor",
+ dbUser=APP_CONFIG.get("DB_USER"),
+ dbPassword=APP_CONFIG.get("DB_PASSWORD_SECRET") or APP_CONFIG.get("DB_PASSWORD"),
+ dbPort=int(APP_CONFIG.get("DB_PORT", 5432)),
+ userId=None,
+ )
+
+ if wfDb._ensureTableExists(AutoWorkflow):
+ mandateIds_wf: List[str] = []
+ for um in root.getUserMandates(userId):
+ mid = getattr(um, "mandateId", None)
+ if mid and getattr(um, "enabled", True):
+ mandateIds_wf.append(str(mid))
+
+ wfFilter: dict = {"isTemplate": False}
+ if mandateIds_wf:
+ wfFilter["mandateId"] = mandateIds_wf
+ else:
+ wfFilter["mandateId"] = "__impossible__"
+
+ wfCount = wfDb.getRecordsetPaginated(
+ AutoWorkflow,
+ pagination=PaginationParams(page=1, pageSize=1),
+ recordFilter=wfFilter,
+ )
+ liveStats["totalWorkflows"] = (
+ wfCount.get("totalItems", 0) if isinstance(wfCount, dict) else wfCount.totalItems
+ )
+
+ activeFilter = dict(wfFilter)
+ activeFilter["active"] = True
+ activeCount = wfDb.getRecordsetPaginated(
+ AutoWorkflow,
+ pagination=PaginationParams(page=1, pageSize=1),
+ recordFilter=activeFilter,
+ )
+ liveStats["activeWorkflows"] = (
+ activeCount.get("totalItems", 0) if isinstance(activeCount, dict) else activeCount.totalItems
+ )
+
+ if wfDb._ensureTableExists(AutoRun):
+ runFilter: dict = {}
+ if mandateIds_wf:
+ runFilter["mandateId"] = mandateIds_wf
+ else:
+ runFilter["ownerId"] = userId
+
+ runCount = wfDb.getRecordsetPaginated(
+ AutoRun,
+ pagination=PaginationParams(page=1, pageSize=1),
+ recordFilter=runFilter,
+ )
+ liveStats["totalRuns"] = (
+ runCount.get("totalItems", 0) if isinstance(runCount, dict) else runCount.totalItems
+ )
+
+ totalTokens = 0
+ totalRuns = liveStats["totalRuns"]
+ if 0 < totalRuns <= 10000:
+ allRuns = wfDb.getRecordset(
+ AutoRun, recordFilter=runFilter, fieldFilter=["costTokens"],
+ ) or []
+ for r in allRuns:
+ totalTokens += r.get("costTokens", 0) or 0
+ liveStats["totalTokens"] = totalTokens
+ except Exception as e:
+ logger.debug(f"integrations-overview workflow stats: {e}")
+
+ out["liveStats"] = liveStats
+
+ return out
+
+
+@router.get("/integrations-overview")
+@limiter.limit("30/minute")
+def get_integrations_overview(
+ request: Request,
+ reqContext: RequestContext = Depends(getRequestContext),
+) -> Dict[str, Any]:
+ """Aggregated, non-fictitious data for the PORTA integrations diagram."""
+ user_id = str(reqContext.user.id)
+ return _buildIntegrationsOverviewPayload(user_id, user=reqContext.user)
+
+
+@router.get("/ai-models")
+@limiter.limit("60/minute")
+def get_ai_models_for_integrations(
+ request: Request,
+ reqContext: RequestContext = Depends(getRequestContext),
+) -> Dict[str, Any]:
+ """
+ Registered AI models for the Integrations architecture page.
+ Returns unique displayName entries with connector metadata (no callables).
+ """
+ try:
+ from modules.aicore.aicoreModelRegistry import modelRegistry
+
+ modelRegistry.ensureConnectorsRegistered()
+ modelRegistry.refreshModels(force=False)
+ models = modelRegistry.getModels()
+ out: List[Dict[str, Any]] = []
+ seen: set = set()
+ for m in models:
+ if not getattr(m, "isAvailable", True):
+ continue
+ key = (m.displayName, m.connectorType)
+ if key in seen:
+ continue
+ seen.add(key)
+ dumped = m.model_dump(
+ exclude={"functionCall", "functionCallStream", "calculatepriceCHF"},
+ mode="json",
+ )
+ out.append(dumped)
+ return {"models": out}
+ except Exception as e:
+ logger.error(f"Error listing AI models: {e}")
+ return {"models": [], "error": str(e)}
diff --git a/modules/routes/routeVoiceGoogle.py b/modules/routes/routeVoiceGoogle.py
index 309e59bb..dfa1a15e 100644
--- a/modules/routes/routeVoiceGoogle.py
+++ b/modules/routes/routeVoiceGoogle.py
@@ -12,13 +12,11 @@ import json
import base64
import secrets
import time
-from fastapi import APIRouter, File, Form, UploadFile, Depends, HTTPException, Body, Query, Request, WebSocket, WebSocketDisconnect
-from fastapi.responses import Response
+from fastapi import APIRouter, Depends, HTTPException, Query, Request, WebSocket, WebSocketDisconnect
from typing import Optional, Dict, Any, List
from modules.auth import getCurrentUser, getRequestContext, RequestContext, limiter
from modules.datamodels.datamodelUam import User
from modules.interfaces.interfaceVoiceObjects import getVoiceInterface, VoiceObjects
-
logger = logging.getLogger(__name__)
router = APIRouter(prefix="/voice-google", tags=["Voice Google"])
@@ -61,299 +59,6 @@ def _getVoiceInterface(currentUser: User) -> VoiceObjects:
detail=f"Failed to initialize voice interface: {str(e)}"
)
-@router.post("/speech-to-text")
-async def speech_to_text(
- audioFile: UploadFile = File(...),
- language: str = Form("de-DE"),
- currentUser: User = Depends(getCurrentUser)
-):
- """Convert speech to text using Google Cloud Speech-to-Text API."""
- try:
- logger.info(f"🎤 Speech-to-text request: {audioFile.filename}, language: {language}")
-
- # Read audio file
- audioContent = await audioFile.read()
- logger.info(f"📊 Audio file size: {len(audioContent)} bytes")
-
- # Get voice interface
- voiceInterface = _getVoiceInterface(currentUser)
-
- # Validate audio format
- validation = voiceInterface.validateAudioFormat(audioContent)
-
- if not validation["valid"]:
- raise HTTPException(
- status_code=400,
- detail=f"Invalid audio format: {validation.get('error', 'Unknown error')}"
- )
-
- # Perform speech recognition
- result = await voiceInterface.speechToText(
- audioContent=audioContent,
- language=language
- )
-
- if result["success"]:
- return {
- "success": True,
- "text": result["text"],
- "confidence": result["confidence"],
- "language": result["language"],
- "audio_info": {
- "size": len(audioContent),
- "format": validation["format"],
- "estimated_duration": validation.get("estimated_duration", 0)
- }
- }
- else:
- raise HTTPException(
- status_code=400,
- detail=f"Speech recognition failed: {result.get('error', 'Unknown error')}"
- )
-
- except HTTPException:
- raise
- except Exception as e:
- logger.error(f"❌ Speech-to-text error: {e}")
- raise HTTPException(
- status_code=500,
- detail=f"Speech-to-text processing failed: {str(e)}"
- )
-
-@router.post("/detect-language")
-async def detect_language(
- text: str = Form(...),
- currentUser: User = Depends(getCurrentUser)
-):
- """Detect the language of text using Google Cloud Translation API."""
- try:
- logger.info(f"🔍 Language detection request: '{text[:100]}...'")
-
- if not text.strip():
- raise HTTPException(
- status_code=400,
- detail="Empty text provided for language detection"
- )
-
- # Get voice interface
- voiceInterface = _getVoiceInterface(currentUser)
-
- # Perform language detection
- result = await voiceInterface.detectLanguage(text)
-
- if result["success"]:
- return {
- "success": True,
- "language": result["language"],
- "confidence": result.get("confidence", 1.0)
- }
- else:
- raise HTTPException(
- status_code=400,
- detail=f"Language detection failed: {result.get('error', 'Unknown error')}"
- )
-
- except HTTPException:
- raise
- except Exception as e:
- logger.error(f"❌ Language detection error: {e}")
- raise HTTPException(
- status_code=500,
- detail=f"Language detection processing failed: {str(e)}"
- )
-
-@router.post("/translate")
-async def translate_text(
- text: str = Form(...),
- sourceLanguage: str = Form("de"),
- targetLanguage: str = Form("en"),
- currentUser: User = Depends(getCurrentUser)
-):
- """Translate text using Google Cloud Translation API."""
- try:
- logger.info(f"🌐 Translation request: '{text}' ({sourceLanguage} -> {targetLanguage})")
-
- if not text.strip():
- raise HTTPException(
- status_code=400,
- detail="Empty text provided for translation"
- )
-
- # Get voice interface
- voiceInterface = _getVoiceInterface(currentUser)
-
- # Perform translation
- result = await voiceInterface.translateText(
- text=text,
- sourceLanguage=sourceLanguage,
- targetLanguage=targetLanguage
- )
-
- if result["success"]:
- return {
- "success": True,
- "original_text": result["original_text"],
- "translated_text": result["translated_text"],
- "source_language": result["source_language"],
- "target_language": result["target_language"]
- }
- else:
- raise HTTPException(
- status_code=400,
- detail=f"Translation failed: {result.get('error', 'Unknown error')}"
- )
-
- except HTTPException:
- raise
- except Exception as e:
- logger.error(f"❌ Translation error: {e}")
- raise HTTPException(
- status_code=500,
- detail=f"Translation processing failed: {str(e)}"
- )
-
-@router.post("/realtime-interpreter")
-async def realtime_interpreter(
- audioFile: UploadFile = File(...),
- fromLanguage: str = Form("de-DE"),
- toLanguage: str = Form("en-US"),
- connectionId: str = Form(None),
- currentUser: User = Depends(getCurrentUser)
-):
- """Real-time interpreter: speech to translated text using Google Cloud APIs."""
- try:
- logger.info(f"🔄 Real-time interpreter request: {audioFile.filename}")
- logger.info(f" From: {fromLanguage} -> To: {toLanguage}")
- logger.info(f" MIME type: {audioFile.content_type}")
-
- # Read audio file
- audioContent = await audioFile.read()
- logger.info(f"📊 Audio file size: {len(audioContent)} bytes")
-
- # Save audio file for debugging with correct extension
- # file_extension = "webm" if audio_file.filename.endswith('.webm') else "wav"
- # debug_filename = f"debug_audio/audio_google_{audio_file.filename.replace('.wav', '.webm')}"
- # os.makedirs("debug_audio", exist_ok=True)
- # with open(debug_filename, "wb") as f:
- # f.write(audio_content)
- # logger.info(f"💾 Saved audio file for debugging: {debug_filename}")
-
- # Get voice interface
- voiceInterface = _getVoiceInterface(currentUser)
-
- # Validate audio format
- validation = voiceInterface.validateAudioFormat(audioContent)
-
- if not validation["valid"]:
- raise HTTPException(
- status_code=400,
- detail=f"Invalid audio format: {validation.get('error', 'Unknown error')}"
- )
-
- # Perform complete pipeline: Speech-to-Text + Translation
- result = await voiceInterface.speechToTranslatedText(
- audioContent=audioContent,
- fromLanguage=fromLanguage,
- toLanguage=toLanguage
- )
-
- if result["success"]:
- logger.info(f"✅ Real-time interpreter successful:")
- logger.info(f" Original: '{result['original_text']}'")
- logger.info(f" Translated: '{result['translated_text']}'")
-
- return {
- "success": True,
- "original_text": result["original_text"],
- "translated_text": result["translated_text"],
- "confidence": result["confidence"],
- "source_language": result["source_language"],
- "target_language": result["target_language"],
- "audio_info": {
- "size": len(audioContent),
- "format": validation["format"],
- "estimated_duration": validation.get("estimated_duration", 0)
- }
- }
- else:
- raise HTTPException(
- status_code=400,
- detail=f"Real-time interpreter failed: {result.get('error', 'Unknown error')}"
- )
-
- except HTTPException:
- raise
- except Exception as e:
- logger.error(f"❌ Real-time interpreter error: {e}")
- raise HTTPException(
- status_code=500,
- detail=f"Real-time interpreter processing failed: {str(e)}"
- )
-
-
-@router.post("/text-to-speech")
-async def text_to_speech(
- request: Request,
- text: str = Form(...),
- language: str = Form("de-DE"),
- voice: str = Form(None),
- context: RequestContext = Depends(getRequestContext),
-):
- """Convert text to speech using Google Cloud Text-to-Speech."""
- try:
- logger.info(f"Text-to-Speech request: '{text[:50]}...' in {language}")
-
- if not text.strip():
- raise HTTPException(
- status_code=400,
- detail="Empty text provided for text-to-speech"
- )
-
- mandateId = str(getattr(context, "mandateId", "") or "")
- voiceInterface = getVoiceInterface(context.user, mandateId)
- try:
- from modules.serviceCenter.services.serviceBilling.mainServiceBilling import getService as getBillingService
- billingService = getBillingService(context.user, mandateId)
- def _billingCb(data):
- priceCHF = data.get("priceCHF", 0.0)
- operation = data.get("operation", "voice")
- if priceCHF > 0:
- billingService.recordUsage(priceCHF=priceCHF, aicoreProvider="google-voice", aicoreModel=operation, description=f"Voice {operation}")
- voiceInterface.billingCallback = _billingCb
- except Exception as e:
- logger.warning(f"TTS billing setup skipped: {e}")
-
- result = await voiceInterface.textToSpeech(
- text=text,
- languageCode=language,
- voiceName=voice
- )
-
- if result["success"]:
- return Response(
- content=result["audioContent"],
- media_type="audio/mpeg",
- headers={
- "Content-Disposition": "attachment; filename=speech.mp3",
- "X-Voice-Name": result.get("voiceName", ""),
- "X-Language-Code": result.get("languageCode", language),
- }
- )
- else:
- raise HTTPException(
- status_code=400,
- detail=f"Text-to-Speech failed: {result.get('error', 'Unknown error')}"
- )
-
- except HTTPException:
- raise
- except Exception as e:
- logger.error(f"Text-to-Speech error: {e}")
- raise HTTPException(
- status_code=500,
- detail=f"Text-to-Speech processing failed: {str(e)}"
- )
-
@router.get("/languages")
async def get_available_languages(currentUser: User = Depends(getCurrentUser)):
"""Get available languages from Google Cloud Text-to-Speech."""
@@ -424,71 +129,6 @@ async def get_available_voices(
detail=f"Failed to get available voices: {str(e)}"
)
-@router.get("/health")
-async def health_check(currentUser: User = Depends(getCurrentUser)):
- """Health check for Google Cloud voice services."""
- try:
- voiceInterface = _getVoiceInterface(currentUser)
- test_result = await voiceInterface.healthCheck()
-
- return test_result
-
- except Exception as e:
- logger.error(f"❌ Health check failed: {e}")
- return {
- "status": "unhealthy",
- "error": str(e)
- }
-
-@router.get("/settings")
-async def get_voice_settings(currentUser: User = Depends(getCurrentUser)):
- """Get voice settings for the current user (reads from UserVoicePreferences)."""
- from modules.datamodels.datamodelUam import UserVoicePreferences
- from modules.interfaces.interfaceDbApp import getRootInterface
- rootInterface = getRootInterface()
- userId = str(currentUser.id)
-
- prefs = rootInterface.db.getRecordset(
- UserVoicePreferences, recordFilter={"userId": userId}
- )
- if prefs:
- data = prefs[0] if isinstance(prefs[0], dict) else prefs[0].model_dump()
- return {"success": True, "data": {"user_settings": data}}
- return {"success": True, "data": {"user_settings": UserVoicePreferences(userId=userId).model_dump()}}
-
-
-@router.post("/settings")
-async def save_voice_settings(
- settings: Dict[str, Any] = Body(...),
- currentUser: User = Depends(getCurrentUser)
-):
- """Save voice settings for the current user (writes to UserVoicePreferences)."""
- from modules.datamodels.datamodelUam import UserVoicePreferences, _normalizeTtsVoiceMap
- from modules.interfaces.interfaceDbApp import getRootInterface
- rootInterface = getRootInterface()
- userId = str(currentUser.id)
-
- allowedFields = {
- "sttLanguage", "ttsLanguage", "ttsVoice", "ttsVoiceMap",
- "translationSourceLanguage", "translationTargetLanguage",
- }
- updateData = {k: v for k, v in settings.items() if k in allowedFields}
- if "ttsVoiceMap" in updateData:
- updateData["ttsVoiceMap"] = _normalizeTtsVoiceMap(updateData["ttsVoiceMap"])
-
- existing = rootInterface.db.getRecordset(
- UserVoicePreferences, recordFilter={"userId": userId}
- )
- if existing:
- existingRecord = existing[0]
- existingId = existingRecord.get("id") if isinstance(existingRecord, dict) else existingRecord.id
- rootInterface.db.recordModify(UserVoicePreferences, existingId, updateData)
- else:
- newPrefs = UserVoicePreferences(userId=userId, **updateData)
- rootInterface.db.recordCreate(UserVoicePreferences, newPrefs.model_dump())
-
- return {"success": True, "message": "Voice settings saved successfully", "data": updateData}
-
# =========================================================================
# STT Streaming WebSocket — generic, used by all features
# =========================================================================
diff --git a/modules/routes/routeVoiceUser.py b/modules/routes/routeVoiceUser.py
index 2f21662b..a3c3fda7 100644
--- a/modules/routes/routeVoiceUser.py
+++ b/modules/routes/routeVoiceUser.py
@@ -17,6 +17,8 @@ from modules.auth import getCurrentUser, limiter
from modules.datamodels.datamodelUam import User, UserVoicePreferences, _normalizeTtsVoiceMap
from modules.interfaces.interfaceDbApp import getRootInterface
from modules.interfaces.interfaceVoiceObjects import getVoiceInterface
+from modules.shared.i18nRegistry import apiRouteContext
+routeApiMsg = apiRouteContext("routeVoiceUser")
logger = logging.getLogger(__name__)
@@ -176,7 +178,7 @@ def _resolveMandateIdForVoiceTestAi(request: Request, currentUser: User) -> str:
if headerRaw not in memberIds:
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
- detail="X-Mandate-Id is not a mandate you belong to.",
+ detail=routeApiMsg("X-Mandate-Id is not a mandate you belong to."),
)
if _mandatePassesAiPoolBilling(currentUser, headerRaw, userId):
logger.info(
@@ -294,7 +296,7 @@ async def _generateTtsSampleTextForLocale(
logger.warning("Voice test AI sample empty or errorCount=%s", getattr(response, "errorCount", None))
raise HTTPException(
status_code=status.HTTP_502_BAD_GATEWAY,
- detail="Could not generate voice test sample text.",
+ detail=routeApiMsg("Could not generate voice test sample text."),
)
if len(content) > 500:
content = content[:500].rstrip()
diff --git a/modules/routes/routeWorkflowDashboard.py b/modules/routes/routeWorkflowDashboard.py
new file mode 100644
index 00000000..c44951b3
--- /dev/null
+++ b/modules/routes/routeWorkflowDashboard.py
@@ -0,0 +1,731 @@
+# Copyright (c) 2025 Patrick Motsch
+# All rights reserved.
+"""
+System-level Workflow Dashboard API.
+
+Provides cross-feature, cross-mandate access to workflow runs AND workflows
+with RBAC scoping: user sees own runs/workflows, mandate admin sees mandate
+runs/workflows, sysadmin sees all.
+"""
+
+import asyncio
+import json
+import logging
+import math
+from typing import Optional, List
+from fastapi import APIRouter, Depends, Request, Query, Path, HTTPException
+from fastapi.responses import StreamingResponse
+from slowapi import Limiter
+from slowapi.util import get_remote_address
+
+from modules.auth.authentication import getRequestContext, RequestContext
+from modules.interfaces.interfaceDbApp import getRootInterface
+from modules.connectors.connectorDbPostgre import DatabaseConnector
+from modules.shared.configuration import APP_CONFIG
+from modules.datamodels.datamodelPagination import PaginationParams, normalize_pagination_dict
+from modules.features.graphicalEditor.datamodelFeatureGraphicalEditor import (
+ AutoRun, AutoStepLog, AutoWorkflow, AutoTask,
+)
+from modules.shared.i18nRegistry import apiRouteContext
+
+routeApiMsg = apiRouteContext("routeWorkflowDashboard")
+
+logger = logging.getLogger(__name__)
+limiter = Limiter(key_func=get_remote_address)
+
+router = APIRouter(prefix="/api/system/workflow-runs", tags=["WorkflowDashboard"])
+
+_GREENFIELD_DB = "poweron_graphicaleditor"
+
+
+def _getDb() -> DatabaseConnector:
+ return DatabaseConnector(
+ dbHost=APP_CONFIG.get("DB_HOST", "localhost"),
+ dbDatabase=_GREENFIELD_DB,
+ dbUser=APP_CONFIG.get("DB_USER"),
+ dbPassword=APP_CONFIG.get("DB_PASSWORD_SECRET") or APP_CONFIG.get("DB_PASSWORD"),
+ dbPort=int(APP_CONFIG.get("DB_PORT", 5432)),
+ userId=None,
+ )
+
+
+def _getUserMandateIds(userId: str) -> list[str]:
+ """Get mandate IDs the user is a member of."""
+ rootIface = getRootInterface()
+ memberships = rootIface.getUserMandates(userId)
+ return [um.mandateId for um in memberships if um.mandateId and um.enabled]
+
+
+def _getAdminMandateIds(userId: str, mandateIds: list) -> list:
+ """Batch-check which mandates the user is admin for (2 SQL queries total)."""
+ if not mandateIds:
+ return []
+ rootIface = getRootInterface()
+ from modules.datamodels.datamodelMembership import UserMandateRole
+ allRoles = rootIface.db.getRecordset(UserMandateRole, recordFilter={
+ "userId": userId, "mandateId": mandateIds,
+ })
+ if not allRoles:
+ return []
+
+ roleIds = set()
+ roleToMandate: dict = {}
+ for r in allRoles:
+ row = r if isinstance(r, dict) else r.__dict__
+ rid = row.get("roleId")
+ mid = row.get("mandateId")
+ if rid:
+ roleIds.add(rid)
+ roleToMandate.setdefault(rid, set()).add(mid)
+
+ if not roleIds:
+ return []
+
+ from modules.datamodels.datamodelRbac import MandateRole
+ roleRecords = rootIface.db.getRecordset(MandateRole, recordFilter={"id": list(roleIds)})
+ adminMandates: set = set()
+ for role in (roleRecords or []):
+ row = role if isinstance(role, dict) else role.__dict__
+ if row.get("isAdmin"):
+ rid = row.get("id")
+ if rid and rid in roleToMandate:
+ adminMandates.update(roleToMandate[rid])
+
+ return [mid for mid in mandateIds if mid in adminMandates]
+
+
+def _isUserMandateAdmin(userId: str, mandateId: str) -> bool:
+ """Check if user is admin for a specific mandate."""
+ adminIds = _getAdminMandateIds(userId, [mandateId])
+ return mandateId in adminIds
+
+
+def _scopedRunFilter(context: RequestContext) -> Optional[dict]:
+ """
+ Build a DB filter dict based on RBAC:
+ - sysadmin: None (no filter)
+ - mandate admin: mandateId IN user's mandates
+ - normal user: ownerId = userId
+ """
+ if context.hasSysAdminRole:
+ return None
+
+ userId = str(context.user.id) if context.user else None
+ if not userId:
+ return {"ownerId": "__impossible__"}
+
+ mandateIds = _getUserMandateIds(userId)
+ adminMandateIds = _getAdminMandateIds(userId, mandateIds)
+
+ if adminMandateIds:
+ return {"mandateId": adminMandateIds}
+
+ return {"ownerId": userId}
+
+
+def _scopedWorkflowFilter(context: RequestContext) -> Optional[dict]:
+ """
+ Build a DB filter for AutoWorkflow based on RBAC:
+ - sysadmin: None (no filter, sees all)
+ - normal user: mandateId IN user's mandates
+ """
+ if context.hasSysAdminRole:
+ return None
+
+ userId = str(context.user.id) if context.user else None
+ if not userId:
+ return {"mandateId": "__impossible__"}
+
+ mandateIds = _getUserMandateIds(userId)
+ if mandateIds:
+ return {"mandateId": mandateIds}
+
+ return {"mandateId": "__impossible__"}
+
+
+
+@router.get("")
+@limiter.limit("60/minute")
+def get_workflow_runs(
+ request: Request,
+ limit: int = Query(50, ge=1, le=200),
+ offset: int = Query(0, ge=0),
+ status: Optional[str] = Query(None, description="Filter by status"),
+ mandateId: Optional[str] = Query(None, description="Filter by mandate"),
+ pagination: Optional[str] = Query(None, description="JSON-encoded PaginationParams"),
+ context: RequestContext = Depends(getRequestContext),
+) -> dict:
+ """List workflow runs with RBAC scoping (SQL-paginated)."""
+ db = _getDb()
+ if not db._ensureTableExists(AutoRun):
+ return {"runs": [], "total": 0, "limit": limit, "offset": offset}
+
+ baseFilter = _scopedRunFilter(context)
+ recordFilter = dict(baseFilter) if baseFilter else {}
+
+ if status:
+ recordFilter["status"] = status
+ if mandateId:
+ recordFilter["mandateId"] = mandateId
+
+ paginationParams = None
+ if pagination:
+ try:
+ paginationDict = json.loads(pagination)
+ if paginationDict:
+ paginationDict = normalize_pagination_dict(paginationDict)
+ paginationParams = PaginationParams(**paginationDict)
+ except Exception:
+ pass
+
+ if not paginationParams:
+ page = (offset // limit) + 1 if limit > 0 else 1
+ paginationParams = PaginationParams(
+ page=page,
+ pageSize=limit,
+ sort=[{"field": "sysCreatedAt", "direction": "desc"}],
+ )
+
+ result = db.getRecordsetPaginated(
+ AutoRun,
+ pagination=paginationParams,
+ recordFilter=recordFilter if recordFilter else None,
+ )
+ pageRuns = result.get("items", []) if isinstance(result, dict) else result.items
+ total = result.get("totalItems", 0) if isinstance(result, dict) else result.totalItems
+
+ wfIds = list({r.get("workflowId") for r in pageRuns if r.get("workflowId")})
+ wfMap: dict = {}
+ if wfIds and db._ensureTableExists(AutoWorkflow):
+ wfs = db.getRecordset(AutoWorkflow, recordFilter={"id": wfIds})
+ for wf in (wfs or []):
+ wfMap[wf.get("id")] = wf
+
+ mandateIds = list({r.get("mandateId") for r in pageRuns if r.get("mandateId")})
+ instanceIds = list({
+ wfMap[r.get("workflowId")].get("featureInstanceId")
+ for r in pageRuns
+ if r.get("workflowId") in wfMap and wfMap[r.get("workflowId")].get("featureInstanceId")
+ })
+
+ mandateLabelMap: dict = {}
+ instanceLabelMap: dict = {}
+ try:
+ rootIface = getRootInterface()
+ if mandateIds:
+ mMap = rootIface.getMandatesByIds(mandateIds)
+ for mid, m in mMap.items():
+ mandateLabelMap[mid] = getattr(m, "label", None) or getattr(m, "name", mid) or mid
+ if instanceIds:
+ from modules.interfaces.interfaceFeatures import getFeatureInterface
+ featureIface = getFeatureInterface(rootIface.db)
+ for iid in instanceIds:
+ fi = featureIface.getFeatureInstance(iid)
+ if fi:
+ instanceLabelMap[iid] = fi.label or iid
+ except Exception as e:
+ logger.warning(f"Failed to enrich run labels: {e}")
+
+ runs = []
+ for r in pageRuns:
+ row = dict(r)
+ wfId = row.get("workflowId")
+ wf = wfMap.get(wfId, {})
+ row["workflowLabel"] = (
+ row.get("label")
+ or (wf.get("label") if isinstance(wf, dict) else None)
+ or wfId
+ or "—"
+ )
+ row["mandateLabel"] = mandateLabelMap.get(row.get("mandateId"), row.get("mandateId") or "—")
+ fiid = wf.get("featureInstanceId") if isinstance(wf, dict) else None
+ row["featureInstanceId"] = fiid
+ row["instanceLabel"] = instanceLabelMap.get(fiid, fiid or "—")
+ runs.append(row)
+
+ return {"runs": runs, "total": total, "limit": limit, "offset": offset}
+
+
+@router.get("/metrics")
+@limiter.limit("60/minute")
+def get_workflow_metrics(
+ request: Request,
+ context: RequestContext = Depends(getRequestContext),
+) -> dict:
+ """Aggregated metrics across all accessible workflow runs (SQL COUNT).
+
+ Uses the same RBAC scoping as the runs list and workflows list
+ so that metric cards always match the table data.
+ """
+ db = _getDb()
+
+ # --- Workflow counts (same filter as /workflows endpoint) ---
+ workflowCount = 0
+ activeWorkflows = 0
+ if db._ensureTableExists(AutoWorkflow):
+ wfBaseFilter = _scopedWorkflowFilter(context)
+ wfFilter = dict(wfBaseFilter) if wfBaseFilter else {}
+ wfFilter["isTemplate"] = False
+
+ wfCount = db.getRecordsetPaginated(
+ AutoWorkflow, pagination=PaginationParams(page=1, pageSize=1),
+ recordFilter=wfFilter if wfFilter else None,
+ )
+ workflowCount = wfCount.get("totalItems", 0) if isinstance(wfCount, dict) else wfCount.totalItems
+
+ activeFilter = dict(wfFilter)
+ activeFilter["active"] = True
+ activeCount = db.getRecordsetPaginated(
+ AutoWorkflow, pagination=PaginationParams(page=1, pageSize=1),
+ recordFilter=activeFilter,
+ )
+ activeWorkflows = activeCount.get("totalItems", 0) if isinstance(activeCount, dict) else activeCount.totalItems
+
+ # --- Run counts (same filter as /runs endpoint) ---
+ if not db._ensureTableExists(AutoRun):
+ return {
+ "totalRuns": 0, "runsByStatus": {}, "totalTokens": 0,
+ "totalCredits": 0, "workflowCount": workflowCount,
+ "activeWorkflows": activeWorkflows,
+ }
+
+ runBaseFilter = _scopedRunFilter(context)
+
+ countResult = db.getRecordsetPaginated(
+ AutoRun, pagination=PaginationParams(page=1, pageSize=1),
+ recordFilter=runBaseFilter,
+ )
+ totalRuns = countResult.get("totalItems", 0) if isinstance(countResult, dict) else countResult.totalItems
+
+ runsByStatus: dict = {}
+ try:
+ statusValues = db.getDistinctColumnValues(AutoRun, "status", recordFilter=runBaseFilter)
+ for sv in (statusValues or []):
+ statusFilter = dict(runBaseFilter) if runBaseFilter else {}
+ statusFilter["status"] = sv
+ sr = db.getRecordsetPaginated(
+ AutoRun, pagination=PaginationParams(page=1, pageSize=1),
+ recordFilter=statusFilter,
+ )
+ runsByStatus[sv] = sr.get("totalItems", 0) if isinstance(sr, dict) else sr.totalItems
+ except Exception as e:
+ logger.warning(f"Failed to compute runsByStatus: {e}")
+
+ totalTokens = 0
+ totalCredits = 0.0
+ if 0 < totalRuns <= 10000:
+ allRuns = db.getRecordset(AutoRun, recordFilter=runBaseFilter, fieldFilter=["costTokens", "costCredits"]) or []
+ for r in allRuns:
+ totalTokens += r.get("costTokens", 0) or 0
+ totalCredits += r.get("costCredits", 0.0) or 0.0
+
+ return {
+ "totalRuns": totalRuns,
+ "runsByStatus": runsByStatus,
+ "totalTokens": totalTokens,
+ "totalCredits": round(totalCredits, 4),
+ "workflowCount": workflowCount,
+ "activeWorkflows": activeWorkflows,
+ }
+
+
+# ---------------------------------------------------------------------------
+# System-level Workflow listing (all workflows the user can see via RBAC)
+# ---------------------------------------------------------------------------
+
+@router.get("/workflows")
+@limiter.limit("60/minute")
+def get_system_workflows(
+ request: Request,
+ active: Optional[bool] = Query(None, description="Filter by active status"),
+ mandateId: Optional[str] = Query(None, description="Filter by mandate"),
+ pagination: Optional[str] = Query(None, description="JSON-encoded PaginationParams"),
+ context: RequestContext = Depends(getRequestContext),
+) -> dict:
+ """List all workflows the user has access to (RBAC-scoped, cross-instance)."""
+ db = _getDb()
+ if not db._ensureTableExists(AutoWorkflow):
+ return {"items": [], "pagination": {"currentPage": 1, "pageSize": 25, "totalItems": 0, "totalPages": 0}}
+
+ baseFilter = _scopedWorkflowFilter(context)
+ recordFilter = dict(baseFilter) if baseFilter else {}
+ recordFilter["isTemplate"] = False
+
+ if active is not None:
+ recordFilter["active"] = active
+ if mandateId:
+ recordFilter["mandateId"] = mandateId
+
+ paginationParams = None
+ if pagination:
+ try:
+ paginationDict = json.loads(pagination)
+ if paginationDict:
+ paginationDict = normalize_pagination_dict(paginationDict)
+ paginationParams = PaginationParams(**paginationDict)
+ except Exception:
+ pass
+
+ if not paginationParams:
+ paginationParams = PaginationParams(
+ page=1,
+ pageSize=25,
+ sort=[{"field": "sysCreatedAt", "direction": "desc"}],
+ )
+
+ result = db.getRecordsetPaginated(
+ AutoWorkflow,
+ pagination=paginationParams,
+ recordFilter=recordFilter if recordFilter else None,
+ )
+ pageItems = result.get("items", []) if isinstance(result, dict) else result.items
+ totalItems = result.get("totalItems", 0) if isinstance(result, dict) else result.totalItems
+ totalPages = result.get("totalPages", 0) if isinstance(result, dict) else result.totalPages
+
+ mandateIds = list({w.get("mandateId") for w in pageItems if w.get("mandateId")})
+ instanceIds = list({w.get("featureInstanceId") for w in pageItems if w.get("featureInstanceId")})
+
+ mandateLabelMap: dict = {}
+ instanceLabelMap: dict = {}
+ try:
+ rootIface = getRootInterface()
+ if mandateIds:
+ mandateMap = rootIface.getMandatesByIds(mandateIds)
+ for mid, m in mandateMap.items():
+ mandateLabelMap[mid] = getattr(m, "label", None) or getattr(m, "name", mid) or mid
+ if instanceIds:
+ from modules.interfaces.interfaceFeatures import getFeatureInterface
+ featureIface = getFeatureInterface(rootIface.db)
+ for iid in instanceIds:
+ fi = featureIface.getFeatureInstance(iid)
+ if fi:
+ instanceLabelMap[iid] = fi.label or iid
+ except Exception as e:
+ logger.warning(f"Failed to enrich workflow labels: {e}")
+
+ userId = str(context.user.id) if context.user else None
+ adminMandateIds = []
+ if userId and not context.hasSysAdminRole:
+ userMandateIds = _getUserMandateIds(userId)
+ adminMandateIds = _getAdminMandateIds(userId, userMandateIds)
+
+ workflowIds = [w.get("id") for w in pageItems if w.get("id")]
+ activeRunMap: dict = {}
+ runCountMap: dict = {}
+ lastStartedMap: dict = {}
+ if workflowIds:
+ try:
+ if db._ensureTableExists(AutoRun):
+ for wfId in workflowIds:
+                runs = db.getRecordset(AutoRun, recordFilter={"workflowId": wfId}) or []
+ runCountMap[wfId] = len(runs)
+ for r in runs:
+ rDict = dict(r)
+ ts = rDict.get("sysCreatedAt")
+ if ts and (lastStartedMap.get(wfId) is None or ts > lastStartedMap.get(wfId)):
+ lastStartedMap[wfId] = ts
+ if rDict.get("status") in ("running", "paused"):
+ activeRunMap[wfId] = rDict.get("id")
+ except Exception as e:
+ logger.warning(f"Failed to enrich workflow run info: {e}")
+
+ items = []
+ for w in pageItems:
+ row = dict(w)
+ wMandateId = row.get("mandateId")
+ wfId = row.get("id")
+ row["mandateLabel"] = mandateLabelMap.get(wMandateId, wMandateId or "—")
+ row["instanceLabel"] = instanceLabelMap.get(row.get("featureInstanceId"), row.get("featureInstanceId") or "—")
+ row["isRunning"] = wfId in activeRunMap
+ row["activeRunId"] = activeRunMap.get(wfId)
+ row["runCount"] = runCountMap.get(wfId, 0)
+ row["lastStartedAt"] = lastStartedMap.get(wfId)
+
+ if context.hasSysAdminRole:
+ row["canEdit"] = True
+ row["canDelete"] = True
+ row["canExecute"] = True
+ elif wMandateId and wMandateId in adminMandateIds:
+ row["canEdit"] = True
+ row["canDelete"] = True
+ row["canExecute"] = True
+ else:
+ row["canEdit"] = False
+ row["canDelete"] = False
+ row["canExecute"] = False
+
+ row.pop("graph", None)
+
+ items.append(row)
+
+ return {
+ "items": items,
+ "pagination": {
+ "currentPage": paginationParams.page,
+ "pageSize": paginationParams.pageSize,
+ "totalItems": totalItems,
+ "totalPages": totalPages,
+ },
+ }
+
+
+# ---------------------------------------------------------------------------
+# Filter-values endpoints (for FormGeneratorTable column filters)
+# ---------------------------------------------------------------------------
+
+def _enrichedFilterValues(
+ db, context: RequestContext, modelClass, scopeFilter, column: str,
+) -> List[str]:
+ """Return distinct filter values for enriched columns (mandateLabel, instanceLabel)
+ or delegate to DB-level DISTINCT for raw columns."""
+ if column in ("mandateLabel", "mandateId"):
+ baseFilter = scopeFilter(context)
+ recordFilter = dict(baseFilter) if baseFilter else {}
+ if modelClass == AutoWorkflow:
+ recordFilter["isTemplate"] = False
+ items = db.getRecordset(modelClass, recordFilter=recordFilter or None, fieldFilter=["mandateId"]) or []
+ mandateIds = list({r.get("mandateId") for r in items if r.get("mandateId")})
+ if not mandateIds:
+ return []
+ try:
+ rootIface = getRootInterface()
+ mMap = rootIface.getMandatesByIds(mandateIds)
+ labels = sorted({
+ getattr(m, "label", None) or getattr(m, "name", mid) or mid
+ for mid, m in mMap.items()
+ }, key=lambda v: v.lower())
+ return labels
+ except Exception:
+ return sorted(mandateIds)
+
+ if column in ("instanceLabel", "featureInstanceId"):
+ baseFilter = scopeFilter(context)
+ recordFilter = dict(baseFilter) if baseFilter else {}
+ if modelClass == AutoWorkflow:
+ recordFilter["isTemplate"] = False
+ items = db.getRecordset(modelClass, recordFilter=recordFilter or None, fieldFilter=["featureInstanceId"]) or []
+ instanceIds = list({r.get("featureInstanceId") for r in items if r.get("featureInstanceId")})
+ else:
+ items = db.getRecordset(modelClass, recordFilter=recordFilter or None, fieldFilter=["workflowId"]) or []
+ wfIds = list({r.get("workflowId") for r in items if r.get("workflowId")})
+ instanceIds = []
+ if wfIds and db._ensureTableExists(AutoWorkflow):
+ wfs = db.getRecordset(AutoWorkflow, recordFilter={"id": wfIds}, fieldFilter=["featureInstanceId"]) or []
+ instanceIds = list({w.get("featureInstanceId") for w in wfs if w.get("featureInstanceId")})
+ if not instanceIds:
+ return []
+ try:
+ from modules.interfaces.interfaceFeatures import getFeatureInterface
+ rootIface = getRootInterface()
+ featureIface = getFeatureInterface(rootIface.db)
+ labels = []
+ for iid in instanceIds:
+ fi = featureIface.getFeatureInstance(iid)
+ if fi:
+ labels.append(fi.label or iid)
+ return sorted(set(labels), key=lambda v: v.lower())
+ except Exception:
+ return sorted(instanceIds)
+
+ if column == "workflowLabel":
+ baseFilter = scopeFilter(context)
+ recordFilter = dict(baseFilter) if baseFilter else {}
+ items = db.getRecordset(modelClass, recordFilter=recordFilter or None, fieldFilter=["workflowId", "label"]) or []
+ labels = set()
+ wfIds = set()
+ for r in items:
+ if r.get("label"):
+ labels.add(r["label"])
+ if r.get("workflowId"):
+ wfIds.add(r["workflowId"])
+ if wfIds and db._ensureTableExists(AutoWorkflow):
+ wfs = db.getRecordset(AutoWorkflow, recordFilter={"id": list(wfIds)}, fieldFilter=["label"]) or []
+ for wf in wfs:
+ if wf.get("label"):
+ labels.add(wf["label"])
+ return sorted(labels, key=lambda v: v.lower())
+
+ baseFilter = scopeFilter(context)
+ recordFilter = dict(baseFilter) if baseFilter else {}
+ if modelClass == AutoWorkflow:
+ recordFilter["isTemplate"] = False
+ return db.getDistinctColumnValues(modelClass, column, recordFilter=recordFilter or None) or []
+
+
+@router.get("/filter-values")
+@limiter.limit("60/minute")
+def get_run_filter_values(
+ request: Request,
+ column: str = Query(..., description="Column key"),
+ pagination: Optional[str] = Query(None, description="JSON-encoded current filters"),
+ context: RequestContext = Depends(getRequestContext),
+) -> list:
+ """Return distinct filter values for a column in workflow runs."""
+ db = _getDb()
+ if not db._ensureTableExists(AutoRun):
+ return []
+ return _enrichedFilterValues(db, context, AutoRun, _scopedRunFilter, column)
+
+
+@router.get("/workflows/filter-values")
+@limiter.limit("60/minute")
+def get_workflow_filter_values(
+ request: Request,
+ column: str = Query(..., description="Column key"),
+ pagination: Optional[str] = Query(None, description="JSON-encoded current filters"),
+ context: RequestContext = Depends(getRequestContext),
+) -> list:
+ """Return distinct filter values for a column in workflows."""
+ db = _getDb()
+ if not db._ensureTableExists(AutoWorkflow):
+ return []
+ return _enrichedFilterValues(db, context, AutoWorkflow, _scopedWorkflowFilter, column)
+
+
+# ---------------------------------------------------------------------------
+# Run-specific endpoints (path-param routes MUST come after static routes)
+# ---------------------------------------------------------------------------
+
+@router.get("/{runId}/steps")
+@limiter.limit("60/minute")
+def get_run_steps(
+ request: Request,
+ runId: str = Path(..., description="Run ID"),
+ context: RequestContext = Depends(getRequestContext),
+) -> dict:
+ """Get step logs for a specific run (with access check)."""
+ db = _getDb()
+ if not db._ensureTableExists(AutoRun):
+ raise HTTPException(status_code=404, detail=routeApiMsg("Run not found"))
+
+ runs = db.getRecordset(AutoRun, recordFilter={"id": runId})
+ if not runs:
+ raise HTTPException(status_code=404, detail=routeApiMsg("Run not found"))
+ run = dict(runs[0])
+
+ if not context.hasSysAdminRole:
+ userId = str(context.user.id) if context.user else None
+ runOwner = run.get("ownerId")
+ runMandate = run.get("mandateId")
+
+ if runOwner == userId:
+ pass
+ elif runMandate and userId and _isUserMandateAdmin(userId, runMandate):
+ pass
+ else:
+ raise HTTPException(status_code=403, detail=routeApiMsg("Access denied"))
+
+ if not db._ensureTableExists(AutoStepLog):
+ return {"steps": []}
+
+ records = db.getRecordset(AutoStepLog, recordFilter={"runId": runId})
+ steps = [dict(r) for r in records] if records else []
+ steps.sort(key=lambda s: s.get("startedAt") or 0)
+ return {"steps": steps}
+
+
+# ---------------------------------------------------------------------------
+# SSE stream for live run tracing (system-level, no instanceId required)
+# ---------------------------------------------------------------------------
+
+@router.get("/{runId}/stream")
+async def get_run_stream(
+ request: Request,
+ runId: str = Path(..., description="Run ID"),
+ context: RequestContext = Depends(getRequestContext),
+):
+ """SSE stream for live step-log updates during a workflow run (system-level)."""
+ db = _getDb()
+ if not db._ensureTableExists(AutoRun):
+ raise HTTPException(status_code=404, detail=routeApiMsg("Run not found"))
+
+ runs = db.getRecordset(AutoRun, recordFilter={"id": runId})
+ if not runs:
+ raise HTTPException(status_code=404, detail=routeApiMsg("Run not found"))
+ run = dict(runs[0])
+
+ if not context.hasSysAdminRole:
+ userId = str(context.user.id) if context.user else None
+ runOwner = run.get("ownerId")
+ runMandate = run.get("mandateId")
+ if runOwner == userId:
+ pass
+ elif runMandate and userId and _isUserMandateAdmin(userId, runMandate):
+ pass
+ else:
+ raise HTTPException(status_code=403, detail=routeApiMsg("Access denied"))
+
+ from modules.serviceCenter.core.serviceStreaming.eventManager import get_event_manager
+ sseEventManager = get_event_manager()
+ queueId = f"run-trace-{runId}"
+ sseEventManager.create_queue(queueId)
+
+ async def _sseGenerator():
+ queue = sseEventManager.get_queue(queueId)
+ if not queue:
+ return
+ while True:
+ try:
+ event = await asyncio.wait_for(queue.get(), timeout=30)
+ except asyncio.TimeoutError:
+ yield "data: {\"type\": \"keepalive\"}\n\n"
+ continue
+ if event is None:
+ break
+ payload = event.get("data", event) if isinstance(event, dict) else event
+ yield f"data: {json.dumps(payload, default=str)}\n\n"
+ eventType = payload.get("type", "") if isinstance(payload, dict) else ""
+ if eventType in ("run_complete", "run_failed"):
+ break
+ await sseEventManager.cleanup(queueId, delay=10)
+
+ return StreamingResponse(
+ _sseGenerator(),
+ media_type="text/event-stream",
+ headers={
+ "Cache-Control": "no-cache",
+ "Connection": "keep-alive",
+ "X-Accel-Buffering": "no",
+ },
+ )
+
+
+@router.post("/{runId}/stop")
+@limiter.limit("30/minute")
+def stop_workflow_run(
+ request: Request,
+ runId: str = Path(..., description="Run ID"),
+ context: RequestContext = Depends(getRequestContext),
+):
+ """Stop a running workflow execution (system-level)."""
+ db = _getDb()
+ if not db._ensureTableExists(AutoRun):
+ raise HTTPException(status_code=404, detail=routeApiMsg("Run not found"))
+
+ runs = db.getRecordset(AutoRun, recordFilter={"id": runId})
+ if not runs:
+ raise HTTPException(status_code=404, detail=routeApiMsg("Run not found"))
+ run = dict(runs[0])
+
+ if not context.hasSysAdminRole:
+ userId = str(context.user.id) if context.user else None
+ runOwner = run.get("ownerId")
+ runMandate = run.get("mandateId")
+ if runOwner == userId:
+ pass
+ elif runMandate and userId and _isUserMandateAdmin(userId, runMandate):
+ pass
+ else:
+ raise HTTPException(status_code=403, detail=routeApiMsg("Access denied"))
+
+ from modules.workflows.automation2.executionEngine import requestRunStop
+ flagged = requestRunStop(runId)
+
+ if not flagged:
+ currentStatus = run.get("status", "")
+ if currentStatus in ("completed", "failed", "stopped"):
+ return {"status": currentStatus, "runId": runId, "message": "Run already finished"}
+ db.recordModify(AutoRun, runId, {"status": "stopped"})
+ return {"status": "stopped", "runId": runId, "message": "Run not active in memory, marked as stopped"}
+
+ return {"status": "stopping", "runId": runId, "message": "Stop signal sent"}
diff --git a/modules/security/rbac.py b/modules/security/rbac.py
index 9199e73b..bec0b70e 100644
--- a/modules/security/rbac.py
+++ b/modules/security/rbac.py
@@ -86,12 +86,10 @@ class RbacClass:
# NOTE: sysadmin ROLE users get full access via AccessRules (DATA: ALL)
# This flag bypass is kept as fallback for true system-level operations
if hasattr(user, 'isSysAdmin') and user.isSysAdmin:
- # User-owned namespaces: SysAdmin gets MY access only (own data).
- # Every user -- including SysAdmin -- only has CRUD for their own
- # chat workflows and files. Automation is excluded because it's
- # managed by admins and the system event user needs ALL access.
- _USER_OWNED_PREFIXES = ("data.chat.", "data.files.")
- if item and any(item.startswith(p) for p in _USER_OWNED_PREFIXES):
+ # Chat namespace: SysAdmin gets MY access only (own data).
+ # Files namespace: SysAdmin gets ALL (can manage all files in system).
+ _CHAT_PREFIXES = ("data.chat.",)
+ if item and any(item.startswith(p) for p in _CHAT_PREFIXES):
return UserPermissions(
view=True,
read=AccessLevel.MY,
diff --git a/modules/security/rbacCatalog.py b/modules/security/rbacCatalog.py
index 14b87534..bc5b0353 100644
--- a/modules/security/rbacCatalog.py
+++ b/modules/security/rbacCatalog.py
@@ -43,7 +43,7 @@ class RbacCatalogService:
self._initialized = True
logger.info("RBAC Catalog Service initialized")
- def registerUiObject(self, featureCode: str, objectKey: str, label: Dict[str, str], meta: Optional[Dict[str, Any]] = None) -> bool:
+ def registerUiObject(self, featureCode: str, objectKey: str, label: str, meta: Optional[Dict[str, Any]] = None) -> bool:
"""Register a UI object for a feature."""
try:
self._uiObjects[objectKey] = {"objectKey": objectKey, "featureCode": featureCode, "label": label, "meta": meta or {}, "type": "UI"}
@@ -52,7 +52,7 @@ class RbacCatalogService:
logger.error(f"Failed to register UI object {objectKey}: {e}")
return False
- def registerResourceObject(self, featureCode: str, objectKey: str, label: Dict[str, str], meta: Optional[Dict[str, Any]] = None) -> bool:
+ def registerResourceObject(self, featureCode: str, objectKey: str, label: str, meta: Optional[Dict[str, Any]] = None) -> bool:
"""Register a RESOURCE object for a feature."""
try:
self._resourceObjects[objectKey] = {"objectKey": objectKey, "featureCode": featureCode, "label": label, "meta": meta or {}, "type": "RESOURCE"}
@@ -61,14 +61,14 @@ class RbacCatalogService:
logger.error(f"Failed to register RESOURCE object {objectKey}: {e}")
return False
- def registerDataObject(self, featureCode: str, objectKey: str, label: Dict[str, str], meta: Optional[Dict[str, Any]] = None) -> bool:
+ def registerDataObject(self, featureCode: str, objectKey: str, label: str, meta: Optional[Dict[str, Any]] = None) -> bool:
"""
Register a DATA object (table/entity) for a feature.
Args:
featureCode: Feature code (e.g., "trustee", "system")
objectKey: Dot-notation key (e.g., "data.feature.trustee.TrusteeContract")
- label: Multilingual label dict
+ label: German plaintext label (used as i18n key)
meta: Optional metadata (e.g., table name, fields list)
"""
try:
@@ -84,7 +84,7 @@ class RbacCatalogService:
logger.error(f"Failed to register DATA object {objectKey}: {e}")
return False
- def registerFeatureDefinition(self, featureCode: str, label: Dict[str, str], icon: str) -> bool:
+ def registerFeatureDefinition(self, featureCode: str, label: str, icon: str) -> bool:
"""Register a feature definition."""
try:
self._featureDefinitions[featureCode] = {"code": featureCode, "label": label, "icon": icon}
diff --git a/modules/serviceCenter/registry.py b/modules/serviceCenter/registry.py
index 851e4894..64003d29 100644
--- a/modules/serviceCenter/registry.py
+++ b/modules/serviceCenter/registry.py
@@ -33,98 +33,98 @@ IMPORTABLE_SERVICES: Dict[str, Dict[str, Any]] = {
"class": "TicketService",
"dependencies": [],
"objectKey": "service.ticket",
- "label": {"en": "Ticket System", "de": "Ticket-System", "fr": "Système de tickets"},
+ "label": "Ticket-System",
},
"messaging": {
"module": "modules.serviceCenter.services.serviceMessaging.mainServiceMessaging",
"class": "MessagingService",
"dependencies": [],
"objectKey": "service.messaging",
- "label": {"en": "Messaging", "de": "Nachrichten", "fr": "Messagerie"},
+ "label": "Nachrichten",
},
"billing": {
"module": "modules.serviceCenter.services.serviceBilling.mainServiceBilling",
"class": "BillingService",
"dependencies": ["subscription"],
"objectKey": "service.billing",
- "label": {"en": "Billing", "de": "Abrechnung", "fr": "Facturation"},
+ "label": "Abrechnung",
},
"subscription": {
"module": "modules.serviceCenter.services.serviceSubscription.mainServiceSubscription",
"class": "SubscriptionService",
"dependencies": [],
"objectKey": "service.subscription",
- "label": {"en": "Subscription", "de": "Abonnement", "fr": "Abonnement"},
+ "label": "Abonnement",
},
"sharepoint": {
"module": "modules.serviceCenter.services.serviceSharepoint.mainServiceSharepoint",
"class": "SharepointService",
"dependencies": ["security"],
"objectKey": "service.sharepoint",
- "label": {"en": "SharePoint", "de": "SharePoint", "fr": "SharePoint"},
+ "label": "SharePoint",
},
"clickup": {
"module": "modules.serviceCenter.services.serviceClickup.mainServiceClickup",
"class": "ClickupService",
"dependencies": ["security"],
"objectKey": "service.clickup",
- "label": {"en": "ClickUp", "de": "ClickUp", "fr": "ClickUp"},
+ "label": "ClickUp",
},
"chat": {
"module": "modules.serviceCenter.services.serviceChat.mainServiceChat",
"class": "ChatService",
"dependencies": ["utils"],
"objectKey": "service.chat",
- "label": {"en": "Chat", "de": "Chat", "fr": "Chat"},
+ "label": "Chat",
},
"extraction": {
"module": "modules.serviceCenter.services.serviceExtraction.mainServiceExtraction",
"class": "ExtractionService",
"dependencies": ["chat", "utils"],
"objectKey": "service.extraction",
- "label": {"en": "Extraction", "de": "Extraktion", "fr": "Extraction"},
+ "label": "Extraktion",
},
"generation": {
"module": "modules.serviceCenter.services.serviceGeneration.mainServiceGeneration",
"class": "GenerationService",
"dependencies": ["utils", "chat"],
"objectKey": "service.generation",
- "label": {"en": "Generation", "de": "Generierung", "fr": "Génération"},
+ "label": "Generierung",
},
"ai": {
"module": "modules.serviceCenter.services.serviceAi.mainServiceAi",
"class": "AiService",
"dependencies": ["chat", "utils", "extraction", "billing"],
"objectKey": "service.ai",
- "label": {"en": "AI", "de": "KI", "fr": "IA"},
+ "label": "KI",
},
"web": {
"module": "modules.serviceCenter.services.serviceWeb.mainServiceWeb",
"class": "WebService",
"dependencies": ["ai", "chat", "utils"],
"objectKey": "service.web",
- "label": {"en": "Web Research", "de": "Web-Recherche", "fr": "Recherche Web"},
+ "label": "Web-Recherche",
},
"neutralization": {
"module": "modules.features.neutralization.serviceNeutralization.mainServiceNeutralization",
"class": "NeutralizationService",
"dependencies": ["extraction", "generation"],
"objectKey": "service.neutralization",
- "label": {"en": "Neutralization", "de": "Neutralisierung", "fr": "Neutralisation"},
+ "label": "Neutralisierung",
},
"agent": {
"module": "modules.serviceCenter.services.serviceAgent.mainServiceAgent",
"class": "AgentService",
"dependencies": ["ai", "chat", "utils", "extraction", "billing", "streaming", "knowledge"],
"objectKey": "service.agent",
- "label": {"en": "Agent", "de": "Agent", "fr": "Agent"},
+ "label": "Agent",
},
"knowledge": {
"module": "modules.serviceCenter.services.serviceKnowledge.mainServiceKnowledge",
"class": "KnowledgeService",
"dependencies": ["ai"],
"objectKey": "service.knowledge",
- "label": {"en": "Knowledge Store", "de": "Wissensspeicher", "fr": "Base de connaissances"},
+ "label": "Wissensspeicher",
},
}
diff --git a/modules/serviceCenter/services/serviceAgent/actionToolAdapter.py b/modules/serviceCenter/services/serviceAgent/actionToolAdapter.py
index 17e953fd..815be871 100644
--- a/modules/serviceCenter/services/serviceAgent/actionToolAdapter.py
+++ b/modules/serviceCenter/services/serviceAgent/actionToolAdapter.py
@@ -80,10 +80,13 @@ def _convertParameterSchema(actionParams: Dict[str, Any]) -> Dict[str, Any]:
paramRequired = paramInfo.get("required", False) if isinstance(paramInfo, dict) else False
jsonType = _pythonTypeToJsonType(paramType)
- properties[paramName] = {
+ prop: Dict[str, Any] = {
"type": jsonType,
- "description": paramDesc
+ "description": paramDesc,
}
+ if jsonType == "array":
+ prop["items"] = _pythonTypeToArrayItems(paramType) or {"type": "string"}
+ properties[paramName] = prop
if paramRequired:
required.append(paramName)
@@ -95,21 +98,37 @@ def _convertParameterSchema(actionParams: Dict[str, Any]) -> Dict[str, Any]:
}
+_TYPE_MAPPING = {
+ "str": "string",
+ "int": "integer",
+ "float": "number",
+ "bool": "boolean",
+ "list": "array",
+ "dict": "object",
+ "List[str]": "array",
+ "List[int]": "array",
+ "List[dict]": "array",
+ "List[float]": "array",
+ "Dict[str, Any]": "object",
+}
+
+_ARRAY_ITEMS_MAPPING = {
+ "list": {"type": "string"},
+ "List[str]": {"type": "string"},
+ "List[int]": {"type": "integer"},
+ "List[float]": {"type": "number"},
+ "List[dict]": {"type": "object"},
+}
+
+
def _pythonTypeToJsonType(pythonType: str) -> str:
"""Map Python type strings to JSON Schema types."""
- mapping = {
- "str": "string",
- "int": "integer",
- "float": "number",
- "bool": "boolean",
- "list": "array",
- "dict": "object",
- "List[str]": "array",
- "List[int]": "array",
- "List[dict]": "array",
- "Dict[str, Any]": "object",
- }
- return mapping.get(pythonType, "string")
+ return _TYPE_MAPPING.get(pythonType, "string")
+
+
+def _pythonTypeToArrayItems(pythonType: str) -> Optional[Dict[str, Any]]:
+ """Return the JSON Schema `items` descriptor for array types, or None."""
+ return _ARRAY_ITEMS_MAPPING.get(pythonType)
def _createDispatchHandler(actionExecutor, methodName: str, actionName: str):
diff --git a/modules/serviceCenter/services/serviceAgent/agentLoop.py b/modules/serviceCenter/services/serviceAgent/agentLoop.py
index fa76141d..d458ee27 100644
--- a/modules/serviceCenter/services/serviceAgent/agentLoop.py
+++ b/modules/serviceCenter/services/serviceAgent/agentLoop.py
@@ -159,8 +159,8 @@ async def runAgentLoop(
if getExternalMemoryKeysFn:
try:
memKeys = getExternalMemoryKeysFn()
- except Exception:
- pass
+ except Exception as e:
+ logger.warning(f"getExternalMemoryKeysFn failed: {e}")
await conversation.summarize(
state.currentRound, _summarizeCall, externalMemoryKeys=memKeys or None
)
@@ -169,7 +169,7 @@ async def runAgentLoop(
aiRequest = AiCallRequest(
prompt="",
options=AiCallOptions(
- operationType=OperationTypeEnum.AGENT,
+ operationType=config.operationType or OperationTypeEnum.AGENT,
temperature=config.temperature
),
messages=conversation.messages,
@@ -333,6 +333,17 @@ async def runAgentLoop(
content=sideEvt.get("content"),
)
+ # Check if requestToolbox was called -- refresh tool definitions for next round
+ _toolboxEscalated = False
+ for result in results:
+ if result.toolName == "requestToolbox" and result.success:
+ _toolboxEscalated = True
+ if _toolboxEscalated:
+ tools = toolRegistry.getTools(toolSet=activeToolSet)
+ toolDefinitions = toolRegistry.formatToolsForFunctionCalling(toolSet=activeToolSet)
+ toolsText = "" if toolDefinitions else toolRegistry.formatToolsForPrompt(toolSet=activeToolSet)
+ logger.info("Toolbox escalation: refreshed tool definitions (%d tools)", len(tools))
+
# Add tool results to conversation
toolResultMessages = [
{"toolCallId": r.toolCallId, "toolName": r.toolName,
diff --git a/modules/serviceCenter/services/serviceAgent/conversationManager.py b/modules/serviceCenter/services/serviceAgent/conversationManager.py
index fe53a921..fffb2dc4 100644
--- a/modules/serviceCenter/services/serviceAgent/conversationManager.py
+++ b/modules/serviceCenter/services/serviceAgent/conversationManager.py
@@ -322,13 +322,6 @@ def _buildSummaryPrompt(
return prompt
-_LANGUAGE_NAMES = {
- "de": "German", "en": "English", "fr": "French", "it": "Italian",
- "es": "Spanish", "pt": "Portuguese", "nl": "Dutch", "ja": "Japanese",
- "zh": "Chinese", "ko": "Korean", "ar": "Arabic", "ru": "Russian",
-}
-
-
def buildSystemPrompt(
tools: List[ToolDefinition],
toolsFormatted: str = None,
@@ -339,16 +332,14 @@ def buildSystemPrompt(
Args:
tools: Available tool definitions.
toolsFormatted: Pre-formatted tool descriptions for text-based fallback.
- userLanguage: ISO 639-1 language code (e.g. "de", "en"). The agent will
- respond in this language.
+ userLanguage: Kept for backwards compatibility, no longer used for language selection.
"""
- langName = _LANGUAGE_NAMES.get(userLanguage, "")
langInstruction = (
- f"IMPORTANT: Always respond in {langName} ({userLanguage}). "
- f"The user's language is {langName}. All your messages, explanations, "
- f"and summaries MUST be in {langName}. "
- f"Only use English for tool call arguments and technical identifiers.\n\n"
- ) if langName else ""
+ "IMPORTANT: Always respond in the same language the user writes in. "
+ "If the user writes in German, respond in German. If in French, respond in French. "
+ "Generate documents and content in the user's language unless explicitly asked otherwise. "
+ "Only use English for tool call arguments and technical identifiers.\n\n"
+ )
prompt = (
f"{langInstruction}"
diff --git a/modules/serviceCenter/services/serviceAgent/coreTools/__init__.py b/modules/serviceCenter/services/serviceAgent/coreTools/__init__.py
new file mode 100644
index 00000000..e476ac39
--- /dev/null
+++ b/modules/serviceCenter/services/serviceAgent/coreTools/__init__.py
@@ -0,0 +1,7 @@
+# Copyright (c) 2025 Patrick Motsch
+# All rights reserved.
+"""Core agent tools: registration of built-in ToolRegistry handlers."""
+
+from .registerCore import registerCoreTools
+
+__all__ = ["registerCoreTools"]
diff --git a/modules/serviceCenter/services/serviceAgent/coreTools/_connectionTools.py b/modules/serviceCenter/services/serviceAgent/coreTools/_connectionTools.py
new file mode 100644
index 00000000..e4018014
--- /dev/null
+++ b/modules/serviceCenter/services/serviceAgent/coreTools/_connectionTools.py
@@ -0,0 +1,195 @@
+# Copyright (c) 2025 Patrick Motsch
+# All rights reserved.
+"""External connection tools (list connections, upload, send mail)."""
+
+import logging
+from typing import Any, Dict, List, Optional
+
+from modules.serviceCenter.services.serviceAgent.datamodelAgent import ToolResult
+from modules.serviceCenter.services.serviceAgent.toolRegistry import ToolRegistry
+
+from modules.serviceCenter.services.serviceAgent.coreTools._helpers import (
+ _getOrCreateTempFolder,
+ _looksLikeBinary,
+ _resolveFileScope,
+ _MAX_TOOL_RESULT_CHARS,
+)
+
+logger = logging.getLogger(__name__)
+
+
+def _registerConnectionTools(registry: ToolRegistry, services):
+ """Auto-extracted from registerCoreTools."""
+ # ---- Connection tools (external data sources) ----
+
+ def _buildResolverDb():
+ """Build a DB adapter that ConnectorResolver can use to load UserConnections.
+ interfaceDbApp has getUserConnectionById; ConnectorResolver expects getUserConnection."""
+ chatService = services.chat
+ appIf = getattr(chatService, "interfaceDbApp", None)
+ if appIf and hasattr(appIf, "getUserConnectionById"):
+ class _Adapter:
+ def __init__(self, app):
+ self._app = app
+ def getUserConnection(self, connectionId: str):
+ return self._app.getUserConnectionById(connectionId)
+ return _Adapter(appIf)
+ return getattr(chatService, "interfaceDbComponent", None)
+
+ async def _listConnections(args: Dict[str, Any], context: Dict[str, Any]):
+ try:
+ chatService = services.chat
+ connections = chatService.getUserConnections() if hasattr(chatService, "getUserConnections") else []
+ if not connections:
+ return ToolResult(toolCallId="", toolName="listConnections", success=True, data="No connections available.")
+ lines = []
+ for conn in connections:
+ authority = conn.get("authority", "?") if isinstance(conn, dict) else getattr(conn, "authority", "?")
+ authorityVal = authority.value if hasattr(authority, "value") else str(authority)
+ username = conn.get("externalUsername", "") if isinstance(conn, dict) else getattr(conn, "externalUsername", "")
+ email = conn.get("externalEmail", "") if isinstance(conn, dict) else getattr(conn, "externalEmail", "")
+ ref = f"connection:{authorityVal}:{username}"
+ lines.append(f"- {ref} ({email})")
+ return ToolResult(toolCallId="", toolName="listConnections", success=True, data="\n".join(lines))
+ except Exception as e:
+ return ToolResult(toolCallId="", toolName="listConnections", success=False, error=str(e))
+
+ async def _uploadToExternal(args: Dict[str, Any], context: Dict[str, Any]):
+ connectionId = args.get("connectionId", "")
+ service = args.get("service", "")
+ path = args.get("path", "")
+ fileId = args.get("fileId", "")
+ if not connectionId or not service or not path or not fileId:
+ return ToolResult(toolCallId="", toolName="uploadToExternal", success=False, error="connectionId, service, path, and fileId are required")
+ try:
+ from modules.connectors.connectorResolver import ConnectorResolver
+ resolver = ConnectorResolver(
+ services.getService("security"),
+ _buildResolverDb(),
+ )
+ adapter = await resolver.resolveService(connectionId, service)
+ chatService = services.chat
+ fileContent = chatService.getFileContent(fileId)
+ if not fileContent:
+ return ToolResult(toolCallId="", toolName="uploadToExternal", success=False, error="File not found")
+ fileData = fileContent.get("data", b"") if isinstance(fileContent, dict) else b""
+ if isinstance(fileData, str):
+ fileData = fileData.encode("utf-8")
+ fileName = fileContent.get("fileName", "file") if isinstance(fileContent, dict) else "file"
+ result = await adapter.upload(path, fileData, fileName)
+ return ToolResult(toolCallId="", toolName="uploadToExternal", success=True, data=str(result))
+ except Exception as e:
+ return ToolResult(toolCallId="", toolName="uploadToExternal", success=False, error=str(e))
+
+ async def _sendMail(args: Dict[str, Any], context: Dict[str, Any]):
+ import base64 as _b64
+
+ connectionId = args.get("connectionId", "")
+ to = args.get("to", [])
+ subject = args.get("subject", "")
+ body = args.get("body", "")
+ bodyType = "HTML" if args.get("bodyType", "text").lower() == "html" else "Text"
+ draft = args.get("draft", False)
+ attachmentFileIds = args.get("attachmentFileIds") or []
+
+ if not connectionId or not to or not subject:
+ return ToolResult(toolCallId="", toolName="sendMail", success=False, error="connectionId, to, and subject are required")
+ try:
+ graphAttachments: List[Dict[str, Any]] = []
+ if attachmentFileIds:
+ chatService = services.chat
+ dbMgmt = chatService.interfaceDbComponent
+ for fid in attachmentFileIds:
+ fileRow = dbMgmt.getFile(fid)
+ if not fileRow:
+ return ToolResult(toolCallId="", toolName="sendMail", success=False, error=f"Attachment file not found: {fid}")
+ rawBytes = dbMgmt.getFileData(fid)
+ if not rawBytes:
+ return ToolResult(toolCallId="", toolName="sendMail", success=False, error=f"Attachment file has no data: {fid}")
+ graphAttachments.append({
+ "name": fileRow.fileName,
+ "contentBytes": _b64.b64encode(rawBytes).decode("ascii"),
+ "contentType": getattr(fileRow, "mimeType", "application/octet-stream"),
+ })
+
+ from modules.connectors.connectorResolver import ConnectorResolver
+ resolver = ConnectorResolver(
+ services.getService("security"),
+ _buildResolverDb(),
+ )
+ adapter = await resolver.resolveService(connectionId, "outlook")
+
+ if draft and hasattr(adapter, "createDraft"):
+ result = await adapter.createDraft(
+ to=to, subject=subject, body=body, bodyType=bodyType,
+ cc=args.get("cc"), attachments=graphAttachments or None,
+ )
+ return ToolResult(toolCallId="", toolName="sendMail", success=True, data=str(result))
+
+ if hasattr(adapter, "sendMail"):
+ result = await adapter.sendMail(
+ to=to, subject=subject, body=body, bodyType=bodyType,
+ cc=args.get("cc"), attachments=graphAttachments or None,
+ )
+ return ToolResult(toolCallId="", toolName="sendMail", success=True, data=str(result))
+ return ToolResult(toolCallId="", toolName="sendMail", success=False, error="Mail not supported by this adapter")
+ except Exception as e:
+ return ToolResult(toolCallId="", toolName="sendMail", success=False, error=str(e))
+
+ _connToolParams = {
+ "connectionId": {"type": "string", "description": "UserConnection ID"},
+ "service": {"type": "string", "description": "Service name (sharepoint, outlook, drive, etc.)"},
+ }
+
+ registry.register(
+ "listConnections", _listConnections,
+ description="List the user's external connections (SharePoint, OneDrive, Outlook, etc.) and their IDs. Use with browseDataSource/uploadToExternal.",
+ parameters={"type": "object", "properties": {}},
+ readOnly=True,
+ )
+
+ registry.register(
+ "uploadToExternal", _uploadToExternal,
+ description=(
+ "Upload a local file to an external storage via connectionId+service. "
+ "Use listConnections to find available connections."
+ ),
+ parameters={
+ "type": "object",
+ "properties": {
+ **_connToolParams,
+ "path": {"type": "string", "description": "Destination path on the external service"},
+ "fileId": {"type": "string", "description": "Local file ID to upload"},
+ },
+ "required": ["connectionId", "service", "path", "fileId"],
+ },
+ readOnly=False,
+ )
+
+ registry.register(
+ "sendMail", _sendMail,
+ description=(
+ "Send or draft an email via a connected mail service (Outlook). "
+ "Supports HTML body and file attachments from the workspace. "
+ "Set draft=true to save as draft without sending. "
+ "Use listConnections to find the connectionId."
+ ),
+ parameters={
+ "type": "object",
+ "properties": {
+ "connectionId": {"type": "string", "description": "UserConnection ID"},
+ "to": {"type": "array", "items": {"type": "string"}, "description": "Recipient email addresses"},
+ "subject": {"type": "string", "description": "Email subject"},
+ "body": {"type": "string", "description": "Email body — plain text or HTML markup"},
+ "bodyType": {"type": "string", "enum": ["text", "html"], "description": "Body format: 'text' (default) or 'html'"},
+ "cc": {"type": "array", "items": {"type": "string"}, "description": "CC addresses"},
+ "attachmentFileIds": {
+ "type": "array", "items": {"type": "string"},
+ "description": "File IDs from the workspace to attach (use listFiles to find IDs)",
+ },
+ "draft": {"type": "boolean", "description": "If true, save as draft in Drafts folder instead of sending"},
+ },
+ "required": ["connectionId", "to", "subject", "body"],
+ },
+ readOnly=False,
+ )
diff --git a/modules/serviceCenter/services/serviceAgent/coreTools/_crossWorkflowTools.py b/modules/serviceCenter/services/serviceAgent/coreTools/_crossWorkflowTools.py
new file mode 100644
index 00000000..7307e019
--- /dev/null
+++ b/modules/serviceCenter/services/serviceAgent/coreTools/_crossWorkflowTools.py
@@ -0,0 +1,175 @@
+# Copyright (c) 2025 Patrick Motsch
+# All rights reserved.
+"""Cross-workflow tools and core-only tool-set tagging."""
+
+import logging
+from typing import Any, Dict, List, Optional
+
+from modules.serviceCenter.services.serviceAgent.datamodelAgent import ToolResult
+from modules.serviceCenter.services.serviceAgent.toolRegistry import ToolRegistry
+
+from modules.serviceCenter.services.serviceAgent.coreTools._helpers import (
+ _getOrCreateTempFolder,
+ _looksLikeBinary,
+ _resolveFileScope,
+ _MAX_TOOL_RESULT_CHARS,
+)
+
+logger = logging.getLogger(__name__)
+
+
+def _registerCrossWorkflowTools(registry: ToolRegistry, services):
+ """Auto-extracted from registerCoreTools."""
+ # ---- Cross-workflow tools ----
+
+ async def _listWorkflowHistory(args: Dict[str, Any], context: Dict[str, Any]) -> ToolResult:
+ """List all chat workflows in this workspace with metadata."""
+ import json as _json
+ try:
+ chatService = services.chat
+ chatInterface = chatService.interfaceDbChat
+ allWorkflows = chatInterface.getWorkflows() or []
+
+ allWorkflows.sort(
+ key=lambda w: w.get("sysCreatedAt") or w.get("startedAt") or 0,
+ reverse=True,
+ )
+ allWorkflows = allWorkflows[:50]
+
+ items = []
+ for wf in allWorkflows:
+ wfId = wf.get("id", "")
+ name = wf.get("name") or "(unnamed)"
+ createdAt = wf.get("sysCreatedAt") or wf.get("startedAt") or 0
+ lastActivity = wf.get("lastActivity") or createdAt
+
+ msgs = chatInterface.getMessages(wfId) or []
+ messageCount = len(msgs)
+ lastPreview = ""
+ if msgs:
+ lastMsg = msgs[-1] if isinstance(msgs[-1], dict) else (
+ msgs[-1].model_dump() if hasattr(msgs[-1], "model_dump") else {}
+ )
+ content = lastMsg.get("message") or lastMsg.get("content") or ""
+ lastPreview = content[:150]
+
+ items.append({
+ "id": wfId,
+ "name": name,
+ "createdAt": createdAt,
+ "lastActivity": lastActivity,
+ "messageCount": messageCount,
+ "lastMessagePreview": lastPreview,
+ })
+
+ return ToolResult(
+ toolCallId="", toolName="listWorkflowHistory",
+ success=True, data=_json.dumps(items, ensure_ascii=False),
+ )
+ except Exception as e:
+ return ToolResult(
+ toolCallId="", toolName="listWorkflowHistory",
+ success=False, error=str(e),
+ )
+
+ registry.register(
+ "listWorkflowHistory", _listWorkflowHistory,
+ description=(
+ "List all chat conversations/workflows in this workspace. "
+ "Returns id, name, createdAt, lastActivity, messageCount, and a preview "
+ "of the last message for each workflow. Use this to discover previous "
+ "conversations when the user asks about past chats or wants a summary "
+ "across conversations."
+ ),
+ parameters={
+ "type": "object",
+ "properties": {},
+ },
+ readOnly=True,
+ )
+
+ async def _readWorkflowMessages(args: Dict[str, Any], context: Dict[str, Any]) -> ToolResult:
+ """Read messages from a specific workflow."""
+ import json as _json
+ targetWorkflowId = args.get("workflowId", "")
+ limit = int(args.get("limit", 20))
+ offset = int(args.get("offset", 0))
+
+ if not targetWorkflowId:
+ return ToolResult(
+ toolCallId="", toolName="readWorkflowMessages",
+ success=False, error="workflowId is required",
+ )
+
+ try:
+ chatService = services.chat
+ chatInterface = chatService.interfaceDbChat
+ allMsgs = chatInterface.getMessages(targetWorkflowId) or []
+
+ sliced = allMsgs[offset:offset + limit]
+ items = []
+ for msg in sliced:
+ raw = msg if isinstance(msg, dict) else (
+ msg.model_dump() if hasattr(msg, "model_dump") else {}
+ )
+ content = raw.get("message") or raw.get("content") or ""
+ if len(content) > 2000:
+ content = content[:2000] + "..."
+ items.append({
+ "role": raw.get("role", ""),
+ "message": content,
+ "publishedAt": raw.get("publishedAt") or raw.get("sysCreatedAt") or 0,
+ })
+
+ header = f"Workflow {targetWorkflowId}: {len(allMsgs)} total messages"
+ if offset > 0 or len(allMsgs) > offset + limit:
+ header += f" (showing {offset + 1}-{offset + len(sliced)})"
+
+ return ToolResult(
+ toolCallId="", toolName="readWorkflowMessages",
+ success=True,
+ data=header + "\n" + _json.dumps(items, ensure_ascii=False),
+ )
+ except Exception as e:
+ return ToolResult(
+ toolCallId="", toolName="readWorkflowMessages",
+ success=False, error=str(e),
+ )
+
+ registry.register(
+ "readWorkflowMessages", _readWorkflowMessages,
+ description=(
+ "Read messages from a specific chat workflow/conversation. "
+ "Use this after listWorkflowHistory to read the content of a "
+ "specific past conversation. Supports pagination via offset/limit."
+ ),
+ parameters={
+ "type": "object",
+ "properties": {
+ "workflowId": {"type": "string", "description": "ID of the workflow to read messages from"},
+ "limit": {"type": "integer", "description": "Max messages to return (default 20)"},
+ "offset": {"type": "integer", "description": "Skip first N messages (default 0)"},
+ },
+ "required": ["workflowId"],
+ },
+ readOnly=True,
+ )
+
+ # Tag core-only tools so restricted toolSets (e.g. "commcoach") exclude them.
+ # Tools NOT in this set remain toolSet=None → available to ALL sets.
+ _CORE_ONLY_TOOLS = {
+ "listFiles", "listFolders", "tagFile", "moveFile", "createFolder",
+ "writeFile", "deleteFile", "renameFile", "translateText",
+ "deleteFolder", "renameFolder", "moveFolder", "copyFile", "replaceInFile",
+ "listConnections", "uploadToExternal", "sendMail", "downloadFromDataSource",
+ "browseContainer", "readContentObjects", "extractContainerItem",
+ "summarizeContent", "describeImage", "renderDocument",
+ "textToSpeech", "generateImage", "createChart",
+ "speechToText", "detectLanguage", "neutralizeData", "executeCode",
+ "listWorkflowHistory", "readWorkflowMessages",
+ }
+ for _toolName in _CORE_ONLY_TOOLS:
+ _td = registry.getTool(_toolName)
+ if _td:
+ _td.toolSet = "core"
+
diff --git a/modules/serviceCenter/services/serviceAgent/coreTools/_dataSourceTools.py b/modules/serviceCenter/services/serviceAgent/coreTools/_dataSourceTools.py
new file mode 100644
index 00000000..6e1ed0e1
--- /dev/null
+++ b/modules/serviceCenter/services/serviceAgent/coreTools/_dataSourceTools.py
@@ -0,0 +1,272 @@
+# Copyright (c) 2025 Patrick Motsch
+# All rights reserved.
+"""DataSource convenience tools (browse, search, download from external sources)."""
+
+import logging
+from typing import Any, Dict, List, Optional
+
+from modules.serviceCenter.services.serviceAgent.datamodelAgent import ToolResult
+from modules.serviceCenter.services.serviceAgent.toolRegistry import ToolRegistry
+
+from modules.serviceCenter.services.serviceAgent.coreTools._helpers import (
+    _getOrCreateTempFolder,
+    _looksLikeBinary,
+    _resolveFileScope,
+    _MAX_TOOL_RESULT_CHARS,
+)
+
+logger = logging.getLogger(__name__)
+
+
+def _registerDataSourceTools(registry: ToolRegistry, services):
+    """Register DataSource browse/search/download tools on the registry."""
+    # ---- DataSource convenience tools ----
+
+    def _buildResolverDb():
+        """Build a DB adapter exposing getUserConnection() for ConnectorResolver.
+        Mirrors the helper in _connectionTools; needed locally since closures do not cross modules."""
+        chatService = services.chat
+        appIf = getattr(chatService, "interfaceDbApp", None)
+        if appIf and hasattr(appIf, "getUserConnectionById"):
+            class _Adapter:
+                def __init__(self, app):
+                    self._app = app
+                def getUserConnection(self, connectionId: str):
+                    return self._app.getUserConnectionById(connectionId)
+            return _Adapter(appIf)
+        return getattr(chatService, "interfaceDbComponent", None)
+
+    _SOURCE_TYPE_TO_SERVICE = {
+        "sharepointFolder": "sharepoint",
+        "onedriveFolder": "onedrive",
+        "outlookFolder": "outlook",
+        "googleDriveFolder": "drive",
+        "gmailFolder": "gmail",
+        "ftpFolder": "files",
+        "clickupList": "clickup",
+    }
+
+    async def _resolveDataSource(dsId: str):
+        """Resolve a DataSource record and return (connectionId, service, path, neutralize) or raise."""
+        chatService = services.chat
+        ds = chatService.getDataSource(dsId) if hasattr(chatService, "getDataSource") else None
+        if not ds:
+            raise ValueError(f"DataSource '{dsId}' not found")
+        connectionId = ds.get("connectionId", "")
+        sourceType = ds.get("sourceType", "")
+        path = ds.get("path", "/")
+        label = ds.get("label", "")
+        neutralize = bool(ds.get("neutralize", False))
+        service = _SOURCE_TYPE_TO_SERVICE.get(sourceType, sourceType)
+        if not connectionId:
+            raise ValueError(f"DataSource '{dsId}' has no connectionId")
+        logger.info(f"Resolved DataSource '{dsId}' ({label}): sourceType={sourceType}, service={service}, connectionId={connectionId}, path={path[:80]}, neutralize={neutralize}")
+        return connectionId, service, path, neutralize
+
+    _MAIL_SERVICES = {"outlook", "gmail"}
+
+    async def _browseDataSource(args: Dict[str, Any], context: Dict[str, Any]):
+        dsId = args.get("dataSourceId", "")
+        subPath = args.get("subPath", "")
+        directConnId = args.get("connectionId", "")
+        directService = args.get("service", "")
+        if not dsId and not (directConnId and directService):
+            return ToolResult(toolCallId="", toolName="browseDataSource", success=False,
+                              error="Provide either dataSourceId OR connectionId+service")
+        try:
+            if dsId:
+                connectionId, service, basePath, _neutralize = await _resolveDataSource(dsId)
+            else:
+                connectionId, service, basePath = directConnId, directService, args.get("path", "/")
+            if subPath:
+                if subPath.startswith("/"):
+                    browsePath = subPath
+                else:
+                    browsePath = f"{basePath.rstrip('/')}/{subPath}"
+            else:
+                browsePath = basePath
+            from modules.connectors.connectorResolver import ConnectorResolver
+            resolver = ConnectorResolver(
+                services.getService("security"),
+                _buildResolverDb(),
+            )
+            adapter = await resolver.resolveService(connectionId, service)
+            entries = await adapter.browse(browsePath, filter=args.get("filter"))
+            if not entries:
+                return ToolResult(toolCallId="", toolName="browseDataSource", success=True, data="Empty directory.")
+            lines = []
+            for e in entries:
+                prefix = "[DIR]" if e.isFolder else "[FILE]"
+                sizeInfo = f" ({e.size} bytes)" if e.size else ""
+                lines.append(f"- {prefix} {e.name}{sizeInfo} path: {e.path}")
+            result = "\n".join(lines)
+            if service in _MAIL_SERVICES:
+                result += "\n\nIMPORTANT: These are email subjects only. To read the full email content, use downloadFromDataSource with the path, then readFile on the returned file ID."
+            return ToolResult(toolCallId="", toolName="browseDataSource", success=True, data=result)
+        except Exception as e:
+            return ToolResult(toolCallId="", toolName="browseDataSource", success=False, error=str(e))
+
+    async def _searchDataSource(args: Dict[str, Any], context: Dict[str, Any]):
+        dsId = args.get("dataSourceId", "")
+        directConnId = args.get("connectionId", "")
+        directService = args.get("service", "")
+        query = args.get("query", "")
+        if not query:
+            return ToolResult(toolCallId="", toolName="searchDataSource", success=False, error="query is required")
+        if not dsId and not (directConnId and directService):
+            return ToolResult(toolCallId="", toolName="searchDataSource", success=False,
+                              error="Provide either dataSourceId OR connectionId+service")
+        try:
+            if dsId:
+                connectionId, service, basePath, _neutralize = await _resolveDataSource(dsId)
+            else:
+                connectionId, service, basePath = directConnId, directService, args.get("path", "/")
+            from modules.connectors.connectorResolver import ConnectorResolver
+            resolver = ConnectorResolver(
+                services.getService("security"),
+                _buildResolverDb(),
+            )
+            adapter = await resolver.resolveService(connectionId, service)
+            entries = await adapter.search(query, path=basePath)
+            if not entries:
+                return ToolResult(toolCallId="", toolName="searchDataSource", success=True, data="No results found.")
+            lines = [f"- {e.name} (path: {e.path})" for e in entries]
+            result = "\n".join(lines)
+            if service in _MAIL_SERVICES:
+                result += "\n\nIMPORTANT: These are email subjects only. To read the full email content, use downloadFromDataSource with the path, then readFile on the returned file ID."
+            return ToolResult(toolCallId="", toolName="searchDataSource", success=True, data=result)
+        except Exception as e:
+            return ToolResult(toolCallId="", toolName="searchDataSource", success=False, error=str(e))
+
+    async def _downloadFromDataSource(args: Dict[str, Any], context: Dict[str, Any]):
+        dsId = args.get("dataSourceId", "")
+        directConnId = args.get("connectionId", "")
+        directService = args.get("service", "")
+        filePath = args.get("filePath", "")
+        fileName = args.get("fileName", "")
+        if not filePath:
+            return ToolResult(toolCallId="", toolName="downloadFromDataSource", success=False, error="filePath is required")
+        if not dsId and not (directConnId and directService):
+            return ToolResult(toolCallId="", toolName="downloadFromDataSource", success=False,
+                              error="Provide either dataSourceId OR connectionId+service")
+        try:
+            from modules.connectors.connectorResolver import ConnectorResolver
+            from modules.connectors.connectorProviderBase import DownloadResult as _DR
+            _sourceNeutralize = False
+            if dsId:
+                connectionId, service, basePath, _sourceNeutralize = await _resolveDataSource(dsId)
+            else:
+                connectionId, service, basePath = directConnId, directService, "/"
+            fullPath = filePath if filePath.startswith("/") else f"{basePath.rstrip('/')}/{filePath}"
+            resolver = ConnectorResolver(
+                services.getService("security"),
+                _buildResolverDb(),
+            )
+            adapter = await resolver.resolveService(connectionId, service)
+            result = await adapter.download(fullPath)
+
+            if isinstance(result, _DR):
+                fileBytes = result.data
+                fileName = result.fileName or fileName
+            else:
+                fileBytes = result
+
+            if not fileBytes:
+                return ToolResult(toolCallId="", toolName="downloadFromDataSource", success=False, error="Download returned empty")
+
+            if not fileName or "." not in fileName:
+                pathSegment = fullPath.split("/")[-1] or "downloaded_file"
+                fileName = fileName or pathSegment
+                if "." not in fileName:
+                    try:
+                        entries = await adapter.browse(basePath)
+                        for entry in entries:
+                            if getattr(entry, "path", "") == filePath or getattr(entry, "path", "").endswith(filePath):
+                                if "." in entry.name:
+                                    fileName = entry.name
+                                    break
+                    except Exception as e:
+                        logger.warning(f"downloadFromDataSource: browse for filename failed: {e}")
+                if "." not in fileName:
+                    if fileBytes[:4] == b"%PDF":
+                        fileName = f"{fileName}.pdf"
+                    elif fileBytes[:2] == b"PK":
+                        fileName = f"{fileName}.zip"
+            chatService = services.chat
+            fileItem, _ = chatService.interfaceDbComponent.saveUploadedFile(fileBytes, fileName)
+            fiId = context.get("featureInstanceId") or (services.featureInstanceId if services else "")
+            if fiId:
+                chatService.interfaceDbComponent.updateFile(fileItem.id, {"featureInstanceId": fiId})
+            if _sourceNeutralize:
+                chatService.interfaceDbComponent.updateFile(fileItem.id, {"neutralize": True})
+            tempFolderId = _getOrCreateTempFolder(chatService)
+            if tempFolderId:
+                chatService.interfaceDbComponent.updateFile(fileItem.id, {"folderId": tempFolderId})
+            ext = fileName.rsplit(".", 1)[-1].lower() if "." in fileName else ""
+            hint = "Use readFile to read the text content." if ext in ("doc", "docx", "txt", "csv", "json", "xml", "html", "md", "rtf", "odt", "xls", "xlsx", "pptx", "pdf", "eml", "msg") else "Use readFile to access the content."
+            return ToolResult(
+                toolCallId="", toolName="downloadFromDataSource", success=True,
+                data=f"Downloaded '{fileName}' ({len(fileBytes)} bytes) → local file id: {fileItem.id}. {hint}"
+            )
+        except Exception as e:
+            return ToolResult(toolCallId="", toolName="downloadFromDataSource", success=False, error=str(e))
+
+    registry.register(
+        "browseDataSource", _browseDataSource,
+        description=(
+            "Browse files and folders in a data source. Accepts either:\n"
+            "- dataSourceId (for attached data sources shown in the prompt), OR\n"
+            "- connectionId + service (for direct connection access via listConnections)."
+        ),
+        parameters={
+            "type": "object",
+            "properties": {
+                "dataSourceId": {"type": "string", "description": "DataSource ID (from attached data sources)"},
+                "connectionId": {"type": "string", "description": "UserConnection ID (alternative to dataSourceId)"},
+                "service": {"type": "string", "description": "Service name (alternative to dataSourceId, e.g. sharepoint, onedrive)"},
+                "path": {"type": "string", "description": "Root path (used with connectionId+service)"},
+                "subPath": {"type": "string", "description": "Sub-path within the data source to browse"},
+                "filter": {"type": "string", "description": "Filter pattern (e.g. '*.pdf')"},
+            },
+        },
+        readOnly=True,
+    )
+
+    registry.register(
+        "searchDataSource", _searchDataSource,
+        description=(
+            "Search for files within a data source. Accepts either dataSourceId OR connectionId+service."
+        ),
+        parameters={
+            "type": "object",
+            "properties": {
+                "dataSourceId": {"type": "string", "description": "DataSource ID"},
+                "connectionId": {"type": "string", "description": "UserConnection ID (alternative to dataSourceId)"},
+                "service": {"type": "string", "description": "Service name (alternative to dataSourceId)"},
+                "path": {"type": "string", "description": "Scope path (used with connectionId+service)"},
+                "query": {"type": "string", "description": "Search query"},
+            },
+            "required": ["query"],
+        },
+        readOnly=True,
+    )
+
+    registry.register(
+        "downloadFromDataSource", _downloadFromDataSource,
+        description=(
+            "Download a file or email from a data source into local storage. Returns a local file ID "
+            "to read with readFile. Accepts either dataSourceId OR connectionId+service. "
+            "For email sources (Outlook, Gmail), browse/search only return subjects -- use this to get full content."
+        ),
+        parameters={
+            "type": "object",
+            "properties": {
+                "dataSourceId": {"type": "string", "description": "DataSource ID"},
+                "connectionId": {"type": "string", "description": "UserConnection ID (alternative to dataSourceId)"},
+                "service": {"type": "string", "description": "Service name (alternative to dataSourceId)"},
+                "filePath": {"type": "string", "description": "Path of the file to download (from browseDataSource results)"},
+                "fileName": {"type": "string", "description": "File name with extension (e.g. 'report.pdf')"},
+            },
+            "required": ["filePath"],
+        },
+        readOnly=False,
+    )
diff --git a/modules/serviceCenter/services/serviceAgent/coreTools/_documentTools.py b/modules/serviceCenter/services/serviceAgent/coreTools/_documentTools.py
new file mode 100644
index 00000000..30810374
--- /dev/null
+++ b/modules/serviceCenter/services/serviceAgent/coreTools/_documentTools.py
@@ -0,0 +1,403 @@
+# Copyright (c) 2025 Patrick Motsch
+# All rights reserved.
+"""Document and vision tools (containers, content objects, image description)."""
+
+import logging
+from typing import Any, Dict, List, Optional
+
+from modules.serviceCenter.services.serviceAgent.datamodelAgent import ToolResult
+from modules.serviceCenter.services.serviceAgent.toolRegistry import ToolRegistry
+
+from modules.serviceCenter.services.serviceAgent.coreTools._helpers import (
+ _getOrCreateTempFolder,
+ _looksLikeBinary,
+ _resolveFileScope,
+ _MAX_TOOL_RESULT_CHARS,
+)
+
+logger = logging.getLogger(__name__)
+
+
+def _registerDocumentTools(registry: ToolRegistry, services):
+ """Auto-extracted from registerCoreTools."""
+ # ---- Document tools (Smart Documents / Container Handling) ----
+
+ async def _browseContainer(args: Dict[str, Any], context: Dict[str, Any]):
+ fileId = args.get("fileId", "")
+ if not fileId:
+ return ToolResult(toolCallId="", toolName="browseContainer", success=False, error="fileId is required")
+ try:
+ knowledgeService = services.getService("knowledge")
+ index = knowledgeService.getFileContentIndex(fileId)
+ if not index:
+ return ToolResult(toolCallId="", toolName="browseContainer", success=True, data="No content index available for this file. It may not have been indexed yet.")
+ structure = index.get("structure", {}) if isinstance(index, dict) else {}
+ objectSummary = index.get("objectSummary", []) if isinstance(index, dict) else []
+ totalObjects = index.get("totalObjects", 0) if isinstance(index, dict) else 0
+
+ result = f"File: {index.get('fileName', '?')} ({index.get('mimeType', '?')})\n"
+ result += f"Total content objects: {totalObjects}\n"
+
+ sections = structure.get("sections", [])
+ if sections:
+ result += "\nSections:\n"
+ for s in sections:
+ result += f" [{s.get('id', '?')}] {s.get('title', 'Untitled')} (pages {s.get('startPage', '?')}-{s.get('endPage', '?')})\n"
+
+ if structure.get("pageMap"):
+ pages = len(structure["pageMap"])
+ result += f"\nPages: {pages}\n"
+ imgCount = structure.get("imageCount", 0)
+ tableCount = structure.get("tableCount", 0)
+ if imgCount:
+ result += f"Images: {imgCount}\n"
+ if tableCount:
+ result += f"Tables: {tableCount}\n"
+
+ if structure.get("sheetMap"):
+ result += "\nSheets:\n"
+ for s in structure["sheetMap"]:
+ result += f" {s.get('sheetName', '?')} ({s.get('rows', '?')} rows x {s.get('columns', '?')} cols)\n"
+
+ if structure.get("slideMap"):
+ result += "\nSlides:\n"
+ for s in structure["slideMap"]:
+ result += f" Slide {s.get('slideIndex', 0) + 1}: {s.get('title', '(no title)')}\n"
+
+ return ToolResult(toolCallId="", toolName="browseContainer", success=True, data=result)
+ except Exception as e:
+ return ToolResult(toolCallId="", toolName="browseContainer", success=False, error=str(e))
+
+ async def _readContentObjects(args: Dict[str, Any], context: Dict[str, Any]):
+ fileId = args.get("fileId", "")
+ if not fileId:
+ return ToolResult(toolCallId="", toolName="readContentObjects", success=False, error="fileId is required")
+ try:
+ knowledgeService = services.getService("knowledge")
+ filterDict = {}
+ if args.get("pageIndex") is not None:
+ filterDict["pageIndex"] = args["pageIndex"]
+ if args.get("contentType"):
+ filterDict["contentType"] = args["contentType"]
+ if args.get("sectionId"):
+ filterDict["sectionId"] = args["sectionId"]
+
+ objects = await knowledgeService.readContentObjects(fileId, filterDict)
+ if not objects:
+ return ToolResult(toolCallId="", toolName="readContentObjects", success=True, data="No content objects found with the given filter.")
+
+ result = f"Found {len(objects)} content objects:\n\n"
+ for obj in objects[:20]:
+ data = obj.get("data", "")
+ cType = obj.get("contentType", "?")
+ ref = obj.get("contextRef", {})
+ location = ref.get("location", "") if isinstance(ref, dict) else ""
+ preview = data[:300] if cType == "text" else f"[{cType} data, {len(data)} chars]"
+ result += f"[{cType}] {location}: {preview}\n\n"
+
+ if len(objects) > 20:
+ result += f"... and {len(objects) - 20} more objects"
+
+ return ToolResult(toolCallId="", toolName="readContentObjects", success=True, data=result)
+ except Exception as e:
+ return ToolResult(toolCallId="", toolName="readContentObjects", success=False, error=str(e))
+
+ async def _extractContainerItem(args: Dict[str, Any], context: Dict[str, Any]):
+ fileId = args.get("fileId", "")
+ containerPath = args.get("containerPath", "")
+ if not fileId or not containerPath:
+ return ToolResult(toolCallId="", toolName="extractContainerItem", success=False, error="fileId and containerPath are required")
+ try:
+ knowledgeService = services.getService("knowledge")
+ result = await knowledgeService.extractContainerItem(fileId, containerPath)
+ if result:
+ return ToolResult(toolCallId="", toolName="extractContainerItem", success=True, data=str(result))
+ return ToolResult(toolCallId="", toolName="extractContainerItem", success=False, error=f"Item '{containerPath}' not found in container index for file {fileId}. On-demand extraction is not yet implemented.")
+ except Exception as e:
+ return ToolResult(toolCallId="", toolName="extractContainerItem", success=False, error=str(e))
+
+ async def _summarizeContent(args: Dict[str, Any], context: Dict[str, Any]):
+ fileId = args.get("fileId", "")
+ if not fileId:
+ return ToolResult(toolCallId="", toolName="summarizeContent", success=False, error="fileId is required")
+ try:
+ knowledgeService = services.getService("knowledge")
+ filterDict = {}
+ if args.get("sectionId"):
+ filterDict["sectionId"] = args["sectionId"]
+ if args.get("pageIndex") is not None:
+ filterDict["pageIndex"] = args["pageIndex"]
+ if args.get("contentType"):
+ filterDict["contentType"] = args["contentType"]
+
+ objects = await knowledgeService.readContentObjects(fileId, filterDict)
+ if not objects:
+ return ToolResult(toolCallId="", toolName="summarizeContent", success=True, data="No content found to summarize.")
+
+ textParts = [obj.get("data", "") for obj in objects if obj.get("contentType") != "image"]
+ combinedText = "\n\n".join(textParts)[:6000]
+
+ aiService = services.ai
+ from modules.datamodels.datamodelAi import AiCallRequest, AiCallOptions, OperationTypeEnum
+ summaryRequest = AiCallRequest(
+ prompt=f"Summarize the following content concisely:\n\n{combinedText}",
+ options=AiCallOptions(operationType=OperationTypeEnum.DATA_ANALYSE),
+ )
+ response = await aiService.callAi(summaryRequest)
+ return ToolResult(toolCallId="", toolName="summarizeContent", success=True, data=response.content)
+ except Exception as e:
+ return ToolResult(toolCallId="", toolName="summarizeContent", success=False, error=str(e))
+
+ registry.register(
+ "browseContainer", _browseContainer,
+ description="Browse the structural index of a document (pages, sections, sheets, slides). Use before readContentObjects for targeted reading.",
+ parameters={
+ "type": "object",
+ "properties": {"fileId": {"type": "string", "description": "The file ID to browse"}},
+ "required": ["fileId"],
+ },
+ readOnly=True,
+ )
+
+ registry.register(
+ "readContentObjects", _readContentObjects,
+ description="Read extracted content objects from a file, optionally filtered by page, section, or type. Use browseContainer first to see the structure.",
+ parameters={
+ "type": "object",
+ "properties": {
+ "fileId": {"type": "string", "description": "The file ID"},
+ "pageIndex": {"type": "integer", "description": "Filter by page index"},
+ "sectionId": {"type": "string", "description": "Filter by section ID"},
+ "contentType": {"type": "string", "description": "Filter by content type (text, image, etc.)"},
+ },
+ "required": ["fileId"],
+ },
+ readOnly=True,
+ )
+
+ registry.register(
+ "extractContainerItem", _extractContainerItem,
+ description="Extract a specific item from a container file (ZIP, nested file). Use browseContainer to see available items.",
+ parameters={
+ "type": "object",
+ "properties": {
+ "fileId": {"type": "string", "description": "The container file ID"},
+ "containerPath": {"type": "string", "description": "Path within the container"},
+ },
+ "required": ["fileId", "containerPath"],
+ },
+ readOnly=True,
+ )
+
+ registry.register(
+ "summarizeContent", _summarizeContent,
+ description="Generate an AI-powered summary of a file's content. Optionally filter by section, page, or content type.",
+ parameters={
+ "type": "object",
+ "properties": {
+ "fileId": {"type": "string", "description": "The file ID"},
+ "sectionId": {"type": "string", "description": "Optional: summarize only this section"},
+ "pageIndex": {"type": "integer", "description": "Optional: summarize only this page"},
+ "contentType": {"type": "string", "description": "Optional: filter by content type"},
+ },
+ "required": ["fileId"],
+ },
+ readOnly=True,
+ )
+
+ # ---- Vision tool ----
+
+ async def _describeImage(args: Dict[str, Any], context: Dict[str, Any]):
+ """Analyse an image using AI vision. Uses Knowledge Store chunks produced by Extractors."""
+ fileId = args.get("fileId", "")
+ prompt = args.get("prompt", "Describe this image in detail. Extract all visible text, tables, and data.")
+ pageIndex = args.get("pageIndex")
+
+ if not fileId:
+ return ToolResult(toolCallId="", toolName="describeImage", success=False, error="fileId is required")
+
+ try:
+ import base64 as _b64
+
+ imageData = None
+ mimeType = "image/png"
+
+ knowledgeService = services.getService("knowledge") if hasattr(services, "getService") else None
+
+ # 1) Knowledge Store: image chunks already produced by PdfExtractor / ImageExtractor
+ if knowledgeService:
+ chunks = knowledgeService._knowledgeDb.getContentChunks(fileId)
+ imageChunks = [c for c in (chunks or []) if c.get("contentType") == "image"]
+ if pageIndex is not None:
+ imageChunks = [c for c in imageChunks if c.get("contextRef", {}).get("pageIndex") == pageIndex]
+ if imageChunks:
+ imageData = imageChunks[0].get("data", "")
+ chunkMime = imageChunks[0].get("contextRef", {}).get("mimeType")
+ if chunkMime:
+ mimeType = chunkMime
+
+ # 2) File not yet indexed -> trigger extraction via ExtractionService, then retry
+ if not imageData and knowledgeService and not knowledgeService.isFileIndexed(fileId):
+ try:
+ chatService = services.chat
+ fileInfo = chatService.getFileInfo(fileId)
+ fileContent = chatService.getFileContent(fileId)
+ if fileContent and fileInfo:
+ rawData = fileContent.get("data", "")
+ if isinstance(rawData, str) and len(rawData) > 100:
+ rawBytes = _b64.b64decode(rawData)
+ elif isinstance(rawData, bytes):
+ rawBytes = rawData
+ else:
+ rawBytes = None
+
+ if rawBytes:
+ from modules.serviceCenter.services.serviceExtraction.subRegistry import ExtractorRegistry
+ from modules.serviceCenter.services.serviceExtraction.subPipeline import runExtraction
+ from modules.datamodels.datamodelExtraction import ExtractionOptions
+
+ fileMime = fileInfo.get("mimeType", "application/octet-stream")
+ fileName = fileInfo.get("fileName", fileId)
+ extracted = runExtraction(
+ ExtractorRegistry(), None,
+ rawBytes, fileName, fileMime, ExtractionOptions(),
+ )
+
+ contentObjects = []
+ for part in extracted.parts:
+ tg = (part.typeGroup or "").lower()
+ ct = "image" if tg == "image" else "text"
+ if not part.data or not part.data.strip():
+ continue
+ contentObjects.append({
+ "contentObjectId": part.id,
+ "contentType": ct,
+ "data": part.data,
+ "contextRef": {"containerPath": fileName, "location": part.label, **(part.metadata or {})},
+ })
+
+ if contentObjects:
+ _diFiId, _diMId = _resolveFileScope(fileId, context)
+ await knowledgeService.indexFile(
+ fileId=fileId, fileName=fileName, mimeType=fileMime,
+ userId=context.get("userId", ""), contentObjects=contentObjects,
+ featureInstanceId=_diFiId,
+ mandateId=_diMId,
+ )
+
+ chunks = knowledgeService._knowledgeDb.getContentChunks(fileId)
+ imageChunks = [c for c in (chunks or []) if c.get("contentType") == "image"]
+ if pageIndex is not None:
+ imageChunks = [c for c in imageChunks if c.get("contextRef", {}).get("pageIndex") == pageIndex]
+ if imageChunks:
+ imageData = imageChunks[0].get("data", "")
+ except Exception as extractErr:
+ logger.warning(f"describeImage: on-demand extraction failed: {extractErr}")
+
+ # 3) Direct image file (not a container) - use raw file data
+ if not imageData:
+ chatService = services.chat
+ fileContent = chatService.getFileContent(fileId)
+ if fileContent:
+ fileMimeType = fileContent.get("mimeType", "")
+ if fileMimeType.startswith("image/"):
+ imageData = fileContent.get("data", "")
+ mimeType = fileMimeType
+
+ # 4) PDF page rendering: render the requested page as an image via PyMuPDF
+ if not imageData:
+ chatService = services.chat
+ fileInfo = chatService.getFileInfo(fileId) if hasattr(chatService, "getFileInfo") else None
+ fileMime = (fileInfo.get("mimeType", "") if fileInfo else "").lower()
+ if fileMime == "application/pdf" or (fileInfo and (fileInfo.get("fileName", "") or "").lower().endswith(".pdf")):
+ try:
+ import fitz as _fitz
+ rawContent = chatService.getFileContent(fileId) if not fileContent else fileContent
+ rawData = rawContent.get("data", "") if rawContent else ""
+ if isinstance(rawData, str) and len(rawData) > 100:
+ pdfBytes = _b64.b64decode(rawData)
+ elif isinstance(rawData, bytes):
+ pdfBytes = rawData
+ else:
+ pdfBytes = None
+ if pdfBytes:
+ doc = _fitz.open(stream=pdfBytes, filetype="pdf")
+ targetPage = pageIndex if pageIndex is not None else 0
+ if 0 <= targetPage < len(doc):
+ page = doc[targetPage]
+ pix = page.get_pixmap(dpi=200)
+ imageData = _b64.b64encode(pix.tobytes("png")).decode("ascii")
+ mimeType = "image/png"
+ logger.info("describeImage: rendered PDF page %d as image (%dx%d)", targetPage, pix.width, pix.height)
+ doc.close()
+ except Exception as pdfErr:
+ logger.warning("describeImage: PDF page rendering failed: %s", pdfErr)
+
+ if not imageData:
+ chatService = services.chat
+ _errFileInfo = chatService.getFileInfo(fileId) if hasattr(chatService, "getFileInfo") else None
+ fileName = _errFileInfo.get("fileName", fileId) if _errFileInfo else fileId
+ fileMime = _errFileInfo.get("mimeType", "unknown") if _errFileInfo else "unknown"
+ return ToolResult(toolCallId="", toolName="describeImage", success=False,
+ error=f"No image data found in '{fileName}' (type: {fileMime}). "
+ f"This file likely contains text, not images. Use readFile(fileId=\"{fileId}\") to access its text content.")
+
+ try:
+ rawHead = _b64.b64decode(imageData[:32])
+ if rawHead[:3] == b"\xff\xd8\xff":
+ mimeType = "image/jpeg"
+ elif rawHead[:8] == b"\x89PNG\r\n\x1a\n":
+ mimeType = "image/png"
+ elif rawHead[:4] == b"GIF8":
+ mimeType = "image/gif"
+ elif rawHead[:4] == b"RIFF" and rawHead[8:12] == b"WEBP":
+ mimeType = "image/webp"
+ except Exception as e:
+ logger.warning(f"describeImage: MIME detection from base64 header failed for {fileId}: {e}")
+ dataUrl = f"data:{mimeType};base64,{imageData}"
+ from modules.datamodels.datamodelAi import AiCallRequest, AiCallOptions, OperationTypeEnum as OTE
+
+ _opType = OTE.IMAGE_ANALYSE
+ try:
+ from modules.datamodels.datamodelFiles import FileItem as _FileItemModel
+ from modules.interfaces.interfaceDbManagement import ComponentObjects as _CO
+ _fRow = _CO().db._loadRecord(_FileItemModel, fileId)
+ if _fRow:
+ _fGet = (lambda k, d=None: _fRow.get(k, d)) if isinstance(_fRow, dict) else (lambda k, d=None: getattr(_fRow, k, d))
+ if bool(_fGet("neutralize", False)):
+ _opType = OTE.NEUTRALIZATION_IMAGE
+ logger.info(f"describeImage: file {fileId} has neutralize=True, using NEUTRALIZATION_IMAGE (internal models only)")
+ except Exception as e:
+ logger.warning(f"describeImage: neutralize flag check failed for {fileId}: {e}")
+
+ visionRequest = AiCallRequest(
+ prompt=prompt,
+ options=AiCallOptions(operationType=_opType),
+ messages=[{"role": "user", "content": [
+ {"type": "text", "text": prompt},
+ {"type": "image_url", "image_url": {"url": dataUrl}},
+ ]}],
+ )
+ visionResponse = await services.ai.callAi(visionRequest)
+
+ if visionResponse.errorCount > 0:
+ return ToolResult(toolCallId="", toolName="describeImage", success=False, error=visionResponse.content)
+ return ToolResult(toolCallId="", toolName="describeImage", success=True, data=visionResponse.content)
+
+ except Exception as e:
+ return ToolResult(toolCallId="", toolName="describeImage", success=False, error=str(e))
+
+ registry.register(
+ "describeImage", _describeImage,
+ description="Analyze an image using AI vision. Works with image files and images extracted from PDFs/DOCX/PPTX. Use for OCR, data extraction, and visual analysis.",
+ parameters={
+ "type": "object",
+ "properties": {
+ "fileId": {"type": "string", "description": "The file ID containing the image or document with images"},
+ "prompt": {"type": "string", "description": "What to look for in the image (default: describe everything)"},
+ "pageIndex": {"type": "integer", "description": "Filter images by page index (0-based, for multi-page documents)"},
+ },
+ "required": ["fileId"],
+ },
+ readOnly=True,
+ )
diff --git a/modules/serviceCenter/services/serviceAgent/coreTools/_featureSubAgentTools.py b/modules/serviceCenter/services/serviceAgent/coreTools/_featureSubAgentTools.py
new file mode 100644
index 00000000..91fbb81d
--- /dev/null
+++ b/modules/serviceCenter/services/serviceAgent/coreTools/_featureSubAgentTools.py
@@ -0,0 +1,221 @@
+# Copyright (c) 2025 Patrick Motsch
+# All rights reserved.
+"""Feature Data Sub-Agent tool (queryFeatureInstance)."""
+
+import hashlib
+import logging
+import time
+from typing import Any, Dict, List, Optional, Tuple
+
+from modules.serviceCenter.services.serviceAgent.datamodelAgent import ToolResult
+from modules.serviceCenter.services.serviceAgent.toolRegistry import ToolRegistry
+
+from modules.serviceCenter.services.serviceAgent.coreTools._helpers import (
+ _getOrCreateTempFolder,
+ _looksLikeBinary,
+ _resolveFileScope,
+ _MAX_TOOL_RESULT_CHARS,
+)
+
+logger = logging.getLogger(__name__)
+
+_featureDbConnPool: Dict[str, Any] = {}
+_featureQueryCache: Dict[str, Tuple[float, str]] = {}
+_CACHE_TTL_SECONDS = 300
+
+
+def _getOrCreateFeatureDbConnector(featureDbName: str, userId: str):
+ """Reuse a pooled DB connector for the given feature database."""
+ if featureDbName in _featureDbConnPool:
+ conn = _featureDbConnPool[featureDbName]
+ try:
+ if conn.connection and not conn.connection.closed:
+ return conn
+ except Exception as e:
+ logger.warning(f"Feature DB connection check failed for {featureDbName}: {e}")
+ _featureDbConnPool.pop(featureDbName, None)
+
+ from modules.connectors.connectorDbPostgre import DatabaseConnector
+ from modules.shared.configuration import APP_CONFIG
+ conn = DatabaseConnector(
+ dbHost=APP_CONFIG.get("DB_HOST", "localhost"),
+ dbDatabase=featureDbName,
+ dbUser=APP_CONFIG.get("DB_USER"),
+ dbPassword=APP_CONFIG.get("DB_PASSWORD_SECRET"),
+ dbPort=int(APP_CONFIG.get("DB_PORT", 5432)),
+ userId=userId,
+ )
+ _featureDbConnPool[featureDbName] = conn
+ return conn
+
+
+def clearFeatureQueryCache(featureInstanceId: Optional[str] = None) -> int:
+ """Clear the feature data query cache. If featureInstanceId given, only for that instance."""
+ if featureInstanceId:
+ prefix = f"{featureInstanceId}:"
+ keys = [k for k in _featureQueryCache if k.startswith(prefix)]
+ else:
+ keys = list(_featureQueryCache.keys())
+ for k in keys:
+ del _featureQueryCache[k]
+ logger.info(f"Feature query cache cleared: {len(keys)} entries removed (instance={featureInstanceId or 'all'})")
+ return len(keys)
+
+
+def _registerFeatureSubAgentTools(registry: ToolRegistry, services):
+ """Auto-extracted from registerCoreTools."""
+ # ---- Feature Data Sub-Agent tool ----
+
+ async def _queryFeatureInstance(args: Dict[str, Any], context: Dict[str, Any]):
+ """Delegate a question to the Feature Data Sub-Agent."""
+ featureInstanceId = args.get("featureInstanceId", "")
+ question = args.get("question", "")
+ if not featureInstanceId or not question:
+ return ToolResult(
+ toolCallId="", toolName="queryFeatureInstance",
+ success=False, error="featureInstanceId and question are required",
+ )
+ try:
+ from modules.serviceCenter.services.serviceAgent.featureDataAgent import runFeatureDataAgent
+ from modules.datamodels.datamodelFeatureDataSource import FeatureDataSource
+ from modules.interfaces.interfaceDbApp import getRootInterface
+
+ rootIf = getRootInterface()
+ instance = rootIf.getFeatureInstance(featureInstanceId)
+ if not instance:
+ return ToolResult(
+ toolCallId="", toolName="queryFeatureInstance",
+ success=False, error=f"Feature instance {featureInstanceId} not found",
+ )
+
+ featureCode = instance.featureCode
+ mandateId = instance.mandateId or ""
+ instanceLabel = instance.label or ""
+ userId = context.get("userId", "")
+ workspaceInstanceId = context.get("featureInstanceId", "")
+ requestLang = None
+ if userId:
+ langUser = rootIf.getUser(userId)
+ if langUser:
+ requestLang = getattr(langUser, "language", None)
+
+ rootDbConn = rootIf.db if hasattr(rootIf, "db") else None
+ if rootDbConn is None:
+ return ToolResult(
+ toolCallId="", toolName="queryFeatureInstance",
+ success=False, error="No database connector available",
+ )
+
+ featureDataSources = rootDbConn.getRecordset(
+ FeatureDataSource,
+ recordFilter={"featureInstanceId": featureInstanceId, "workspaceInstanceId": workspaceInstanceId},
+ )
+
+ _anySourceNeutralize = any(
+ bool(ds.get("neutralize", False) if isinstance(ds, dict) else getattr(ds, "neutralize", False))
+ for ds in (featureDataSources or [])
+ )
+
+ from modules.security.rbacCatalog import getCatalogService
+ catalog = getCatalogService()
+ tableFilters = {}
+ if not featureDataSources:
+ selectedTables = catalog.getDataObjects(featureCode)
+ else:
+ allObjs = {o["meta"]["table"]: o for o in catalog.getDataObjects(featureCode) if "meta" in o and "table" in o.get("meta", {})}
+ selectedTables = []
+ _wildcardExpanded = False
+ for ds in featureDataSources:
+ tn = ds.get("tableName", "")
+ ok = ds.get("objectKey", "")
+ if ok.endswith(".*") or (not tn and ok):
+ selectedTables = list(allObjs.values())
+ _wildcardExpanded = True
+ break
+ if tn in allObjs:
+ selectedTables.append(allObjs[tn])
+ if not _wildcardExpanded:
+ for ds in featureDataSources:
+ rf = ds.get("recordFilter")
+ if rf and isinstance(rf, dict) and ds.get("tableName"):
+ tableFilters[ds["tableName"]] = rf
+
+ if not selectedTables:
+ return ToolResult(
+ toolCallId="", toolName="queryFeatureInstance",
+ success=False, error=f"No data tables available for feature '{featureCode}'",
+ )
+
+ cacheKey = f"{featureInstanceId}:{hashlib.md5(question.encode()).hexdigest()}"
+ if cacheKey in _featureQueryCache:
+ cachedAt, cachedResult = _featureQueryCache[cacheKey]
+ if time.time() - cachedAt < _CACHE_TTL_SECONDS:
+ return ToolResult(
+ toolCallId="", toolName="queryFeatureInstance",
+ success=True, data=cachedResult,
+ )
+
+ featureDbName = f"poweron_{featureCode.lower()}"
+ featureDbConn = _getOrCreateFeatureDbConnector(featureDbName, userId or "agent")
+
+ aiService = services.ai if hasattr(services, "ai") else None
+ if aiService is None:
+ return ToolResult(
+ toolCallId="", toolName="queryFeatureInstance",
+ success=False, error="AI service not available for sub-agent",
+ )
+
+ async def _subAgentAiCall(req):
+ if _anySourceNeutralize:
+ req.requireNeutralization = True
+ return await aiService.callAi(req)
+
+ answer = await runFeatureDataAgent(
+ question=question,
+ featureInstanceId=featureInstanceId,
+ featureCode=featureCode,
+ selectedTables=selectedTables,
+ mandateId=mandateId,
+ userId=userId,
+ aiCallFn=_subAgentAiCall,
+ dbConnector=featureDbConn,
+ instanceLabel=instanceLabel,
+ tableFilters=tableFilters,
+ requestLang=requestLang,
+ )
+
+ _featureQueryCache[cacheKey] = (time.time(), answer)
+
+ return ToolResult(
+ toolCallId="", toolName="queryFeatureInstance",
+ success=True, data=answer,
+ )
+ except Exception as e:
+ logger.error(f"queryFeatureInstance failed: {e}", exc_info=True)
+ return ToolResult(
+ toolCallId="", toolName="queryFeatureInstance",
+ success=False, error=str(e),
+ )
+
+ registry.register(
+ "queryFeatureInstance", _queryFeatureInstance,
+ description=(
+ "Query data from a feature instance (e.g. Trustee, CommCoach). "
+ "Delegates to a specialized sub-agent that knows the feature's data schema "
+ "and can browse, filter, and aggregate its tables. Use this when the user "
+ "has attached feature data sources or asks about feature-specific data.\n\n"
+ "GUIDELINES:\n"
+ "- Ask a precise, self-contained question (include all context the sub-agent needs).\n"
+ "- Combine related data needs into ONE call instead of multiple small ones.\n"
+ "- Avoid calling this tool repeatedly with slight variations of the same question."
+ ),
+ parameters={
+ "type": "object",
+ "properties": {
+ "featureInstanceId": {"type": "string", "description": "ID of the feature instance to query"},
+ "question": {"type": "string", "description": "What data to find or analyze from this feature instance"},
+ },
+ "required": ["featureInstanceId", "question"]
+ },
+ readOnly=True
+ )
diff --git a/modules/serviceCenter/services/serviceAgent/coreTools/_helpers.py b/modules/serviceCenter/services/serviceAgent/coreTools/_helpers.py
new file mode 100644
index 00000000..c8793775
--- /dev/null
+++ b/modules/serviceCenter/services/serviceAgent/coreTools/_helpers.py
@@ -0,0 +1,79 @@
+# Copyright (c) 2025 Patrick Motsch
+# All rights reserved.
+"""Shared helpers for core agent tools (file scope, binary detection, temp folder)."""
+
+import logging
+from typing import Optional
+
+logger = logging.getLogger(__name__)
+
+_MAX_TOOL_RESULT_CHARS = 50_000
+
+_BINARY_SIGNATURES = (b"%PDF", b"\x89PNG", b"\xff\xd8\xff", b"GIF8", b"PK\x03\x04", b"Rar!", b"\x1f\x8b")
+
+
+def _resolveFileScope(fileId: str, context: dict) -> tuple:
+ """Resolve featureInstanceId and mandateId for a file from context or management DB.
+
+ Returns (featureInstanceId, mandateId) — never None, always strings.
+ """
+ fiId = context.get("featureInstanceId", "") or ""
+ mId = context.get("mandateId", "") or ""
+ if fiId and mId:
+ return fiId, mId
+ try:
+ from modules.datamodels.datamodelFiles import FileItem
+ from modules.interfaces.interfaceDbManagement import ComponentObjects
+ fm = ComponentObjects().db._loadRecord(FileItem, fileId)
+ if fm:
+ _get = (lambda k: fm.get(k, "")) if isinstance(fm, dict) else (lambda k: getattr(fm, k, ""))
+ fiId = fiId or str(_get("featureInstanceId") or "")
+ mId = mId or str(_get("mandateId") or "")
+ except Exception as e:
+ logger.warning(f"_resolveFileScope failed for fileId={fileId}: {e}")
+ return fiId, mId
+
+
+def _looksLikeBinary(data: bytes, sampleSize: int = 1024) -> bool:
+ """Detect binary content by checking for magic bytes and non-printable char ratio."""
+ if any(data[:8].startswith(sig) for sig in _BINARY_SIGNATURES):
+ return True
+ sample = data[:sampleSize]
+ if not sample:
+ return False
+ nonPrintable = sum(1 for b in sample if b < 0x09 or (0x0E <= b < 0x20 and b != 0x1B))
+ return nonPrintable / len(sample) > 0.10
+
+
+def _getOrCreateInstanceFolder(chatService, featureInstanceId: str, mandateId: str = "") -> Optional[str]:
+ """Return the folder ID for a feature instance, creating it on first use.
+
+ Delegates to interfaceDbManagement._ensureFeatureInstanceFolder.
+ AI tools call this when saving a file without an explicit folderId
+ so that instance-produced files land in a named folder automatically.
+ """
+ try:
+ dbMgmt = chatService.interfaceDbComponent
+ return dbMgmt._ensureFeatureInstanceFolder(featureInstanceId, mandateId)
+ except Exception as e:
+ logger.warning(f"Could not get/create instance folder for {featureInstanceId}: {e}")
+ return None
+
+
+def _getOrCreateTempFolder(chatService) -> Optional[str]:
+ """Return the ID of the root-level 'Temp' folder, creating it if it doesn't exist."""
+ try:
+ allFolders = chatService.interfaceDbComponent.listFolders()
+ tempFolder = next(
+ (f for f in allFolders
+ if f.get("name") == "Temp" and not f.get("parentId")),
+ None,
+ )
+ if tempFolder:
+ return tempFolder.get("id")
+ newFolder = chatService.interfaceDbComponent.createFolder("Temp", parentId=None)
+ return newFolder.get("id") if newFolder else None
+ except Exception as e:
+ logger.warning(f"Could not get/create Temp folder: {e}")
+ return None
+
diff --git a/modules/serviceCenter/services/serviceAgent/coreTools/_mediaTools.py b/modules/serviceCenter/services/serviceAgent/coreTools/_mediaTools.py
new file mode 100644
index 00000000..da6e616c
--- /dev/null
+++ b/modules/serviceCenter/services/serviceAgent/coreTools/_mediaTools.py
@@ -0,0 +1,958 @@
+# Copyright (c) 2025 Patrick Motsch
+# All rights reserved.
+"""Media and utility tools (render, TTS, STT, image gen, charts, neutralize, code exec)."""
+
+import logging
+from typing import Any, Dict, List, Optional
+
+from modules.serviceCenter.services.serviceAgent.datamodelAgent import ToolResult
+from modules.serviceCenter.services.serviceAgent.toolRegistry import ToolRegistry
+
+from modules.serviceCenter.services.serviceAgent.coreTools._helpers import (
+ _getOrCreateTempFolder,
+ _looksLikeBinary,
+ _resolveFileScope,
+ _MAX_TOOL_RESULT_CHARS,
+)
+
+logger = logging.getLogger(__name__)
+
+
+def _registerMediaTools(registry: ToolRegistry, services):
+ """Auto-extracted from registerCoreTools."""
+ # ---- Document rendering tool ----
+
+    def _markdownToDocumentJson(markdown: str, title: str, language: str = "de") -> Dict[str, Any]:
+        """Convert markdown content to the standard document JSON format expected by renderers.
+
+        Single forward pass over the lines, recognising (in this priority
+        order) headings, fenced code blocks, pipe tables (header + separator
+        row required), bullet/numbered lists, images, and paragraphs.
+        Nested list structure and inline formatting are NOT preserved —
+        list items and paragraphs keep their raw text.  Always returns at
+        least one paragraph section.
+        """
+        import re as _re
+
+        sections = []
+        order = 0
+        lines = markdown.split("\n")
+        i = 0
+
+        def _nextId():
+            # Shared counter: produces the section id AND the `order` value
+            # stored on the section created right after the call.
+            nonlocal order
+            order += 1
+            return f"s_{order}"
+
+        while i < len(lines):
+            line = lines[i]
+
+            # --- Headings ---
+            headingMatch = _re.match(r'^(#{1,6})\s+(.+)', line)
+            if headingMatch:
+                level = len(headingMatch.group(1))
+                text = headingMatch.group(2).strip()
+                sections.append({
+                    "id": _nextId(), "content_type": "heading", "order": order,
+                    "elements": [{"content": {"text": text, "level": level}}],
+                })
+                i += 1
+                continue
+
+            # --- Fenced code blocks ---
+            codeMatch = _re.match(r'^```(\w*)', line)
+            if codeMatch:
+                lang = codeMatch.group(1) or "text"
+                codeLines = []
+                i += 1
+                # Collect until closing fence; an unterminated fence consumes
+                # the rest of the input.
+                while i < len(lines) and not lines[i].startswith("```"):
+                    codeLines.append(lines[i])
+                    i += 1
+                i += 1
+                sections.append({
+                    "id": _nextId(), "content_type": "code_block", "order": order,
+                    "elements": [{"content": {"code": "\n".join(codeLines), "language": lang}}],
+                })
+                continue
+
+            # --- Tables ---
+            tableMatch = _re.match(r'^\|(.+)\|$', line)
+            # Only a pipe row followed by a |---|---| separator row counts as
+            # a table; a lone pipe row falls through to the paragraph branch.
+            if tableMatch and (i + 1) < len(lines) and _re.match(r'^\|[\s\-:|]+\|$', lines[i + 1]):
+                headerCells = [c.strip() for c in tableMatch.group(1).split("|")]
+                i += 2
+                rows = []
+                while i < len(lines) and _re.match(r'^\|(.+)\|$', lines[i]):
+                    rowCells = [c.strip() for c in lines[i][1:-1].split("|")]
+                    rows.append(rowCells)
+                    i += 1
+                sections.append({
+                    "id": _nextId(), "content_type": "table", "order": order,
+                    "elements": [{"content": {"headers": headerCells, "rows": rows}}],
+                })
+                continue
+
+            # --- Bullet / numbered lists ---
+            listMatch = _re.match(r'^(\s*)([-*+]|\d+[.)]) (.+)', line)
+            if listMatch:
+                # List type is decided by the FIRST item's marker only.
+                isNumbered = bool(_re.match(r'\d+[.)]', listMatch.group(2)))
+                items = []
+                while i < len(lines) and _re.match(r'^(\s*)([-*+]|\d+[.)]) (.+)', lines[i]):
+                    m = _re.match(r'^(\s*)([-*+]|\d+[.)]) (.+)', lines[i])
+                    items.append({"text": m.group(3).strip()})
+                    i += 1
+                sections.append({
+                    "id": _nextId(), "content_type": "bullet_list", "order": order,
+                    "elements": [{"content": {"items": items, "list_type": "numbered" if isNumbered else "bullet"}}],
+                })
+                continue
+
+            # --- Empty lines (skip) ---
+            if not line.strip():
+                i += 1
+                continue
+
+            # --- Images: ![alt](file:<fileId>) or ![alt](url) ---
+            imgMatch = _re.match(r'^!\[([^\]]*)\]\(([^)]+)\)', line)
+            if imgMatch:
+                altText = imgMatch.group(1).strip() or "Image"
+                src = imgMatch.group(2).strip()
+                fileId = ""
+                # "file:" scheme carries a chat file id; base64 data is
+                # resolved later by the renderer path, not here.
+                if src.startswith("file:"):
+                    fileId = src[5:]
+                sections.append({
+                    "id": _nextId(), "content_type": "image", "order": order,
+                    "elements": [{
+                        "content": {
+                            "altText": altText,
+                            "base64Data": "",
+                            "_fileRef": fileId,
+                            "_srcUrl": src if not fileId else "",
+                        }
+                    }],
+                })
+                i += 1
+                continue
+
+            # --- Paragraph (collect consecutive non-empty lines) ---
+            # Stop-regex mirrors the block starters above so a paragraph
+            # never swallows the first line of the next block.
+            paraLines = []
+            while i < len(lines) and lines[i].strip() and not _re.match(r'^(#{1,6}\s|```|\|.+\||!\[|(\s*)([-*+]|\d+[.)]) )', lines[i]):
+                paraLines.append(lines[i])
+                i += 1
+            if paraLines:
+                sections.append({
+                    "id": _nextId(), "content_type": "paragraph", "order": order,
+                    "elements": [{"content": {"text": " ".join(paraLines)}}],
+                })
+                continue
+
+            i += 1
+
+        # Guarantee at least one section so renderers never see an empty doc.
+        if not sections:
+            sections.append({
+                "id": _nextId(), "content_type": "paragraph", "order": order,
+                "elements": [{"content": {"text": markdown.strip() or "(empty)"}}],
+            })
+
+        return {
+            "metadata": {
+                "split_strategy": "single_document",
+                "source_documents": [],
+                "extraction_method": "agent_rendering",
+                "title": title,
+                "language": language,
+            },
+            "documents": [{
+                "id": "doc_1",
+                "title": title,
+                "sections": sections,
+            }],
+        }
+
+    async def _renderDocument(args: Dict[str, Any], context: Dict[str, Any]):
+        """Render agent-produced markdown content into any document format via the RendererRegistry.
+
+        Input is either inline `content` or `sourceFileId` (a previously
+        written .md/.txt file); sourceFileId takes precedence and bypasses
+        the inline size limit.  The markdown is parsed to structured JSON,
+        image `file:` refs are resolved to base64, and the rendered output
+        is saved as chat file(s), tagged with the feature instance and moved
+        into the Temp folder.  Emits a `fileCreated` side event per file.
+        """
+        import re as _re
+        sourceFileId = (args.get("sourceFileId") or "").strip()
+        content = args.get("content", "")
+        if not isinstance(content, str):
+            content = str(content) if content is not None else ""
+        outputFormat = args.get("outputFormat", "pdf")
+        title = args.get("title", "Document")
+        language = args.get("language", "de")
+
+        # File path: load markdown from a stored chat file instead of args.
+        if sourceFileId:
+            try:
+                dbMgmt = services.chat.interfaceDbComponent
+                fileRow = dbMgmt.getFile(sourceFileId)
+                if not fileRow:
+                    return ToolResult(
+                        toolCallId="",
+                        toolName="renderDocument",
+                        success=False,
+                        error=f"sourceFileId not found: {sourceFileId}",
+                    )
+                rawBytes = dbMgmt.getFileData(sourceFileId)
+                if not rawBytes:
+                    return ToolResult(
+                        toolCallId="",
+                        toolName="renderDocument",
+                        success=False,
+                        error=f"sourceFileId has no data: {sourceFileId}",
+                    )
+                try:
+                    content = rawBytes.decode("utf-8")
+                except UnicodeDecodeError:
+                    # Lossy fallback keeps rendering alive for odd encodings.
+                    content = rawBytes.decode("latin-1", errors="replace")
+            except Exception as e:
+                return ToolResult(
+                    toolCallId="",
+                    toolName="renderDocument",
+                    success=False,
+                    error=f"Could not read sourceFileId: {e}",
+                )
+
+        if not (content or "").strip():
+            return ToolResult(
+                toolCallId="",
+                toolName="renderDocument",
+                success=False,
+                error=(
+                    "Provide non-empty `content` (markdown) or `sourceFileId` (id of a .md/.txt from writeFile). "
+                    "For long documents use writeFile create+append, then renderDocument(sourceFileId=...)."
+                ),
+            )
+
+        # Inline-size guard: ~3 chars/token, keep inline content under half
+        # the model's output budget; floor of 3000, default 6000 when the
+        # budget is unknown.
+        modelMaxTokens = context.get("modelMaxOutputTokens", 0)
+        _inlineCharLimit = int(modelMaxTokens * 3 * 0.5) if modelMaxTokens > 0 else 6000
+        _inlineCharLimit = max(_inlineCharLimit, 3000)
+
+        if not sourceFileId and len(content) > _inlineCharLimit:
+            return ToolResult(
+                toolCallId="",
+                toolName="renderDocument",
+                success=False,
+                error=(
+                    f"Inline `content` is {len(content)} chars — over the {_inlineCharLimit} char limit "
+                    f"(derived from model output budget of {modelMaxTokens} tokens). "
+                    "Large documents must use the file path:\n"
+                    "1. writeFile(mode='create', name='draft.md', content=)\n"
+                    "2. writeFile(mode='append', fileId=, content=) — repeat as needed\n"
+                    "3. renderDocument(sourceFileId=, outputFormat='pdf', title='...')\n"
+                    "This avoids output truncation entirely."
+                ),
+            )
+
+        try:
+            structuredContent = _markdownToDocumentJson(content, title, language)
+
+            # Resolve image file references (file:fileId) to base64 data from Knowledge Store
+            knowledgeService = None
+            try:
+                knowledgeService = services.getService("knowledge")
+            except Exception as e:
+                logger.warning(f"renderDocument: knowledge service unavailable: {e}")
+            resolvedImages = 0
+            for doc in structuredContent.get("documents", []):
+                for section in doc.get("sections", []):
+                    if section.get("content_type") != "image":
+                        continue
+                    for element in section.get("elements", []):
+                        contentObj = element.get("content", {})
+                        fileRef = contentObj.get("_fileRef", "")
+                        if not fileRef or contentObj.get("base64Data"):
+                            continue
+                        # First try pre-extracted image chunks from the
+                        # knowledge store, then fall back to raw file bytes.
+                        if knowledgeService:
+                            chunks = knowledgeService._knowledgeDb.getContentChunks(fileRef)
+                            imageChunks = [c for c in (chunks or []) if c.get("contentType") == "image"]
+                            if imageChunks:
+                                contentObj["base64Data"] = imageChunks[0].get("data", "")
+                                chunkMime = imageChunks[0].get("contextRef", {}).get("mimeType", "image/png")
+                                contentObj["mimeType"] = chunkMime
+                                resolvedImages += 1
+                        if not contentObj.get("base64Data"):
+                            try:
+                                rawBytes = services.chat.getFileData(fileRef)
+                                if rawBytes:
+                                    import base64 as _b64
+                                    contentObj["base64Data"] = _b64.b64encode(rawBytes).decode("ascii")
+                                    # assumes raw fallback bytes are PNG — TODO confirm
+                                    contentObj["mimeType"] = "image/png"
+                                    resolvedImages += 1
+                            except Exception as e:
+                                logger.warning(f"renderDocument: image resolve failed for fileRef={fileRef}: {e}")
+                        # Internal markers must not leak to the renderer.
+                        contentObj.pop("_fileRef", None)
+                        contentObj.pop("_srcUrl", None)
+
+            sectionCount = len(structuredContent.get("documents", [{}])[0].get("sections", []))
+            logger.info(f"renderDocument: parsed {sectionCount} sections from markdown ({len(content)} chars), resolved {resolvedImages} image(s), format={outputFormat}")
+
+            generationService = services.getService("generation")
+            documents = await generationService.renderReport(
+                extractedContent=structuredContent,
+                outputFormat=outputFormat,
+                language=language,
+                title=title,
+                userPrompt=content,
+            )
+
+            if not documents:
+                return ToolResult(toolCallId="", toolName="renderDocument", success=False, error="Rendering produced no output")
+
+            savedFiles = []
+            sideEvents = []
+            chatService = services.chat
+
+            # Filesystem-safe fallback name derived from the title.
+            sanitizedTitle = _re.sub(r'[^\w._-]', '_', title, flags=_re.UNICODE).strip('_') or "document"
+
+            for doc in documents:
+                docData = doc.documentData if hasattr(doc, "documentData") else b""
+                docName = doc.filename if hasattr(doc, "filename") else f"{sanitizedTitle}.{outputFormat}"
+                docMime = doc.mimeType if hasattr(doc, "mimeType") else "application/octet-stream"
+
+                if not docName.lower().endswith(f".{outputFormat}"):
+                    docName = f"{sanitizedTitle}.{outputFormat}"
+
+                fileItem = None
+                if hasattr(chatService.interfaceDbComponent, "saveGeneratedFile"):
+                    fileItem = chatService.interfaceDbComponent.saveGeneratedFile(docData, docName, docMime)
+                else:
+                    fileItem, _ = chatService.interfaceDbComponent.saveUploadedFile(docData, docName)
+
+                if fileItem:
+                    fid = fileItem.id if hasattr(fileItem, "id") else fileItem.get("id", "?")
+                    # Tag the file with the feature instance and park it in Temp.
+                    fiId = context.get("featureInstanceId") or (services.featureInstanceId if services else "")
+                    if fiId:
+                        chatService.interfaceDbComponent.updateFile(fid, {"featureInstanceId": fiId})
+                    tempFolderId = _getOrCreateTempFolder(chatService)
+                    if tempFolderId:
+                        chatService.interfaceDbComponent.updateFile(fid, {"folderId": tempFolderId})
+                    savedFiles.append(f"- {docName} (id: {fid})")
+                    sideEvents.append({
+                        "type": "fileCreated",
+                        "data": {
+                            "fileId": fid,
+                            "fileName": docName,
+                            "mimeType": docMime,
+                            "fileSize": len(docData),
+                        },
+                    })
+
+            result = f"Rendered {len(documents)} document(s):\n" + "\n".join(savedFiles)
+            return ToolResult(toolCallId="", toolName="renderDocument", success=True, data=result, sideEvents=sideEvents)
+
+        except Exception as e:
+            logger.error(f"renderDocument failed: {e}")
+            return ToolResult(toolCallId="", toolName="renderDocument", success=False, error=str(e))
+
+    registry.register(
+        "renderDocument", _renderDocument,
+        description=(
+            "Render markdown into a document file (PDF, DOCX, XLSX, PPTX, CSV, HTML, MD, JSON, TXT). "
+            "For long documents: write markdown with writeFile (mode=create then append chunks), then call this tool with "
+            "`sourceFileId` only (tiny JSON — avoids model output truncation). For short docs you may pass `content` inline. "
+            "Images:  in the markdown."
+        ),
+        parameters={
+            "type": "object",
+            "properties": {
+                "content": {
+                    "type": "string",
+                    "description": "Full markdown inline. Prefer `sourceFileId` when the document is large (many KB).",
+                },
+                "sourceFileId": {
+                    "type": "string",
+                    "description": "Chat file id of markdown saved via writeFile (create+append). Use this instead of `content` for long PDFs.",
+                },
+                "outputFormat": {"type": "string", "description": "Target format: pdf, docx, xlsx, pptx, csv, html, md, json, txt", "default": "pdf"},
+                "title": {"type": "string", "description": "Document title", "default": "Document"},
+                "language": {"type": "string", "description": "Document language (ISO 639-1)", "default": "de"},
+            },
+        },
+        readOnly=False,
+    )
+
+ # ── textToSpeech tool ──────────────────────────────────────────────
+ def _stripMarkdownForTts(text: str) -> str:
+ """Strip markdown formatting so TTS reads clean speech text."""
+ import re as _re
+ t = text
+ t = _re.sub(r'\*\*(.+?)\*\*', r'\1', t)
+ t = _re.sub(r'\*(.+?)\*', r'\1', t)
+ t = _re.sub(r'__(.+?)__', r'\1', t)
+ t = _re.sub(r'_(.+?)_', r'\1', t)
+ t = _re.sub(r'`[^`]+`', lambda m: m.group(0)[1:-1], t)
+ t = _re.sub(r'^#{1,6}\s*', '', t, flags=_re.MULTILINE)
+ t = _re.sub(r'^\s*[-*+]\s+', '', t, flags=_re.MULTILINE)
+ t = _re.sub(r'^\s*\d+\.\s+', '', t, flags=_re.MULTILINE)
+ t = _re.sub(r'\[(.+?)\]\(.+?\)', r'\1', t)
+ t = _re.sub(r'!\[.*?\]\(.*?\)', '', t)
+ t = _re.sub(r'\n{3,}', '\n\n', t)
+ return t.strip()
+
+    async def _textToSpeech(args: Dict[str, Any], context: Dict[str, Any]):
+        """Convert text to speech using Google Cloud TTS, deliver audio via SSE.
+
+        Pipeline: strip markdown -> auto-detect language (unless a BCP-47
+        code is supplied) -> resolve the user's preferred voice (mandate-
+        scoped first, then global) -> synthesize -> emit a `voiceResponse`
+        side event with base64 audio for the chat client to play.
+        """
+        import base64 as _b64
+        text = args.get("text", "")
+        language = args.get("language", "auto")
+        voiceName = args.get("voiceName")
+
+        if not text:
+            return ToolResult(toolCallId="", toolName="textToSpeech", success=False, error="text is required")
+
+        cleanText = _stripMarkdownForTts(text)
+        if not cleanText:
+            return ToolResult(toolCallId="", toolName="textToSpeech", success=False, error="text is empty after stripping markdown")
+
+        try:
+            from modules.interfaces.interfaceVoiceObjects import getVoiceInterface
+            mandateId = context.get("mandateId", "")
+            voiceInterface = getVoiceInterface(currentUser=None, mandateId=mandateId)
+
+            # Detection returns ISO 639-1; TTS wants a BCP-47 locale code.
+            _ISO_TO_BCP47 = {
+                "de": "de-DE", "en": "en-US", "fr": "fr-FR", "it": "it-IT",
+                "es": "es-ES", "pt": "pt-BR", "nl": "nl-NL", "pl": "pl-PL",
+                "ru": "ru-RU", "ja": "ja-JP", "zh": "zh-CN", "ko": "ko-KR",
+                "ar": "ar-XA", "hi": "hi-IN", "tr": "tr-TR", "sv": "sv-SE",
+            }
+
+            if language == "auto":
+                try:
+                    # Only a snippet is needed for reliable detection.
+                    snippet = cleanText[:500]
+                    detectResult = await voiceInterface.detectLanguage(snippet)
+                    if detectResult and detectResult.get("success"):
+                        detected = detectResult.get("language", "de")
+                        language = _ISO_TO_BCP47.get(detected, detected)
+                        # Synthesize a xx-XX locale for languages not in the map.
+                        if "-" not in language:
+                            language = _ISO_TO_BCP47.get(language, f"{language}-{language.upper()}")
+                        logger.info(f"textToSpeech: auto-detected language '{detected}' -> '{language}'")
+                    else:
+                        language = "de-DE"
+                except Exception as detectErr:
+                    logger.warning(f"textToSpeech: language detection failed: {detectErr}, defaulting to de-DE")
+                    language = "de-DE"
+
+            # Voice resolution: explicit arg wins; otherwise look up the
+            # user's stored TTS voice preferences (best-effort, never fatal).
+            if not voiceName:
+                try:
+                    from modules.datamodels.datamodelUam import UserVoicePreferences
+                    from modules.interfaces.interfaceDbApp import getRootInterface
+                    userId = context.get("userId", "")
+                    if userId:
+                        rootIf = getRootInterface()
+                        prefRecords = rootIf.db.getRecordset(
+                            UserVoicePreferences,
+                            recordFilter={"userId": userId}
+                        )
+                        if prefRecords:
+                            allPrefs = [
+                                r if isinstance(r, dict) else r.model_dump() if hasattr(r, "model_dump") else r
+                                for r in prefRecords
+                            ]
+                            # Mandate-scoped preference beats the global one.
+                            _mid = str(mandateId or "").strip()
+                            scopedPref = next((p for p in allPrefs if str(p.get("mandateId") or "").strip() == _mid), None)
+                            globalPref = next((p for p in allPrefs if not str(p.get("mandateId") or "").strip()), None)
+
+                            def _resolveVoiceFromMap(prefDict, lang):
+                                # ttsVoiceMap: language -> voiceName (or dict with
+                                # a voiceName key). Match exact locale, then base
+                                # language, then any key sharing the base prefix.
+                                vm = (prefDict or {}).get("ttsVoiceMap", {}) or {}
+                                if not isinstance(vm, dict) or not vm:
+                                    return None
+                                baseLang = lang.split("-")[0].lower() if isinstance(lang, str) and lang else ""
+                                langNorm = str(lang or "").strip()
+                                if langNorm in vm:
+                                    entry = vm[langNorm]
+                                    return entry.get("voiceName") if isinstance(entry, dict) else entry
+                                if baseLang and baseLang in vm:
+                                    entry = vm[baseLang]
+                                    return entry.get("voiceName") if isinstance(entry, dict) else entry
+                                if baseLang:
+                                    for mk, mv in vm.items():
+                                        mkn = str(mk).lower()
+                                        if mkn == baseLang or mkn.startswith(f"{baseLang}-"):
+                                            return mv.get("voiceName") if isinstance(mv, dict) else mv
+                                return None
+
+                            voiceName = (
+                                _resolveVoiceFromMap(scopedPref, language)
+                                or _resolveVoiceFromMap(globalPref, language)
+                                or _resolveVoiceFromMap(allPrefs[0], language)
+                            )
+                            # Legacy single-voice fields as a last resort.
+                            if not voiceName:
+                                for candidate in [globalPref, scopedPref, allPrefs[0]]:
+                                    if candidate and candidate.get("ttsVoice") and candidate.get("ttsLanguage") == language:
+                                        voiceName = candidate["ttsVoice"]
+                                        break
+                            if voiceName:
+                                logger.info(f"textToSpeech: using configured voice '{voiceName}' for language '{language}'")
+                except Exception as prefErr:
+                    logger.debug(f"textToSpeech: could not load voice preferences: {prefErr}")
+
+            ttsResult = await voiceInterface.textToSpeech(
+                text=cleanText,
+                languageCode=language,
+                voiceName=voiceName,
+            )
+
+            if not ttsResult or not ttsResult.get("success"):
+                errMsg = ttsResult.get("error", "TTS call failed") if ttsResult else "TTS returned None"
+                return ToolResult(toolCallId="", toolName="textToSpeech", success=False, error=errMsg)
+
+            audioContent = ttsResult.get("audioContent", "")
+            if not audioContent:
+                return ToolResult(toolCallId="", toolName="textToSpeech", success=False, error="TTS returned no audio")
+
+            # Normalize audio payload to base64 text for the SSE event.
+            if isinstance(audioContent, bytes):
+                audioB64 = _b64.b64encode(audioContent).decode("ascii")
+            elif isinstance(audioContent, str):
+                audioB64 = audioContent
+            else:
+                audioB64 = str(audioContent)
+
+            audioFormat = ttsResult.get("audioFormat", "mp3")
+            charCount = len(cleanText)
+            usedVoice = voiceName or "default"
+            logger.info(f"textToSpeech: generated {audioFormat} audio for {charCount} chars, language={language}, voice={usedVoice}")
+
+            return ToolResult(
+                toolCallId="", toolName="textToSpeech", success=True,
+                data=f"Audio generated ({charCount} characters, language={language}, voice={usedVoice}). Playing in chat.",
+                sideEvents=[{
+                    "type": "voiceResponse",
+                    "data": {
+                        "audio": audioB64,
+                        "format": audioFormat,
+                        "language": language,
+                        "charCount": charCount,
+                    },
+                }],
+            )
+
+        except ImportError:
+            return ToolResult(toolCallId="", toolName="textToSpeech", success=False,
+                              error="Voice interface not available (missing dependency)")
+        except Exception as e:
+            logger.error(f"textToSpeech failed: {e}")
+            return ToolResult(toolCallId="", toolName="textToSpeech", success=False, error=str(e))
+
+    registry.register(
+        "textToSpeech", _textToSpeech,
+        description=(
+            "Convert text to speech audio. The audio is played directly in the chat. "
+            "Use this when the user asks you to read something aloud, narrate, or speak. "
+            "Language is auto-detected from the text content. You do NOT need to specify a language."
+        ),
+        parameters={
+            "type": "object",
+            "properties": {
+                "text": {"type": "string", "description": "The text to convert to speech. Can include markdown (will be stripped automatically)."},
+                "language": {"type": "string", "description": "BCP-47 language code (e.g. de-DE, en-US) or 'auto' for automatic detection", "default": "auto"},
+                "voiceName": {"type": "string", "description": "Optional specific voice name. If omitted, uses the configured voice for the detected language."},
+            },
+            "required": ["text"],
+        },
+        readOnly=False,
+    )
+
+ # ── generateImage tool ─────────────────────────────────────────────
+
+    async def _generateImage(args: Dict[str, Any], context: Dict[str, Any]):
+        """Generate an image from a text prompt using AI (DALL-E).
+
+        Generates one PNG via ImageGenerationPath, saves it as a chat file
+        (tagged with the feature instance, parked in Temp) and emits a
+        `fileCreated` side event per image.
+        """
+        import re as _re
+
+        prompt = (args.get("prompt") or "").strip()
+        style = (args.get("style") or "").strip() or None
+        title = (args.get("title") or "").strip() or "Generated Image"
+
+        if not prompt:
+            return ToolResult(toolCallId="", toolName="generateImage", success=False, error="prompt is required")
+
+        try:
+            from modules.serviceCenter.services.serviceGeneration.paths.imagePath import ImageGenerationPath
+
+            imagePath = ImageGenerationPath(services)
+            aiResponse = await imagePath.generateImages(
+                userPrompt=prompt,
+                count=1,
+                style=style,
+                format="png",
+                title=title,
+            )
+
+            if not aiResponse.documents:
+                return ToolResult(toolCallId="", toolName="generateImage", success=False, error="Image generation returned no image data")
+
+            sideEvents = []
+            savedFiles = []
+            chatService = services.chat
+            # Filesystem-safe fallback name derived from the title.
+            sanitizedTitle = _re.sub(r'[^\w._-]', '_', title, flags=_re.UNICODE).strip('_') or "generated_image"
+
+            for doc in aiResponse.documents:
+                docData = doc.documentData if hasattr(doc, "documentData") else b""
+                docName = doc.documentName if hasattr(doc, "documentName") else f"{sanitizedTitle}.png"
+                docMime = doc.mimeType if hasattr(doc, "mimeType") else "image/png"
+
+                if not docName.lower().endswith(".png"):
+                    docName = f"{sanitizedTitle}.png"
+
+                fileItem = None
+                if hasattr(chatService.interfaceDbComponent, "saveGeneratedFile"):
+                    fileItem = chatService.interfaceDbComponent.saveGeneratedFile(docData, docName, docMime)
+                else:
+                    fileItem, _ = chatService.interfaceDbComponent.saveUploadedFile(docData, docName)
+
+                if fileItem:
+                    fid = fileItem.id if hasattr(fileItem, "id") else fileItem.get("id", "?")
+                    # Tag with the feature instance and move into Temp.
+                    fiId = context.get("featureInstanceId") or (services.featureInstanceId if services else "")
+                    if fiId:
+                        chatService.interfaceDbComponent.updateFile(fid, {"featureInstanceId": fiId})
+                    tempFolderId = _getOrCreateTempFolder(chatService)
+                    if tempFolderId:
+                        chatService.interfaceDbComponent.updateFile(fid, {"folderId": tempFolderId})
+                    savedFiles.append(f"- {docName} (id: {fid})")
+                    sideEvents.append({
+                        "type": "fileCreated",
+                        "data": {
+                            "fileId": fid,
+                            "fileName": docName,
+                            "mimeType": docMime,
+                            "fileSize": len(docData),
+                        },
+                    })
+
+            result = f"Generated {len(aiResponse.documents)} image(s):\n" + "\n".join(savedFiles)
+            return ToolResult(toolCallId="", toolName="generateImage", success=True, data=result, sideEvents=sideEvents)
+
+        except Exception as e:
+            logger.error(f"generateImage failed: {e}")
+            return ToolResult(toolCallId="", toolName="generateImage", success=False, error=str(e))
+
+    registry.register(
+        "generateImage", _generateImage,
+        description=(
+            "Generate an image from a text description using AI (DALL-E). "
+            "The generated image is saved as a file in the workspace. "
+            "Use this when the user asks to create, generate, draw, or design an image, illustration, icon, logo, diagram, or any visual content. "
+            "Provide a detailed, descriptive prompt for best results."
+        ),
+        parameters={
+            "type": "object",
+            "properties": {
+                "prompt": {"type": "string", "description": "Detailed description of the image to generate. Be specific about subject, composition, colors, style, and mood."},
+                "style": {"type": "string", "description": "Optional style modifier (e.g. 'photorealistic', 'watercolor', 'digital art', 'minimalist', 'sketch')"},
+                "title": {"type": "string", "description": "Title/filename for the generated image", "default": "Generated Image"},
+            },
+            "required": ["prompt"],
+        },
+        readOnly=False,
+    )
+
+ # ── createChart tool ─────────────────────────────────────────────────
+
+    async def _createChart(args: Dict[str, Any], context: Dict[str, Any]):
+        """Create a data chart as PNG image using matplotlib.
+
+        Renders with the headless Agg backend, saves the PNG as a chat file
+        (tagged with the feature instance, parked in Temp) and emits a
+        `fileCreated` side event.  Pie/donut use only the first dataset.
+        """
+        import re as _re
+
+        # chartType is lowercased, so schema value "horizontalBar" is
+        # matched below as "horizontalbar".
+        chartType = (args.get("chartType") or "bar").strip().lower()
+        title = (args.get("title") or "Chart").strip()
+        labels = args.get("labels") or []
+        datasets = args.get("datasets") or []
+        xLabel = (args.get("xLabel") or "").strip()
+        yLabel = (args.get("yLabel") or "").strip()
+        # Clamp figure size to sane inches: width 4-20, height 3-14.
+        width = min(max(args.get("width") or 10, 4), 20)
+        height = min(max(args.get("height") or 6, 3), 14)
+        colors = args.get("colors") or None
+
+        if not datasets:
+            return ToolResult(toolCallId="", toolName="createChart", success=False, error="datasets is required (list of {label, values})")
+
+        try:
+            import matplotlib
+            # Headless backend must be selected before pyplot is imported.
+            matplotlib.use("Agg")
+            import logging as _mpllog
+            _mpllog.getLogger("matplotlib").setLevel(_mpllog.WARNING)
+            import matplotlib.pyplot as plt
+            import io
+
+            _DEFAULT_COLORS = [
+                "#4285F4", "#EA4335", "#FBBC04", "#34A853", "#FF6D01",
+                "#46BDC6", "#7B61FF", "#F538A0", "#00ACC1", "#AB47BC",
+            ]
+            # Custom colors only used when there is one per series.
+            usedColors = colors if colors and len(colors) >= len(datasets) else _DEFAULT_COLORS
+
+            fig, ax = plt.subplots(figsize=(width, height))
+            fig.patch.set_facecolor("#FFFFFF")
+            ax.set_facecolor("#FAFAFA")
+
+            if chartType in ("pie", "donut"):
+                values = datasets[0].get("values", []) if datasets else []
+                explode = [0.02] * len(values)
+                wedges, texts, autotexts = ax.pie(
+                    values, labels=labels, autopct="%1.1f%%",
+                    colors=usedColors[:len(values)], explode=explode,
+                    textprops={"fontsize": 9},
+                )
+                if chartType == "donut":
+                    # White disc over the center turns the pie into a donut.
+                    ax.add_artist(plt.Circle((0, 0), 0.55, fc="white"))
+                ax.set_title(title, fontsize=14, fontweight="bold", pad=16)
+
+            else:
+                import numpy as _np
+                x = _np.arange(len(labels)) if labels else _np.arange(max(len(d.get("values", [])) for d in datasets))
+                # Grouped bars share the 0.8 slot per category.
+                barWidth = 0.8 / max(len(datasets), 1)
+
+                for i, ds in enumerate(datasets):
+                    dsLabel = ds.get("label", f"Series {i+1}")
+                    values = ds.get("values", [])
+                    color = usedColors[i % len(usedColors)]
+
+                    if chartType == "bar":
+                        offset = (i - len(datasets) / 2 + 0.5) * barWidth
+                        ax.bar(x + offset, values, barWidth, label=dsLabel, color=color, edgecolor="white", linewidth=0.5)
+                    elif chartType == "horizontalbar":
+                        offset = (i - len(datasets) / 2 + 0.5) * barWidth
+                        ax.barh(x + offset, values, barWidth, label=dsLabel, color=color, edgecolor="white", linewidth=0.5)
+                    elif chartType == "line":
+                        ax.plot(x[:len(values)], values, marker="o", markersize=5, label=dsLabel, color=color, linewidth=2)
+                    elif chartType == "area":
+                        ax.fill_between(x[:len(values)], values, alpha=0.3, color=color)
+                        ax.plot(x[:len(values)], values, label=dsLabel, color=color, linewidth=2)
+                    elif chartType == "scatter":
+                        ax.scatter(x[:len(values)], values, label=dsLabel, color=color, s=50, edgecolors="white", linewidth=0.5)
+                    else:
+                        # Unknown type: fall back to a plain bar chart.
+                        ax.bar(x, values, label=dsLabel, color=color)
+
+                if labels:
+                    if chartType == "horizontalbar":
+                        ax.set_yticks(x)
+                        ax.set_yticklabels(labels, fontsize=9)
+                    else:
+                        ax.set_xticks(x)
+                        ax.set_xticklabels(labels, fontsize=9, rotation=45 if len(labels) > 6 else 0, ha="right" if len(labels) > 6 else "center")
+
+                ax.set_title(title, fontsize=14, fontweight="bold", pad=12)
+                if xLabel:
+                    ax.set_xlabel(xLabel, fontsize=10)
+                if yLabel:
+                    ax.set_ylabel(yLabel, fontsize=10)
+                if len(datasets) > 1:
+                    ax.legend(fontsize=9, framealpha=0.9)
+                ax.grid(axis="y", alpha=0.3, linestyle="--")
+                ax.spines["top"].set_visible(False)
+                ax.spines["right"].set_visible(False)
+
+            plt.tight_layout()
+            buf = io.BytesIO()
+            fig.savefig(buf, format="png", dpi=150, bbox_inches="tight")
+            # Close the figure explicitly — Agg figures are not GC'd by pyplot.
+            plt.close(fig)
+            pngData = buf.getvalue()
+
+            chatService = services.chat
+            sanitizedTitle = _re.sub(r'[^\w._-]', '_', title, flags=_re.UNICODE).strip('_') or "chart"
+            fileName = f"{sanitizedTitle}.png"
+
+            if hasattr(chatService.interfaceDbComponent, "saveGeneratedFile"):
+                fileItem = chatService.interfaceDbComponent.saveGeneratedFile(pngData, fileName, "image/png")
+            else:
+                fileItem, _ = chatService.interfaceDbComponent.saveUploadedFile(pngData, fileName)
+
+            fid = fileItem.id if hasattr(fileItem, "id") else fileItem.get("id", "?") if isinstance(fileItem, dict) else "?"
+            # Tag with the feature instance and move into Temp ("?" = unsaved).
+            fiId = context.get("featureInstanceId") or (services.featureInstanceId if services else "")
+            if fiId and fid != "?":
+                chatService.interfaceDbComponent.updateFile(fid, {"featureInstanceId": fiId})
+            tempFolderId = _getOrCreateTempFolder(chatService)
+            if tempFolderId and fid != "?":
+                chatService.interfaceDbComponent.updateFile(fid, {"folderId": tempFolderId})
+
+            sideEvents = [{"type": "fileCreated", "data": {
+                "fileId": fid, "fileName": fileName,
+                "mimeType": "image/png", "fileSize": len(pngData),
+            }}]
+            return ToolResult(
+                toolCallId="", toolName="createChart", success=True,
+                data=f"Chart saved as '{fileName}' (id: {fid}, {len(pngData)} bytes). "
+                     f"Embed in documents with: ",
+                sideEvents=sideEvents,
+            )
+
+        except Exception as e:
+            logger.error(f"createChart failed: {e}", exc_info=True)
+            return ToolResult(toolCallId="", toolName="createChart", success=False, error=str(e))
+
+    registry.register(
+        "createChart", _createChart,
+        description=(
+            "Create a data chart/graph as a PNG image using matplotlib. "
+            "Supported types: bar, horizontalBar, line, area, scatter, pie, donut. "
+            "The chart is saved as a file in the workspace. "
+            "Use the returned fileId to embed in documents via renderDocument: . "
+            "Provide structured data with labels and datasets."
+        ),
+        parameters={
+            "type": "object",
+            "properties": {
+                "chartType": {
+                    "type": "string",
+                    "enum": ["bar", "horizontalBar", "line", "area", "scatter", "pie", "donut"],
+                    "description": "Chart type (default: bar)",
+                },
+                "title": {"type": "string", "description": "Chart title"},
+                "labels": {
+                    "type": "array", "items": {"type": "string"},
+                    "description": "X-axis labels / category names",
+                },
+                "datasets": {
+                    "type": "array",
+                    "items": {
+                        "type": "object",
+                        "properties": {
+                            "label": {"type": "string", "description": "Series name (legend)"},
+                            "values": {"type": "array", "items": {"type": "number"}, "description": "Data values"},
+                        },
+                        "required": ["values"],
+                    },
+                    "description": "Data series to plot",
+                },
+                "xLabel": {"type": "string", "description": "X-axis label"},
+                "yLabel": {"type": "string", "description": "Y-axis label"},
+                "colors": {
+                    "type": "array", "items": {"type": "string"},
+                    "description": "Custom hex colors for series (e.g. ['#4285F4', '#EA4335'])",
+                },
+                "width": {"type": "number", "description": "Figure width in inches (4-20, default 10)"},
+                "height": {"type": "number", "description": "Figure height in inches (3-14, default 6)"},
+            },
+            "required": ["datasets"],
+        },
+        readOnly=False,
+    )
+
+ # ── Phase 3: speechToText, detectLanguage, neutralizeData, executeCode ──
+
+    async def _speechToText(args: Dict[str, Any], context: Dict[str, Any]):
+        """Transcribe an uploaded audio file via the voice interface.
+
+        Loads the audio bytes from the chat file store by `fileId` and sends
+        them to speechToText; `language` is a BCP-47 code (default de-DE).
+        Returns the transcript with its confidence score.
+        """
+        fileId = args.get("fileId", "")
+        if not fileId:
+            return ToolResult(toolCallId="", toolName="speechToText", success=False, error="fileId is required")
+        try:
+            chatService = services.chat
+            audioData = chatService.interfaceDbComponent.getFileData(fileId)
+            if not audioData:
+                return ToolResult(toolCallId="", toolName="speechToText", success=False, error=f"No data found for file {fileId}")
+            from modules.interfaces.interfaceVoiceObjects import getVoiceInterface
+            mandateId = context.get("mandateId", "")
+            voiceInterface = getVoiceInterface(currentUser=None, mandateId=mandateId)
+            language = args.get("language", "de-DE")
+            result = await voiceInterface.speechToText(audioData, language=language)
+            if result and result.get("success"):
+                transcript = result.get("text", "")
+                confidence = result.get("confidence", 0)
+                return ToolResult(
+                    toolCallId="", toolName="speechToText", success=True,
+                    data=f"Transcript (confidence: {confidence:.0%}):\n{transcript}"
+                )
+            # NOTE(review): if result is None, .get raises AttributeError which
+            # the except below converts to a generic error string — confirm intent.
+            return ToolResult(toolCallId="", toolName="speechToText", success=False, error=result.get("error", "Transcription failed"))
+        except Exception as e:
+            return ToolResult(toolCallId="", toolName="speechToText", success=False, error=str(e))
+
+    async def _detectLanguage(args: Dict[str, Any], context: Dict[str, Any]):
+        """Detect the language of `text` via the voice interface.
+
+        Returns the detected language code as reported by detectLanguage.
+        """
+        text = args.get("text", "")
+        if not text:
+            return ToolResult(toolCallId="", toolName="detectLanguage", success=False, error="text is required")
+        try:
+            from modules.interfaces.interfaceVoiceObjects import getVoiceInterface
+            mandateId = context.get("mandateId", "")
+            voiceInterface = getVoiceInterface(currentUser=None, mandateId=mandateId)
+            result = await voiceInterface.detectLanguage(text)
+            if result and result.get("success"):
+                lang = result.get("language", "unknown")
+                return ToolResult(toolCallId="", toolName="detectLanguage", success=True, data=f"Detected language: {lang}")
+            # NOTE(review): result=None makes .get raise AttributeError; the
+            # except below then reports the raw exception text — confirm intent.
+            return ToolResult(toolCallId="", toolName="detectLanguage", success=False, error=result.get("error", "Detection failed"))
+        except Exception as e:
+            return ToolResult(toolCallId="", toolName="detectLanguage", success=False, error=str(e))
+
+    async def _neutralizeData(args: Dict[str, Any], context: Dict[str, Any]):
+        """Anonymize/neutralize sensitive data in text or a stored file.
+
+        When `text` is given it is processed directly (with `fileId` as an
+        optional reference); otherwise the file content is processed by id.
+        Returns the neutralized text.
+        """
+        text = args.get("text", "")
+        fileId = args.get("fileId", "")
+        if not text and not fileId:
+            return ToolResult(toolCallId="", toolName="neutralizeData", success=False, error="text or fileId is required")
+        try:
+            neutralizationService = services.getService("neutralization")
+            if not neutralizationService:
+                return ToolResult(toolCallId="", toolName="neutralizeData", success=False, error="Neutralization service not available")
+            # Lazily wire the db component so file access works in this context.
+            if not neutralizationService.interfaceDbComponent:
+                neutralizationService.interfaceDbComponent = services.chat.interfaceDbComponent
+            if text:
+                result = await neutralizationService.processTextAsync(text, fileId or None)
+            else:
+                result = neutralizationService.processFile(fileId)
+            if result:
+                # Fall back to `result` key, then the raw dict repr.
+                neutralized = result.get("neutralized_text", "") or result.get("result", str(result))
+                return ToolResult(toolCallId="", toolName="neutralizeData", success=True, data=neutralized)
+            return ToolResult(toolCallId="", toolName="neutralizeData", success=False, error="Neutralization returned no result")
+        except Exception as e:
+            return ToolResult(toolCallId="", toolName="neutralizeData", success=False, error=str(e))
+
+ async def _executeCode(args: Dict[str, Any], context: Dict[str, Any]):
+ code = args.get("code", "")
+ language = args.get("language", "python")
+ if not code:
+ return ToolResult(toolCallId="", toolName="executeCode", success=False, error="code is required")
+ if language != "python":
+ return ToolResult(toolCallId="", toolName="executeCode", success=False, error=f"Language '{language}' not supported. Only 'python' is available.")
+ try:
+ from modules.serviceCenter.services.serviceAgent.sandboxExecutor import executePython
+ result = await executePython(code)
+ if result.get("success"):
+ output = result.get("output", "(no output)")
+ return ToolResult(toolCallId="", toolName="executeCode", success=True, data=output)
+ error = result.get("error", "Execution failed")
+ tb = result.get("traceback", "")
+ return ToolResult(toolCallId="", toolName="executeCode", success=False, error=f"{error}\n{tb}" if tb else error)
+ except Exception as e:
+ return ToolResult(toolCallId="", toolName="executeCode", success=False, error=str(e))
+
+ registry.register(
+ "speechToText", _speechToText,
+ description="Transcribe an audio file to text using speech recognition. Returns the transcript with confidence score.",
+ parameters={
+ "type": "object",
+ "properties": {
+ "fileId": {"type": "string", "description": "Audio file ID from the workspace"},
+ "language": {"type": "string", "description": "BCP-47 language code (e.g. 'de-DE', 'en-US'). Default: 'de-DE'"},
+ },
+ "required": ["fileId"]
+ },
+ readOnly=True
+ )
+
+ registry.register(
+ "detectLanguage", _detectLanguage,
+ description="Detect the language of a text snippet. Returns ISO 639-1 code (e.g. 'de', 'en').",
+ parameters={
+ "type": "object",
+ "properties": {
+ "text": {"type": "string", "description": "Text to analyze"},
+ },
+ "required": ["text"]
+ },
+ readOnly=True
+ )
+
+ registry.register(
+ "neutralizeData", _neutralizeData,
+ description="Anonymize text or file content by replacing personal data (names, addresses, etc.) with placeholders. Non-destructive -- returns the anonymized copy.",
+ parameters={
+ "type": "object",
+ "properties": {
+ "text": {"type": "string", "description": "Text to anonymize"},
+ "fileId": {"type": "string", "description": "File ID to anonymize (alternative to text)"},
+ },
+ },
+ readOnly=True
+ )
+
+ registry.register(
+ "executeCode", _executeCode,
+ description=(
+ "Execute Python code in a sandboxed environment for calculations and data analysis. "
+ "Available modules: math, statistics, json, csv, re, datetime, collections, itertools, functools, decimal, fractions, random. "
+ "No file system, network, or OS access. Max 30s execution time. "
+ "Use print() to produce output."
+ ),
+ parameters={
+ "type": "object",
+ "properties": {
+ "code": {"type": "string", "description": "Python code to execute"},
+ "language": {"type": "string", "description": "Programming language (only 'python' supported)", "default": "python"},
+ },
+ "required": ["code"]
+ },
+ readOnly=True
+ )
diff --git a/modules/serviceCenter/services/serviceAgent/coreTools/_workspaceTools.py b/modules/serviceCenter/services/serviceAgent/coreTools/_workspaceTools.py
new file mode 100644
index 00000000..98ee94b9
--- /dev/null
+++ b/modules/serviceCenter/services/serviceAgent/coreTools/_workspaceTools.py
@@ -0,0 +1,955 @@
+# Copyright (c) 2025 Patrick Motsch
+# All rights reserved.
+"""Workspace and file management tools (read, write, search, folders, web, translate)."""
+
+import logging
+from typing import Any, Dict, List, Optional
+
+from modules.serviceCenter.services.serviceAgent.datamodelAgent import ToolResult
+from modules.serviceCenter.services.serviceAgent.toolRegistry import ToolRegistry
+
+from modules.serviceCenter.services.serviceAgent.coreTools._helpers import (
+ _getOrCreateInstanceFolder,
+ _getOrCreateTempFolder,
+ _looksLikeBinary,
+ _resolveFileScope,
+ _MAX_TOOL_RESULT_CHARS,
+)
+
+logger = logging.getLogger(__name__)
+
+
+import uuid as _uuid
+
+def _registerWorkspaceTools(registry: ToolRegistry, services):
+ """Auto-extracted from registerCoreTools."""
+ import uuid as _uuid
+
+ # ---- Read-only tools ----
+
+ def _applyOffsetLimit(text: str, offset: int = None, limit: int = None) -> str:
+ """Apply line-based offset/limit to text content, returning numbered lines."""
+ if offset is None and limit is None:
+ return None
+ lines = text.split("\n")
+ totalLines = len(lines)
+ startLine = max(0, (offset or 1) - 1)
+ endLine = min(totalLines, startLine + (limit or 200))
+ selected = lines[startLine:endLine]
+ numbered = "\n".join(f"{i + startLine + 1}|{line}" for i, line in enumerate(selected))
+ header = f"[Lines {startLine + 1}-{endLine} of {totalLines} total]\n"
+ return header + numbered
+
+ async def _readFile(args: Dict[str, Any], context: Dict[str, Any]):
+ fileId = args.get("fileId", "")
+ offset = args.get("offset")
+ limit = args.get("limit")
+ if not fileId:
+ return ToolResult(toolCallId="", toolName="readFile", success=False, error="fileId is required")
+ try:
+ knowledgeService = services.getService("knowledge") if hasattr(services, "getService") else None
+
+ # 1) Knowledge Store: return already-extracted text chunks
+ if knowledgeService:
+ fileStatus = knowledgeService.getFileStatus(fileId)
+ if fileStatus == "indexed":
+ chunks = knowledgeService._knowledgeDb.getContentChunks(fileId)
+ textChunks = [
+ c for c in (chunks or [])
+ if c.get("contentType") != "image" and c.get("data")
+ ]
+ if textChunks:
+ assembled = "\n\n".join(c["data"] for c in textChunks)
+ chunked = _applyOffsetLimit(assembled, offset, limit)
+ if chunked is not None:
+ return ToolResult(toolCallId="", toolName="readFile", success=True, data=chunked)
+ if len(assembled) > _MAX_TOOL_RESULT_CHARS:
+ assembled = assembled[:_MAX_TOOL_RESULT_CHARS] + f"\n\n[Truncated – showing first {_MAX_TOOL_RESULT_CHARS} chars of {len(assembled)}. Use offset/limit to read specific sections.]"
+ return ToolResult(
+ toolCallId="", toolName="readFile", success=True,
+ data=assembled,
+ )
+ elif fileStatus in ("processing", "embedding", "extracted"):
+ return ToolResult(
+ toolCallId="", toolName="readFile", success=True,
+ data=f"[File {fileId} is currently being processed (status: {fileStatus}). Try again shortly.]",
+ )
+
+ # 2) Not indexed yet: try on-demand extraction
+ chatService = services.chat
+ fileInfo = chatService.getFileInfo(fileId)
+ if not fileInfo:
+ return ToolResult(toolCallId="", toolName="readFile", success=True, data="File not found.")
+
+ fileName = fileInfo.get("fileName", fileId)
+ mimeType = fileInfo.get("mimeType", "")
+
+ _BINARY_TYPES = ("application/pdf", "image/", "application/vnd.", "application/zip",
+ "application/x-zip", "application/x-tar", "application/x-7z",
+ "application/msword", "application/octet-stream",
+ "message/rfc822")
+ isBinary = any(mimeType.startswith(t) for t in _BINARY_TYPES)
+
+ rawBytes = chatService.getFileData(fileId)
+ if not rawBytes:
+ return ToolResult(toolCallId="", toolName="readFile", success=True, data="File data not accessible.")
+
+ if not isBinary:
+ isBinary = _looksLikeBinary(rawBytes)
+
+ if isBinary:
+ try:
+ from modules.serviceCenter.services.serviceExtraction.subRegistry import ExtractorRegistry, ChunkerRegistry
+ from modules.serviceCenter.services.serviceExtraction.subPipeline import runExtraction
+ from modules.datamodels.datamodelExtraction import ExtractionOptions
+
+ extracted = runExtraction(
+ ExtractorRegistry(), ChunkerRegistry(),
+ rawBytes, fileName, mimeType, ExtractionOptions(),
+ )
+
+ contentObjects = []
+ for part in extracted.parts:
+ tg = (part.typeGroup or "").lower()
+ ct = "image" if tg == "image" else "text"
+ if not part.data or not part.data.strip():
+ continue
+ contentObjects.append({
+ "contentObjectId": part.id,
+ "contentType": ct,
+ "data": part.data,
+ "contextRef": {
+ "containerPath": fileName,
+ "location": part.label or "file",
+ **(part.metadata or {}),
+ },
+ })
+
+ if contentObjects:
+ if knowledgeService:
+ try:
+ userId = context.get("userId", "")
+ _fiId, _mId = _resolveFileScope(fileId, context)
+ await knowledgeService.indexFile(
+ fileId=fileId, fileName=fileName, mimeType=mimeType,
+ userId=userId, contentObjects=contentObjects,
+ featureInstanceId=_fiId,
+ mandateId=_mId,
+ )
+ except Exception as e:
+ logger.warning(f"readFile: knowledge indexing failed for {fileId}: {e}")
+
+ joined = ""
+ if knowledgeService:
+ _chunks = knowledgeService._knowledgeDb.getContentChunks(fileId)
+ _textChunks = [
+ c for c in (_chunks or [])
+ if c.get("contentType") != "image" and c.get("data")
+ ]
+ if _textChunks:
+ joined = "\n\n".join(c["data"] for c in _textChunks)
+ if not joined:
+ textParts = [o["data"] for o in contentObjects if o["contentType"] != "image"]
+ joined = "\n\n".join(textParts) if textParts else ""
+ if joined:
+ chunked = _applyOffsetLimit(joined, offset, limit)
+ if chunked is not None:
+ return ToolResult(toolCallId="", toolName="readFile", success=True, data=chunked)
+ if len(joined) > _MAX_TOOL_RESULT_CHARS:
+ joined = joined[:_MAX_TOOL_RESULT_CHARS] + f"\n\n[Truncated – showing first {_MAX_TOOL_RESULT_CHARS} chars of {len(joined)}. Use offset/limit to read specific sections.]"
+ return ToolResult(
+ toolCallId="", toolName="readFile", success=True,
+ data=joined,
+ )
+ imgCount = sum(1 for o in contentObjects if o["contentType"] == "image")
+ return ToolResult(
+ toolCallId="", toolName="readFile", success=True,
+ data=f"[Extracted {len(contentObjects)} content objects from '{fileName}' "
+ f"({imgCount} images, no readable text). "
+ f"Use describeImage(fileId='{fileId}') to analyze visual content.]",
+ )
+ except Exception as extractErr:
+ logger.warning(f"readFile extraction failed for {fileId} ({fileName}): {extractErr}")
+
+ return ToolResult(
+ toolCallId="", toolName="readFile", success=True,
+ data=f"[Binary file: '{fileName}', type={mimeType}, size={len(rawBytes)} bytes. "
+ f"Text extraction not available. Use describeImage for images.]",
+ )
+
+ # 3) Text file: decode raw bytes
+ for encoding in ("utf-8", "utf-8-sig", "latin-1"):
+ try:
+ text = rawBytes.decode(encoding)
+ if text.strip():
+ _fileNeedNeutralize = False
+ try:
+ from modules.datamodels.datamodelFiles import FileItem as _FI
+ from modules.interfaces.interfaceDbManagement import ComponentObjects as _CO
+ _fRec = _CO().db._loadRecord(_FI, fileId)
+ if _fRec:
+ _fG = (lambda k, d=None: _fRec.get(k, d)) if isinstance(_fRec, dict) else (lambda k, d=None: getattr(_fRec, k, d))
+ _fileNeedNeutralize = bool(_fG("neutralize", False))
+ except Exception as e:
+ logger.warning(f"readFile: neutralize flag check failed for {fileId}: {e}")
+ if _fileNeedNeutralize:
+ try:
+ _nSvc = services.getService("neutralization") if hasattr(services, "getService") else None
+ if _nSvc and hasattr(_nSvc, 'processTextAsync'):
+ _nResult = await _nSvc.processTextAsync(text, fileId)
+ if _nResult and _nResult.get("neutralized_text"):
+ text = _nResult["neutralized_text"]
+ logger.debug(f"readFile: neutralized text for file {fileId}")
+ else:
+ logger.warning(f"readFile: neutralization failed for file {fileId}, blocking text (fail-safe)")
+ return ToolResult(toolCallId="", toolName="readFile", success=True,
+ data="[File requires neutralization but neutralization failed. Content blocked for data protection.]")
+ else:
+ logger.warning(f"readFile: neutralization required but service unavailable for file {fileId}")
+ return ToolResult(toolCallId="", toolName="readFile", success=True,
+ data="[File requires neutralization but service unavailable. Content blocked for data protection.]")
+ except Exception as _nErr:
+ logger.error(f"readFile: neutralization error for file {fileId}: {_nErr}")
+ return ToolResult(toolCallId="", toolName="readFile", success=True,
+ data="[File requires neutralization but an error occurred. Content blocked for data protection.]")
+ chunked = _applyOffsetLimit(text, offset, limit)
+ if chunked is not None:
+ return ToolResult(toolCallId="", toolName="readFile", success=True, data=chunked)
+ if len(text) > _MAX_TOOL_RESULT_CHARS:
+ text = text[:_MAX_TOOL_RESULT_CHARS] + f"\n\n[Truncated – showing first {_MAX_TOOL_RESULT_CHARS} chars of {len(text)}. Use offset/limit to read specific sections.]"
+ return ToolResult(
+ toolCallId="", toolName="readFile", success=True,
+ data=text,
+ )
+ except (UnicodeDecodeError, ValueError):
+ continue
+
+ return ToolResult(
+ toolCallId="", toolName="readFile", success=True,
+ data="File is empty or could not be decoded.",
+ )
+ except Exception as e:
+ return ToolResult(toolCallId="", toolName="readFile", success=False, error=str(e))
+
+ async def _listFiles(args: Dict[str, Any], context: Dict[str, Any]):
+ try:
+ chatService = services.chat
+ files = chatService.listFiles(
+ folderId=args.get("folderId"),
+ tags=args.get("tags"),
+ search=args.get("search"),
+ )
+ fileList = "\n".join(
+ f"- {f.get('fileName', 'unknown')} (id: {f.get('id', '?')}, "
+ f"type: {f.get('mimeType', '?')}, size: {f.get('fileSize', '?')}, "
+ f"tags: {f.get('tags', [])}, status: {f.get('status', 'n/a')})"
+ for f in files
+ ) if files else "No files found."
+ return ToolResult(toolCallId="", toolName="listFiles", success=True, data=fileList)
+ except Exception as e:
+ return ToolResult(toolCallId="", toolName="listFiles", success=False, error=str(e))
+
+ async def _searchInFileContent(args: Dict[str, Any], context: Dict[str, Any]):
+ import re as _re
+ fileId = args.get("fileId", "")
+ query = args.get("query", "")
+ contextLines = args.get("contextLines", 2)
+ if not fileId or not query:
+ return ToolResult(toolCallId="", toolName="searchInFileContent", success=False, error="fileId and query are required")
+ try:
+ chatService = services.chat
+ rawBytes = chatService.getFileData(fileId)
+ if not rawBytes:
+ return ToolResult(toolCallId="", toolName="searchInFileContent", success=False, error="File data not accessible")
+ try:
+ content = rawBytes.decode("utf-8")
+ except UnicodeDecodeError:
+ content = rawBytes.decode("latin-1", errors="replace")
+
+ lines = content.split("\n")
+ pattern = _re.compile(_re.escape(query), _re.IGNORECASE)
+ matches = []
+ for i, line in enumerate(lines):
+ if pattern.search(line):
+ start = max(0, i - contextLines)
+ end = min(len(lines), i + contextLines + 1)
+ snippet = "\n".join(f"{j + 1}|{lines[j]}" for j in range(start, end))
+ matches.append(snippet)
+
+ if not matches:
+ return ToolResult(toolCallId="", toolName="searchInFileContent", success=True,
+ data=f"No matches for '{query}' in file.")
+
+ shown = matches[:20]
+ resultText = f"Found {len(matches)} match(es) for '{query}':\n\n" + "\n---\n".join(shown)
+ if len(matches) > 20:
+ resultText += f"\n\n... and {len(matches) - 20} more matches"
+ return ToolResult(toolCallId="", toolName="searchInFileContent", success=True, data=resultText)
+ except Exception as e:
+ return ToolResult(toolCallId="", toolName="searchInFileContent", success=False, error=str(e))
+
+ async def _listFolders(args: Dict[str, Any], context: Dict[str, Any]):
+ try:
+ chatService = services.chat
+ folders = chatService.listFolders(parentId=args.get("parentId"))
+ folderList = "\n".join(
+ f"- {f.get('name', 'unnamed')} (id: {f.get('id', '?')})"
+ for f in folders
+ ) if folders else "No folders found."
+ return ToolResult(toolCallId="", toolName="listFolders", success=True, data=folderList)
+ except Exception as e:
+ return ToolResult(toolCallId="", toolName="listFolders", success=False, error=str(e))
+
+ async def _webSearch(args: Dict[str, Any], context: Dict[str, Any]):
+ query = args.get("query", "")
+ if not query:
+ return ToolResult(toolCallId="", toolName="webSearch", success=False, error="query is required")
+ try:
+ webService = services.getService("web")
+ result = await webService.performWebResearch(
+ prompt=query,
+ urls=[],
+ country=None,
+ language=args.get("language"),
+ )
+ summary = result.get("summary", "") if isinstance(result, dict) else str(result)
+ return ToolResult(
+ toolCallId="", toolName="webSearch", success=True,
+ data=summary or str(result)
+ )
+ except Exception as e:
+ return ToolResult(toolCallId="", toolName="webSearch", success=False, error=str(e))
+
+ # ---- Write tools ----
+
+ async def _tagFile(args: Dict[str, Any], context: Dict[str, Any]):
+ fileId = args.get("fileId", "")
+ tags = args.get("tags", [])
+ if not fileId:
+ return ToolResult(toolCallId="", toolName="tagFile", success=False, error="fileId is required")
+ try:
+ chatService = services.chat
+ chatService.interfaceDbComponent.updateFile(fileId, {"tags": tags})
+ return ToolResult(
+ toolCallId="", toolName="tagFile", success=True,
+ data=f"Tags updated to {tags} for file {fileId}"
+ )
+ except Exception as e:
+ return ToolResult(toolCallId="", toolName="tagFile", success=False, error=str(e))
+
+ async def _moveFile(args: Dict[str, Any], context: Dict[str, Any]):
+ fileId = args.get("fileId", "")
+ targetFolderId = args.get("targetFolderId")
+ if not fileId:
+ return ToolResult(toolCallId="", toolName="moveFile", success=False, error="fileId is required")
+ try:
+ chatService = services.chat
+ chatService.interfaceDbComponent.updateFile(fileId, {"folderId": targetFolderId})
+ return ToolResult(
+ toolCallId="", toolName="moveFile", success=True,
+ data=f"File {fileId} moved to folder {targetFolderId or 'root'}"
+ )
+ except Exception as e:
+ return ToolResult(toolCallId="", toolName="moveFile", success=False, error=str(e))
+
+ async def _createFolder(args: Dict[str, Any], context: Dict[str, Any]):
+ name = args.get("name", "")
+ if not name:
+ return ToolResult(toolCallId="", toolName="createFolder", success=False, error="name is required")
+ try:
+ chatService = services.chat
+ folder = chatService.createFolder(name=name, parentId=args.get("parentId"))
+ return ToolResult(
+ toolCallId="", toolName="createFolder", success=True,
+ data=f"Folder '{name}' created (id: {folder.get('id', '?')})"
+ )
+ except Exception as e:
+ return ToolResult(toolCallId="", toolName="createFolder", success=False, error=str(e))
+
+ async def _writeFile(args: Dict[str, Any], context: Dict[str, Any]):
+ content = args.get("content", "")
+ mode = args.get("mode", "create")
+ fileId = args.get("fileId", "")
+ name = args.get("name", "")
+
+ if not content:
+ return ToolResult(toolCallId="", toolName="writeFile", success=False, error="content is required")
+
+ try:
+ chatService = services.chat
+ dbMgmt = chatService.interfaceDbComponent
+
+ if mode == "append":
+ if not fileId:
+ return ToolResult(toolCallId="", toolName="writeFile", success=False, error="fileId is required for mode=append")
+ file = dbMgmt.getFile(fileId)
+ if not file:
+ return ToolResult(toolCallId="", toolName="writeFile", success=False, error=f"File {fileId} not found")
+ existingData = dbMgmt.getFileData(fileId) or b""
+ try:
+ existingText = existingData.decode("utf-8")
+ except UnicodeDecodeError:
+ existingText = existingData.decode("latin-1", errors="replace")
+ newContent = existingText + content
+ dbMgmt.updateFileData(fileId, newContent.encode("utf-8"))
+ dbMgmt.updateFile(fileId, {"fileSize": len(newContent.encode("utf-8"))})
+ return ToolResult(
+ toolCallId="", toolName="writeFile", success=True,
+ data=f"Appended {len(content)} chars to '{file.fileName}' (id: {fileId}, total: {len(newContent)} chars)",
+ sideEvents=[{"type": "fileUpdated", "data": {"fileId": fileId, "fileName": file.fileName}}],
+ )
+
+ if mode == "overwrite":
+ if not fileId:
+ return ToolResult(toolCallId="", toolName="writeFile", success=False, error="fileId is required for mode=overwrite")
+ file = dbMgmt.getFile(fileId)
+ if not file:
+ return ToolResult(toolCallId="", toolName="writeFile", success=False, error=f"File {fileId} not found")
+ dbMgmt.updateFileData(fileId, content.encode("utf-8"))
+ dbMgmt.updateFile(fileId, {"fileSize": len(content.encode("utf-8"))})
+ return ToolResult(
+ toolCallId="", toolName="writeFile", success=True,
+ data=f"Overwritten '{file.fileName}' (id: {fileId}, {len(content)} chars)",
+ sideEvents=[{"type": "fileUpdated", "data": {"fileId": fileId, "fileName": file.fileName}}],
+ )
+
+ # mode == "create" (default)
+ if not name:
+ return ToolResult(toolCallId="", toolName="writeFile", success=False, error="name is required for mode=create")
+ fileItem, _ = dbMgmt.saveUploadedFile(content.encode("utf-8"), name)
+ fiId = context.get("featureInstanceId") or (services.featureInstanceId if services else "")
+ if fiId:
+ dbMgmt.updateFile(fileItem.id, {"featureInstanceId": fiId})
+ if args.get("folderId"):
+ dbMgmt.updateFile(fileItem.id, {"folderId": args["folderId"]})
+ elif fiId:
+ instanceFolderId = _getOrCreateInstanceFolder(chatService, fiId, context.get("mandateId", ""))
+ if instanceFolderId:
+ dbMgmt.updateFile(fileItem.id, {"folderId": instanceFolderId})
+ if args.get("tags"):
+ dbMgmt.updateFile(fileItem.id, {"tags": args["tags"]})
+ return ToolResult(
+ toolCallId="", toolName="writeFile", success=True,
+ data=f"File '{name}' created (id: {fileItem.id})",
+ sideEvents=[{
+ "type": "fileCreated",
+ "data": {
+ "fileId": fileItem.id,
+ "fileName": name,
+ "mimeType": fileItem.mimeType,
+ "fileSize": fileItem.fileSize,
+ },
+ }],
+ )
+ except Exception as e:
+ return ToolResult(toolCallId="", toolName="writeFile", success=False, error=str(e))
+
+ # ---- Register all tools ----
+
+ registry.register(
+ "readFile", _readFile,
+ description=(
+ "Read the content of a file. Returns full content by default. "
+ "For large files, use offset and limit to read specific line ranges. "
+ "When truncated, the response tells the total line count so you can paginate."
+ ),
+ parameters={
+ "type": "object",
+ "properties": {
+ "fileId": {"type": "string", "description": "The file ID to read"},
+ "offset": {"type": "integer", "description": "Start reading from this line number (1-based). Omit for full file."},
+ "limit": {"type": "integer", "description": "Max number of lines to return (default: all). Use with offset for chunked reading."},
+ },
+ "required": ["fileId"]
+ },
+ readOnly=True
+ )
+
+ registry.register(
+ "listFiles", _listFiles,
+ description=(
+ "List files in the local workspace. Filter by folder, tags, or search term. "
+ "For external data sources, use browseDataSource instead."
+ ),
+ parameters={
+ "type": "object",
+ "properties": {
+ "folderId": {"type": "string", "description": "Filter by folder ID"},
+ "tags": {"type": "array", "items": {"type": "string"}, "description": "Filter by tags (any match)"},
+ "search": {"type": "string", "description": "Search in file names and descriptions"},
+ }
+ },
+ readOnly=True
+ )
+
+ registry.register(
+ "searchInFileContent", _searchInFileContent,
+ description=(
+ "Search for text within a file's content. Returns matching lines with context. "
+ "Case-insensitive. Use to locate specific text before using replaceInFile, "
+ "or to find relevant sections in a large file before reading with offset/limit."
+ ),
+ parameters={
+ "type": "object",
+ "properties": {
+ "fileId": {"type": "string", "description": "The file ID to search in"},
+ "query": {"type": "string", "description": "Text to search for (case-insensitive)"},
+ "contextLines": {"type": "integer", "description": "Number of context lines around each match (default: 2)"},
+ },
+ "required": ["fileId", "query"]
+ },
+ readOnly=True
+ )
+
+ registry.register(
+ "listFolders", _listFolders,
+ description="List folders in the local workspace. For external data sources, use browseDataSource instead.",
+ parameters={
+ "type": "object",
+ "properties": {
+ "parentId": {"type": "string", "description": "Parent folder ID (omit for root)"},
+ }
+ },
+ readOnly=True
+ )
+
+ registry.register(
+ "webSearch", _webSearch,
+ description="Search the web for general information. Use readUrl to fetch content from a known URL instead.",
+ parameters={
+ "type": "object",
+ "properties": {"query": {"type": "string", "description": "Search query"}},
+ "required": ["query"]
+ },
+ readOnly=True
+ )
+
+ registry.register(
+ "tagFile", _tagFile,
+ description="Set or update tags on a file for categorization and filtering via listFiles.",
+ parameters={
+ "type": "object",
+ "properties": {
+ "fileId": {"type": "string", "description": "The file ID"},
+ "tags": {"type": "array", "items": {"type": "string"}, "description": "Tags to set"},
+ },
+ "required": ["fileId", "tags"]
+ },
+ readOnly=False
+ )
+
+ registry.register(
+ "moveFile", _moveFile,
+ description="Move a file to a different folder in the local workspace.",
+ parameters={
+ "type": "object",
+ "properties": {
+ "fileId": {"type": "string", "description": "The file ID to move"},
+ "targetFolderId": {"type": "string", "description": "Target folder ID (null for root)"},
+ },
+ "required": ["fileId"]
+ },
+ readOnly=False
+ )
+
+ registry.register(
+ "createFolder", _createFolder,
+ description="Create a new folder in the local workspace.",
+ parameters={
+ "type": "object",
+ "properties": {
+ "name": {"type": "string", "description": "Folder name"},
+ "parentId": {"type": "string", "description": "Parent folder ID (omit for root)"},
+ },
+ "required": ["name"]
+ },
+ readOnly=False
+ )
+
+ registry.register(
+ "writeFile", _writeFile,
+ description=(
+ "Create, append, or overwrite a file. Modes:\n"
+ "- create (default): create a new file (name required).\n"
+ "- append: append content to an existing file (fileId required). "
+ "Use for large content that exceeds a single tool call (~8000 chars per call).\n"
+ "- overwrite: replace entire file content (fileId required)."
+ ),
+ parameters={
+ "type": "object",
+ "properties": {
+ "name": {"type": "string", "description": "File name (required for mode=create)"},
+ "content": {"type": "string", "description": "Content to write/append"},
+ "mode": {"type": "string", "enum": ["create", "append", "overwrite"], "description": "Write mode (default: create)"},
+ "fileId": {"type": "string", "description": "File ID (required for mode=append/overwrite)"},
+ "folderId": {"type": "string", "description": "Target folder ID (mode=create only)"},
+ "tags": {"type": "array", "items": {"type": "string"}, "description": "Tags (mode=create only)"},
+ },
+ "required": ["content"]
+ },
+ readOnly=False
+ )
+
+ # ---- Phase 1: deleteFile, renameFile, readUrl, translateText ----
+
+ async def _deleteFile(args: Dict[str, Any], context: Dict[str, Any]):
+ fileId = args.get("fileId", "")
+ if not fileId:
+ return ToolResult(toolCallId="", toolName="deleteFile", success=False, error="fileId is required")
+ try:
+ chatService = services.chat
+ file = chatService.interfaceDbComponent.getFile(fileId)
+ if not file:
+ return ToolResult(toolCallId="", toolName="deleteFile", success=False, error=f"File {fileId} not found")
+ fileName = file.fileName
+ try:
+ knowledgeService = services.getService("knowledge")
+ if knowledgeService and hasattr(knowledgeService, "removeFile"):
+ knowledgeService.removeFile(fileId)
+ except Exception as e:
+ logger.warning(f"deleteFile: knowledge store cleanup failed for {fileId}: {e}")
+ chatService.interfaceDbComponent.deleteFile(fileId)
+ return ToolResult(
+ toolCallId="", toolName="deleteFile", success=True,
+ data=f"File '{fileName}' (id: {fileId}) deleted",
+ sideEvents=[{"type": "fileDeleted", "data": {"fileId": fileId, "fileName": fileName}}],
+ )
+ except Exception as e:
+ return ToolResult(toolCallId="", toolName="deleteFile", success=False, error=str(e))
+
+ async def _renameFile(args: Dict[str, Any], context: Dict[str, Any]):
+ fileId = args.get("fileId", "")
+ newName = args.get("newName", "")
+ if not fileId or not newName:
+ return ToolResult(toolCallId="", toolName="renameFile", success=False, error="fileId and newName are required")
+ try:
+ chatService = services.chat
+ chatService.interfaceDbComponent.updateFile(fileId, {"fileName": newName})
+ return ToolResult(
+ toolCallId="", toolName="renameFile", success=True,
+ data=f"File {fileId} renamed to '{newName}'",
+ sideEvents=[{"type": "fileUpdated", "data": {"fileId": fileId, "fileName": newName}}],
+ )
+ except Exception as e:
+ return ToolResult(toolCallId="", toolName="renameFile", success=False, error=str(e))
+
+ async def _readUrl(args: Dict[str, Any], context: Dict[str, Any]):
+ url = args.get("url", "")
+ if not url:
+ return ToolResult(toolCallId="", toolName="readUrl", success=False, error="url is required")
+ try:
+ webService = services.getService("web")
+ result = await webService._performWebCrawl(
+ instruction="Extract all content from this page",
+ urls=[url],
+ maxDepth=1,
+ maxWidth=1,
+ )
+ if isinstance(result, list) and result:
+ content = "\n\n".join(
+ item.get("content", "") or item.get("text", "") or str(item)
+ for item in result if item
+ )
+ elif isinstance(result, dict):
+ content = result.get("content", "") or result.get("summary", "") or str(result)
+ else:
+ content = str(result) if result else "No content retrieved"
+ _MAX = 30000
+ if len(content) > _MAX:
+ content = content[:_MAX] + f"\n\n... (truncated at {_MAX} chars)"
+ return ToolResult(toolCallId="", toolName="readUrl", success=True, data=content)
+ except Exception as e:
+ return ToolResult(toolCallId="", toolName="readUrl", success=False, error=str(e))
+
+ async def _translateText(args: Dict[str, Any], context: Dict[str, Any]):
+ text = args.get("text", "")
+ targetLanguage = args.get("targetLanguage", "")
+ if not text or not targetLanguage:
+ return ToolResult(toolCallId="", toolName="translateText", success=False, error="text and targetLanguage are required")
+ try:
+ from modules.interfaces.interfaceVoiceObjects import getVoiceInterface
+ mandateId = context.get("mandateId", "")
+ voiceInterface = getVoiceInterface(currentUser=None, mandateId=mandateId)
+ sourceLanguage = args.get("sourceLanguage", "auto")
+ result = await voiceInterface.translateText(text, sourceLanguage=sourceLanguage, targetLanguage=targetLanguage)
+ if result and result.get("success"):
+ translated = result.get("translated_text", "")
+ return ToolResult(toolCallId="", toolName="translateText", success=True, data=translated)
+ return ToolResult(toolCallId="", toolName="translateText", success=False, error=result.get("error", "Translation failed"))
+ except Exception as e:
+ return ToolResult(toolCallId="", toolName="translateText", success=False, error=str(e))
+
+ registry.register(
+ "deleteFile", _deleteFile,
+ description="Permanently delete a file from the local workspace.",
+ parameters={
+ "type": "object",
+ "properties": {
+ "fileId": {"type": "string", "description": "The file ID to delete"},
+ },
+ "required": ["fileId"]
+ },
+ readOnly=False
+ )
+
+ registry.register(
+ "renameFile", _renameFile,
+ description="Rename a file in the local workspace. Include the file extension in the new name.",
+ parameters={
+ "type": "object",
+ "properties": {
+ "fileId": {"type": "string", "description": "The file ID to rename"},
+ "newName": {"type": "string", "description": "New file name including extension"},
+ },
+ "required": ["fileId", "newName"]
+ },
+ readOnly=False
+ )
+
+ registry.register(
+ "readUrl", _readUrl,
+ description=(
+ "Read and extract content from a specific URL. "
+ "Use when the user provides a specific URL to read, or when you need to fetch content from a known web page. "
+ "For general information searches, use webSearch instead."
+ ),
+ parameters={
+ "type": "object",
+ "properties": {
+ "url": {"type": "string", "description": "The URL to read"},
+ },
+ "required": ["url"]
+ },
+ readOnly=True
+ )
+
+ registry.register(
+ "translateText", _translateText,
+ description=(
+ "Translate text to a target language using Google Cloud Translation. "
+ "More efficient than AI translation for large text volumes. "
+ "Use ISO language codes (e.g. 'en', 'de', 'fr', 'es', 'it', 'pt', 'zh', 'ja', 'ko', 'ar')."
+ ),
+ parameters={
+ "type": "object",
+ "properties": {
+ "text": {"type": "string", "description": "Text to translate"},
+ "targetLanguage": {"type": "string", "description": "Target language ISO code (e.g. 'en', 'de', 'fr')"},
+ "sourceLanguage": {"type": "string", "description": "Source language ISO code (default: auto-detect)"},
+ },
+ "required": ["text", "targetLanguage"]
+ },
+ readOnly=True
+ )
+
+ # ---- Phase 2: deleteFolder, renameFolder, moveFolder, copyFile, editFile ----
+
+ async def _deleteFolder(args: Dict[str, Any], context: Dict[str, Any]):
+ folderId = args.get("folderId", "")
+ recursive = args.get("recursive", False)
+ if not folderId:
+ return ToolResult(toolCallId="", toolName="deleteFolder", success=False, error="folderId is required")
+ try:
+ chatService = services.chat
+ result = chatService.interfaceDbComponent.deleteFolder(folderId, recursive=recursive)
+ summary = f"Deleted {result.get('deletedFolders', 1)} folder(s) and {result.get('deletedFiles', 0)} file(s)"
+ return ToolResult(
+ toolCallId="", toolName="deleteFolder", success=True, data=summary,
+ sideEvents=[{"type": "folderDeleted", "data": {"folderId": folderId, **result}}],
+ )
+ except Exception as e:
+ return ToolResult(toolCallId="", toolName="deleteFolder", success=False, error=str(e))
+
+ async def _renameFolder(args: Dict[str, Any], context: Dict[str, Any]):
+ folderId = args.get("folderId", "")
+ newName = args.get("newName", "")
+ if not folderId or not newName:
+ return ToolResult(toolCallId="", toolName="renameFolder", success=False, error="folderId and newName are required")
+ try:
+ chatService = services.chat
+ chatService.interfaceDbComponent.renameFolder(folderId, newName)
+ return ToolResult(
+ toolCallId="", toolName="renameFolder", success=True,
+ data=f"Folder {folderId} renamed to '{newName}'",
+ sideEvents=[{"type": "folderUpdated", "data": {"folderId": folderId, "name": newName}}],
+ )
+ except Exception as e:
+ return ToolResult(toolCallId="", toolName="renameFolder", success=False, error=str(e))
+
+ async def _moveFolder(args: Dict[str, Any], context: Dict[str, Any]):
+ folderId = args.get("folderId", "")
+ targetParentId = args.get("targetParentId")
+ if not folderId:
+ return ToolResult(toolCallId="", toolName="moveFolder", success=False, error="folderId is required")
+ try:
+ chatService = services.chat
+ chatService.interfaceDbComponent.moveFolder(folderId, targetParentId)
+ return ToolResult(
+ toolCallId="", toolName="moveFolder", success=True,
+ data=f"Folder {folderId} moved to {targetParentId or 'root'}",
+ sideEvents=[{"type": "folderUpdated", "data": {"folderId": folderId, "parentId": targetParentId}}],
+ )
+ except Exception as e:
+ return ToolResult(toolCallId="", toolName="moveFolder", success=False, error=str(e))
+
+ async def _copyFile(args: Dict[str, Any], context: Dict[str, Any]):
+ fileId = args.get("fileId", "")
+ if not fileId:
+ return ToolResult(toolCallId="", toolName="copyFile", success=False, error="fileId is required")
+ try:
+ chatService = services.chat
+ copiedFile = chatService.interfaceDbComponent.copyFile(
+ fileId,
+ targetFolderId=args.get("targetFolderId"),
+ newFileName=args.get("newFileName"),
+ )
+ return ToolResult(
+ toolCallId="", toolName="copyFile", success=True,
+ data=f"File copied as '{copiedFile.fileName}' (id: {copiedFile.id})",
+ sideEvents=[{
+ "type": "fileCreated",
+ "data": {"fileId": copiedFile.id, "fileName": copiedFile.fileName,
+ "mimeType": copiedFile.mimeType, "fileSize": copiedFile.fileSize},
+ }],
+ )
+ except Exception as e:
+ return ToolResult(toolCallId="", toolName="copyFile", success=False, error=str(e))
+
+ async def _replaceInFile(args: Dict[str, Any], context: Dict[str, Any]):
+ fileId = args.get("fileId", "")
+ oldText = args.get("oldText", "")
+ newText = args.get("newText", "")
+ replaceAll = args.get("replaceAll", False)
+ if not fileId or not oldText:
+ return ToolResult(toolCallId="", toolName="replaceInFile", success=False, error="fileId and oldText are required")
+ try:
+ chatService = services.chat
+ dbMgmt = chatService.interfaceDbComponent
+ file = dbMgmt.getFile(fileId)
+ if not file:
+ return ToolResult(toolCallId="", toolName="replaceInFile", success=False, error=f"File {fileId} not found")
+ if not dbMgmt.isTextMimeType(file.mimeType):
+ return ToolResult(
+ toolCallId="", toolName="replaceInFile", success=False,
+ error=f"Cannot edit binary file ({file.mimeType}). Only text-based files are supported."
+ )
+ rawData = dbMgmt.getFileData(fileId)
+ if not rawData:
+ return ToolResult(toolCallId="", toolName="replaceInFile", success=False, error="File has no content")
+ try:
+ oldContent = rawData.decode("utf-8")
+ except UnicodeDecodeError:
+ return ToolResult(toolCallId="", toolName="replaceInFile", success=False, error="File content is not valid UTF-8 text")
+
+ count = oldContent.count(oldText)
+ if count == 0:
+ return ToolResult(
+ toolCallId="", toolName="replaceInFile", success=False,
+ error="oldText not found in file. Use readFile or searchInFileContent to verify the exact text."
+ )
+ if count > 1 and not replaceAll:
+ return ToolResult(
+ toolCallId="", toolName="replaceInFile", success=False,
+ error=f"oldText found {count} times. Set replaceAll=true or provide more surrounding context to make it unique."
+ )
+
+ newContent = oldContent.replace(oldText, newText) if replaceAll else oldContent.replace(oldText, newText, 1)
+
+ editId = str(_uuid.uuid4())
+ label = f"all {count} occurrences" if replaceAll else "1 occurrence"
+ return ToolResult(
+ toolCallId="", toolName="replaceInFile", success=True,
+ data=f"Edit proposed for '{file.fileName}': replaced {label}. Waiting for user review.",
+ sideEvents=[{
+ "type": "fileEditProposal",
+ "data": {
+ "id": editId,
+ "fileId": fileId,
+ "fileName": file.fileName,
+ "mimeType": file.mimeType,
+ "oldContent": oldContent,
+ "newContent": newContent,
+ },
+ }],
+ )
+ except Exception as e:
+ return ToolResult(toolCallId="", toolName="replaceInFile", success=False, error=str(e))
+
+ registry.register(
+ "deleteFolder", _deleteFolder,
+ description="Delete a folder from the local workspace. Set recursive=true to delete all contents.",
+ parameters={
+ "type": "object",
+ "properties": {
+ "folderId": {"type": "string", "description": "The folder ID to delete"},
+ "recursive": {"type": "boolean", "description": "If true, delete folder and all contents (files and subfolders). Default: false"},
+ },
+ "required": ["folderId"]
+ },
+ readOnly=False
+ )
+
+ registry.register(
+ "renameFolder", _renameFolder,
+ description="Rename a folder in the local workspace.",
+ parameters={
+ "type": "object",
+ "properties": {
+ "folderId": {"type": "string", "description": "The folder ID to rename"},
+ "newName": {"type": "string", "description": "New folder name"},
+ },
+ "required": ["folderId", "newName"]
+ },
+ readOnly=False
+ )
+
+ registry.register(
+ "moveFolder", _moveFolder,
+ description="Move a folder to a different parent in the local workspace.",
+ parameters={
+ "type": "object",
+ "properties": {
+ "folderId": {"type": "string", "description": "The folder ID to move"},
+ "targetParentId": {"type": "string", "description": "Target parent folder ID (null/omit for root)"},
+ },
+ "required": ["folderId"]
+ },
+ readOnly=False
+ )
+
+ registry.register(
+ "copyFile", _copyFile,
+ description="Create an independent copy of a file in the local workspace.",
+ parameters={
+ "type": "object",
+ "properties": {
+ "fileId": {"type": "string", "description": "The file ID to copy"},
+ "targetFolderId": {"type": "string", "description": "Target folder for the copy (default: same folder)"},
+ "newFileName": {"type": "string", "description": "New file name (default: same name, auto-numbered if duplicate)"},
+ },
+ "required": ["fileId"]
+ },
+ readOnly=False
+ )
+
+ registry.register(
+ "replaceInFile", _replaceInFile,
+ description=(
+ "Replace specific text in an existing file. The edit is shown to the user for "
+ "review (accept/reject) before being applied. Provide enough surrounding context "
+ "in oldText to make the match unique (at least 2-3 lines). "
+ "Use readFile or searchInFileContent first to identify the exact text to replace."
+ ),
+ parameters={
+ "type": "object",
+ "properties": {
+ "fileId": {"type": "string", "description": "The file ID to edit"},
+ "oldText": {"type": "string", "description": "Exact text to find and replace (must be unique unless replaceAll=true)"},
+ "newText": {"type": "string", "description": "The replacement text"},
+ "replaceAll": {"type": "boolean", "description": "Replace all occurrences (default: false)"},
+ },
+ "required": ["fileId", "oldText", "newText"]
+ },
+ readOnly=False
+ )
+
diff --git a/modules/serviceCenter/services/serviceAgent/coreTools/registerCore.py b/modules/serviceCenter/services/serviceAgent/coreTools/registerCore.py
new file mode 100644
index 00000000..234da7d8
--- /dev/null
+++ b/modules/serviceCenter/services/serviceAgent/coreTools/registerCore.py
@@ -0,0 +1,27 @@
+# Copyright (c) 2025 Patrick Motsch
+# All rights reserved.
+"""Orchestrator: registers all core agent tools by delegating to domain modules."""
+
+from modules.serviceCenter.services.serviceAgent.toolRegistry import ToolRegistry
+
+from modules.serviceCenter.services.serviceAgent.coreTools._workspaceTools import _registerWorkspaceTools
+from modules.serviceCenter.services.serviceAgent.coreTools._connectionTools import _registerConnectionTools
+from modules.serviceCenter.services.serviceAgent.coreTools._dataSourceTools import _registerDataSourceTools
+from modules.serviceCenter.services.serviceAgent.coreTools._documentTools import _registerDocumentTools
+from modules.serviceCenter.services.serviceAgent.coreTools._mediaTools import _registerMediaTools
+from modules.serviceCenter.services.serviceAgent.coreTools._featureSubAgentTools import _registerFeatureSubAgentTools
+from modules.serviceCenter.services.serviceAgent.coreTools._crossWorkflowTools import _registerCrossWorkflowTools
+
+
+def registerCoreTools(registry: ToolRegistry, services):
+ """Register all built-in core tools on the agent ToolRegistry.
+
+ Delegates to domain-specific modules under coreTools/.
+ """
+ _registerWorkspaceTools(registry, services)
+ _registerConnectionTools(registry, services)
+ _registerDataSourceTools(registry, services)
+ _registerDocumentTools(registry, services)
+ _registerMediaTools(registry, services)
+ _registerFeatureSubAgentTools(registry, services)
+ _registerCrossWorkflowTools(registry, services)
diff --git a/modules/serviceCenter/services/serviceAgent/datamodelAgent.py b/modules/serviceCenter/services/serviceAgent/datamodelAgent.py
index f682e705..053569b0 100644
--- a/modules/serviceCenter/services/serviceAgent/datamodelAgent.py
+++ b/modules/serviceCenter/services/serviceAgent/datamodelAgent.py
@@ -6,6 +6,7 @@ from typing import List, Dict, Any, Optional
from enum import Enum
from pydantic import BaseModel, Field
from modules.shared.timeUtils import getUtcTimestamp
+from modules.datamodels.datamodelAi import OperationTypeEnum
import uuid
@@ -88,7 +89,10 @@ class AgentConfig(BaseModel):
maxRounds: int = Field(default=25, ge=1, le=100)
maxCostCHF: Optional[float] = Field(default=None, ge=0.0)
toolSet: str = Field(default="core")
+ initialToolboxes: List[str] = Field(default_factory=lambda: ["core"])
+ availableToolboxes: List[str] = Field(default_factory=list)
temperature: Optional[float] = Field(default=None, ge=0.0, le=2.0)
+ operationType: Optional[OperationTypeEnum] = Field(default=None, description="Override the default AGENT operationType for model selection")
class AgentState(BaseModel):
diff --git a/modules/serviceCenter/services/serviceAgent/featureDataAgent.py b/modules/serviceCenter/services/serviceAgent/featureDataAgent.py
index 8ef0bfcc..43dbb9d7 100644
--- a/modules/serviceCenter/services/serviceAgent/featureDataAgent.py
+++ b/modules/serviceCenter/services/serviceAgent/featureDataAgent.py
@@ -21,11 +21,12 @@ from modules.serviceCenter.services.serviceAgent.datamodelAgent import (
)
from modules.serviceCenter.services.serviceAgent.toolRegistry import ToolRegistry
from modules.serviceCenter.services.serviceAgent.featureDataProvider import FeatureDataProvider
+from modules.shared.i18nRegistry import resolveText
logger = logging.getLogger(__name__)
-_MAX_ROUNDS = 5
-_MAX_COST_CHF = 0.10
+_MAX_ROUNDS = 8
+_MAX_COST_CHF = 0.15
async def runFeatureDataAgent(
@@ -39,6 +40,7 @@ async def runFeatureDataAgent(
dbConnector,
instanceLabel: str = "",
tableFilters: Optional[Dict[str, Dict[str, str]]] = None,
+ requestLang: Optional[str] = None,
) -> str:
"""Run the feature data sub-agent and return the textual result.
@@ -53,6 +55,7 @@ async def runFeatureDataAgent(
dbConnector: DatabaseConnector for queries.
instanceLabel: Human-readable instance name for context.
tableFilters: Per-table record filters from FeatureDataSource.recordFilter.
+ requestLang: ISO 639-1 code for resolving multilingual table labels in the schema prompt.
Returns:
Plain-text answer produced by the sub-agent.
@@ -69,25 +72,37 @@ async def runFeatureDataAgent(
if realCols:
meta["fields"] = realCols
- schemaContext = _buildSchemaContext(featureCode, instanceLabel, selectedTables)
- prompt = f"{schemaContext}\n\nUser question:\n{question}"
+ systemPrompt = _buildSchemaContext(featureCode, instanceLabel, selectedTables, requestLang)
- config = AgentConfig(maxRounds=_MAX_ROUNDS, maxCostCHF=_MAX_COST_CHF)
+ config = AgentConfig(
+ maxRounds=_MAX_ROUNDS,
+ maxCostCHF=_MAX_COST_CHF,
+ operationType=OperationTypeEnum.DATA_QUERY,
+ )
+
+ costAccumulator = 0.0
+
+ async def _trackingAiCallFn(req):
+ nonlocal costAccumulator
+ resp = await aiCallFn(req)
+ costAccumulator += resp.priceCHF
+ return resp
async def _getWorkflowCost() -> float:
- return 0.0
+ return costAccumulator
result = ""
async for event in runAgentLoop(
- prompt=prompt,
+ prompt=question,
toolRegistry=registry,
config=config,
- aiCallFn=aiCallFn,
+ aiCallFn=_trackingAiCallFn,
getWorkflowCostFn=_getWorkflowCost,
workflowId=f"fda-{featureInstanceId[:8]}",
userId=userId,
featureInstanceId=featureInstanceId,
mandateId=mandateId,
+ systemPromptOverride=systemPrompt,
):
if event.type == AgentEventTypeEnum.FINAL and event.content:
result = event.content
@@ -168,6 +183,53 @@ def _buildSubAgentTools(
error=result.get("error"),
)
+ async def _aggregateTable(args: Dict[str, Any], context: Dict[str, Any]):
+ tableName = args.get("tableName", "")
+ aggregate = args.get("aggregate", "")
+ field = args.get("field", "")
+ groupBy = args.get("groupBy")
+ if not tableName:
+ return ToolResult(toolCallId="", toolName="aggregateTable", success=False, error="tableName required")
+ if not aggregate:
+ return ToolResult(toolCallId="", toolName="aggregateTable", success=False, error="aggregate required (SUM, COUNT, AVG, MIN, MAX)")
+ if not field:
+ return ToolResult(toolCallId="", toolName="aggregateTable", success=False, error="field required")
+ result = provider.aggregateTable(
+ tableName=tableName,
+ featureInstanceId=featureInstanceId,
+ mandateId=mandateId,
+ aggregate=aggregate,
+ field=field,
+ groupBy=groupBy,
+ extraFilters=_recordFilterToList(tableName),
+ )
+ return ToolResult(
+ toolCallId="", toolName="aggregateTable",
+ success="error" not in result,
+ data=json.dumps(result, default=str, ensure_ascii=False)[:30000],
+ error=result.get("error"),
+ )
+
+ registry.register(
+ "aggregateTable", _aggregateTable,
+ description=(
+ "Run an aggregate query on a feature data table. "
+ "Supports SUM, COUNT, AVG, MIN, MAX with optional GROUP BY. "
+ "Example: aggregateTable(tableName='TrusteeDataJournalLine', aggregate='SUM', field='debitAmount', groupBy='costCenter')"
+ ),
+ parameters={
+ "type": "object",
+ "properties": {
+ "tableName": {"type": "string", "description": "Name of the table to aggregate"},
+ "aggregate": {"type": "string", "enum": ["SUM", "COUNT", "AVG", "MIN", "MAX"], "description": "Aggregate function"},
+ "field": {"type": "string", "description": "Field to aggregate (e.g. debitAmount, creditAmount)"},
+ "groupBy": {"type": "string", "description": "Optional field to group by (e.g. costCenter, accountNumber)"},
+ },
+ "required": ["tableName", "aggregate", "field"],
+ },
+ readOnly=True,
+ )
+
registry.register(
"browseTable", _browseTable,
description="List rows from a feature data table with pagination.",
@@ -234,33 +296,45 @@ def _buildSchemaContext(
featureCode: str,
instanceLabel: str,
selectedTables: List[Dict[str, Any]],
+ requestLang: Optional[str] = None,
) -> str:
- """Build a system-level context block describing available tables."""
- parts = [
- f"You are a data query assistant for the '{featureCode}' feature",
- ]
- if instanceLabel:
- parts[0] += f' (instance: "{instanceLabel}")'
- parts[0] += "."
- parts.append(
- "You have access to the following data tables. "
- "Use browseTable to list rows and queryTable to filter/search."
- )
- parts.append("")
+ """Build a system prompt describing available tables and query strategy."""
+ tableNames = []
+ tableBlocks = []
for obj in selectedTables:
meta = obj.get("meta", {})
tbl = meta.get("table", "?")
fields = meta.get("fields", [])
- label = obj.get("label", {})
- labelStr = label.get("en") or label.get("de") or tbl
- parts.append(f"Table: {tbl} ({labelStr})")
+ labelStr = resolveText(obj.get("label"), requestLang)
+ tableNames.append(tbl)
+ block = f" Table: {tbl} ({labelStr})"
if fields:
- parts.append(f" Fields: {', '.join(fields)}")
- parts.append("")
+ block += f"\n Fields: {', '.join(fields)}"
+ tableBlocks.append(block)
- parts.append(
- "Answer the user's question using the data from these tables. "
- "Be precise, cite row counts, and format data clearly."
- )
+ header = f"You are a data query assistant for the '{featureCode}' feature"
+ if instanceLabel:
+ header += f' (instance: "{instanceLabel}")'
+ header += "."
+
+ parts = [
+ header,
+ "",
+ "AVAILABLE TABLES (use EXACTLY these names as tableName parameter):",
+ *tableBlocks,
+ "",
+ f"Valid tableName values: {tableNames}",
+ "Field names are plain column names (e.g. 'accountNumber', 'periodYear').",
+ "",
+ "QUERY STRATEGY:",
+ "1. If unsure about columns, call browseTable(tableName) first to inspect the schema.",
+ "2. Use queryTable with filters for targeted lookups.",
+ "3. Use aggregateTable for SUM/COUNT/AVG/MIN/MAX with optional GROUP BY.",
+ "4. Combine what you need into as few tool calls as possible.",
+ "",
+ "RULES:",
+ "- Do NOT invent table or field names. Do NOT prefix fields with UUIDs or dots.",
+ "- Answer concisely. Cite row counts and key values.",
+ ]
return "\n".join(parts)
diff --git a/modules/serviceCenter/services/serviceAgent/featureDataProvider.py b/modules/serviceCenter/services/serviceAgent/featureDataProvider.py
index 25a0ff95..872e47eb 100644
--- a/modules/serviceCenter/services/serviceAgent/featureDataProvider.py
+++ b/modules/serviceCenter/services/serviceAgent/featureDataProvider.py
@@ -10,11 +10,52 @@ and mandateId so data isolation is guaranteed.
import logging
import json
+import os
+import time
+from pathlib import Path
from typing import Any, Dict, List, Optional
logger = logging.getLogger(__name__)
+_DEBUG_DIR = Path("D:/Athi/Local/Web/poweron/local/debug")
+
+
+def _isDebugEnabled() -> bool:
+ try:
+ from modules.shared.configuration import APP_CONFIG
+ val = APP_CONFIG.get("APP_LOGGING_FILE_ENABLED", False)
+ return val is True or str(val).lower() == "true"
+ except Exception:
+ return False
+
+
+def _debugQueryLog(method: str, tableName: str, params: dict, result: dict, elapsed: float):
+ """Append query + result to local/debug/debug_queryTable.log."""
+ if not _isDebugEnabled():
+ return
+ debugDir = _DEBUG_DIR
+ try:
+ debugDir.mkdir(parents=True, exist_ok=True)
+ logPath = debugDir / "debug_queryTable.log"
+ ts = time.strftime("%Y-%m-%d %H:%M:%S")
+ rows = result.get("rows", [])
+ total = result.get("total", len(rows))
+ err = result.get("error")
+ header = f"[{ts}] {method}({tableName}) — {len(rows)} rows returned, total={total}, elapsed={elapsed:.0f}ms"
+ if err:
+ header += f", ERROR={err}"
+ lines = [header]
+ lines.append(f" params: {json.dumps(params, ensure_ascii=False, default=str)}")
+ for i, row in enumerate(rows):
+ lines.append(f" [{i}] {json.dumps(row, ensure_ascii=False, default=str)}")
+ lines.append("")
+ with open(logPath, "a", encoding="utf-8") as f:
+ f.write("\n".join(lines) + "\n")
+ except Exception:
+ pass
+
_ALLOWED_OPERATORS = {"=", "!=", ">", "<", ">=", "<=", "LIKE", "ILIKE", "IS NULL", "IS NOT NULL"}
+_ALLOWED_AGGREGATES = {"SUM", "COUNT", "AVG", "MIN", "MAX"}
class FeatureDataProvider:
@@ -77,6 +118,15 @@ class FeatureDataProvider:
"""
_validateTableName(tableName)
conn = self._db.connection
+
+ if fields:
+ invalid = [f for f in fields if not _isValidIdentifier(f)]
+ if invalid:
+ return {
+ "rows": [], "total": 0, "limit": limit, "offset": offset,
+ "error": f"Invalid field name(s): {', '.join(invalid)}. Use getActualColumns to discover valid column names.",
+ }
+
scopeFilter = _buildScopeFilter(tableName, featureInstanceId, mandateId, dbConnection=conn)
extraWhere, extraParams = _buildFilterClauses(extraFilters)
@@ -86,6 +136,7 @@ class FeatureDataProvider:
fullWhere += " AND " + extraWhere
allParams.extend(extraParams)
+ t0 = time.time()
try:
with conn.cursor() as cur:
countSql = f'SELECT COUNT(*) FROM "{tableName}" WHERE {fullWhere}'
@@ -101,10 +152,96 @@ class FeatureDataProvider:
cur.execute(dataSql, allParams + [limit, offset])
rows = [_serializeRow(dict(r)) for r in cur.fetchall()]
- return {"rows": rows, "total": total, "limit": limit, "offset": offset}
+ result = {"rows": rows, "total": total, "limit": limit, "offset": offset}
+ _debugQueryLog("browseTable", tableName, {
+ "fields": fields, "limit": limit, "offset": offset,
+ }, result, (time.time() - t0) * 1000)
+ return result
except Exception as e:
logger.error(f"browseTable({tableName}) failed: {e}")
- return {"rows": [], "total": 0, "limit": limit, "offset": offset, "error": str(e)}
+ elapsed = (time.time() - t0) * 1000
+ errResult = {"rows": [], "total": 0, "limit": limit, "offset": offset, "error": str(e)}
+ _debugQueryLog("browseTable", tableName, {
+ "fields": fields, "limit": limit, "offset": offset,
+ }, errResult, elapsed)
+ try:
+ conn.rollback()
+ except Exception:
+ pass
+ return errResult
+
+ def aggregateTable(
+ self,
+ tableName: str,
+ featureInstanceId: str,
+ mandateId: str,
+ aggregate: str,
+ field: str,
+ groupBy: str = None,
+ extraFilters: Optional[List[Dict[str, Any]]] = None,
+ ) -> Dict[str, Any]:
+ """Run an aggregate query (SUM, COUNT, AVG, MIN, MAX) on a feature table.
+
+ Returns ``{"rows": [{"groupValue": ..., "result": ...}], "aggregate": ..., "field": ..., "groupBy": ...}``.
+ """
+ _validateTableName(tableName)
+ aggregate = aggregate.upper()
+ if aggregate not in _ALLOWED_AGGREGATES:
+ return {"rows": [], "error": f"Unsupported aggregate: {aggregate}. Allowed: {', '.join(sorted(_ALLOWED_AGGREGATES))}"}
+ if not _isValidIdentifier(field):
+ return {"rows": [], "error": f"Invalid field name: {field}"}
+ if groupBy and not _isValidIdentifier(groupBy):
+ return {"rows": [], "error": f"Invalid groupBy field: {groupBy}"}
+
+ conn = self._db.connection
+ scopeFilter = _buildScopeFilter(tableName, featureInstanceId, mandateId, dbConnection=conn)
+ extraWhere, extraParams = _buildFilterClauses(extraFilters)
+
+ fullWhere = scopeFilter["where"]
+ allParams = list(scopeFilter["params"])
+ if extraWhere:
+ fullWhere += " AND " + extraWhere
+ allParams.extend(extraParams)
+
+ t0 = time.time()
+ try:
+ with conn.cursor() as cur:
+ if groupBy:
+ sql = (
+ f'SELECT "{groupBy}" AS "groupValue", {aggregate}("{field}") AS "result" '
+ f'FROM "{tableName}" WHERE {fullWhere} '
+ f'GROUP BY "{groupBy}" ORDER BY "result" DESC'
+ )
+ else:
+ sql = (
+ f'SELECT {aggregate}("{field}") AS "result" '
+ f'FROM "{tableName}" WHERE {fullWhere}'
+ )
+ cur.execute(sql, allParams)
+ rows = [_serializeRow(dict(r)) for r in cur.fetchall()]
+
+ result = {
+ "rows": rows,
+ "aggregate": aggregate,
+ "field": field,
+ "groupBy": groupBy,
+ }
+ _debugQueryLog("aggregateTable", tableName, {
+ "aggregate": aggregate, "field": field, "groupBy": groupBy,
+ }, result, (time.time() - t0) * 1000)
+ return result
+ except Exception as e:
+ logger.error(f"aggregateTable({tableName}, {aggregate}({field})) failed: {e}")
+ elapsed = (time.time() - t0) * 1000
+ errResult = {"rows": [], "error": str(e), "aggregate": aggregate, "field": field, "groupBy": groupBy}
+ _debugQueryLog("aggregateTable", tableName, {
+ "aggregate": aggregate, "field": field, "groupBy": groupBy,
+ }, errResult, elapsed)
+ try:
+ conn.rollback()
+ except Exception:
+ pass
+ return errResult
def queryTable(
self,
@@ -125,6 +262,15 @@ class FeatureDataProvider:
"""
_validateTableName(tableName)
conn = self._db.connection
+
+ if fields:
+ invalid = [f for f in fields if not _isValidIdentifier(f)]
+ if invalid:
+ return {
+ "rows": [], "total": 0, "limit": limit, "offset": offset,
+ "error": f"Invalid field name(s): {', '.join(invalid)}. Use getActualColumns to discover valid column names.",
+ }
+
scopeFilter = _buildScopeFilter(tableName, featureInstanceId, mandateId, dbConnection=conn)
combinedFilters = list(filters or []) + list(extraFilters or [])
@@ -136,6 +282,7 @@ class FeatureDataProvider:
fullWhere += " AND " + extraWhere
allParams.extend(extraParams)
+ t0 = time.time()
try:
with conn.cursor() as cur:
countSql = f'SELECT COUNT(*) FROM "{tableName}" WHERE {fullWhere}'
@@ -151,10 +298,25 @@ class FeatureDataProvider:
cur.execute(dataSql, allParams + [limit, offset])
rows = [_serializeRow(dict(r)) for r in cur.fetchall()]
- return {"rows": rows, "total": total, "limit": limit, "offset": offset}
+ result = {"rows": rows, "total": total, "limit": limit, "offset": offset}
+ _debugQueryLog("queryTable", tableName, {
+ "filters": filters, "fields": fields, "orderBy": orderBy,
+ "limit": limit, "offset": offset,
+ }, result, (time.time() - t0) * 1000)
+ return result
except Exception as e:
logger.error(f"queryTable({tableName}) failed: {e}")
- return {"rows": [], "total": 0, "limit": limit, "offset": offset, "error": str(e)}
+ elapsed = (time.time() - t0) * 1000
+ errResult = {"rows": [], "total": 0, "limit": limit, "offset": offset, "error": str(e)}
+ _debugQueryLog("queryTable", tableName, {
+ "filters": filters, "fields": fields, "orderBy": orderBy,
+ "limit": limit, "offset": offset,
+ }, errResult, elapsed)
+ try:
+ conn.rollback()
+ except Exception:
+ pass
+ return errResult
# ------------------------------------------------------------------
diff --git a/modules/serviceCenter/services/serviceAgent/mainServiceAgent.py b/modules/serviceCenter/services/serviceAgent/mainServiceAgent.py
index b370b827..b24d9fcb 100644
--- a/modules/serviceCenter/services/serviceAgent/mainServiceAgent.py
+++ b/modules/serviceCenter/services/serviceAgent/mainServiceAgent.py
@@ -14,6 +14,7 @@ from modules.serviceCenter.services.serviceAgent.datamodelAgent import (
from modules.serviceCenter.services.serviceAgent.toolRegistry import ToolRegistry
from modules.serviceCenter.services.serviceAgent.agentLoop import runAgentLoop
from modules.serviceCenter.services.serviceAgent.actionToolAdapter import ActionToolAdapter
+from modules.serviceCenter.services.serviceAgent.coreTools import registerCoreTools
from modules.serviceCenter.services.serviceBilling.mainServiceBilling import (
getService as getBillingService,
InsufficientBalanceException,
@@ -22,44 +23,6 @@ from modules.serviceCenter.services.serviceBilling.mainServiceBilling import (
logger = logging.getLogger(__name__)
-_MAX_TOOL_RESULT_CHARS = 50_000
-
-_BINARY_SIGNATURES = (b"%PDF", b"\x89PNG", b"\xff\xd8\xff", b"GIF8", b"PK\x03\x04", b"Rar!", b"\x1f\x8b")
-
-
-def _resolveFileScope(fileId: str, context: dict) -> tuple:
- """Resolve featureInstanceId and mandateId for a file from context or management DB.
-
- Returns (featureInstanceId, mandateId) — never None, always strings.
- """
- fiId = context.get("featureInstanceId", "") or ""
- mId = context.get("mandateId", "") or ""
- if fiId and mId:
- return fiId, mId
- try:
- from modules.datamodels.datamodelFiles import FileItem
- from modules.interfaces.interfaceDbManagement import ComponentObjects
- fm = ComponentObjects().db._loadRecord(FileItem, fileId)
- if fm:
- _get = (lambda k: fm.get(k, "")) if isinstance(fm, dict) else (lambda k: getattr(fm, k, ""))
- fiId = fiId or str(_get("featureInstanceId") or "")
- mId = mId or str(_get("mandateId") or "")
- except Exception:
- pass
- return fiId, mId
-
-
-def _looksLikeBinary(data: bytes, sampleSize: int = 1024) -> bool:
- """Detect binary content by checking for magic bytes and non-printable char ratio."""
- if any(data[:8].startswith(sig) for sig in _BINARY_SIGNATURES):
- return True
- sample = data[:sampleSize]
- if not sample:
- return False
- nonPrintable = sum(1 for b in sample if b < 0x09 or (0x0E <= b < 0x20 and b != 0x1B))
- return nonPrintable / len(sample) > 0.10
-
-
class _ServicesAdapter:
"""Adapter providing service access from (context, get_service)."""
@@ -158,7 +121,7 @@ class AgentService:
if workflowId is None:
workflowId = getattr(self.services.workflow, "id", "unknown") if self.services.workflow else "unknown"
- resolvedLanguage = userLanguage or getattr(self.services.user, "language", "") or "de"
+ resolvedLanguage = userLanguage or ""
enrichedPrompt = await self._enrichPromptWithFiles(prompt, fileIds)
@@ -212,8 +175,8 @@ class AgentService:
try:
from modules.interfaces.interfaceDbKnowledge import getInterface as _getKnowledgeInterface
knowledgeDb = _getKnowledgeInterface()
- except Exception:
- pass
+ except Exception as e:
+ logger.warning(f"Knowledge DB interface unavailable: {e}")
fileDescriptions = []
for fid in fileIds:
@@ -298,10 +261,10 @@ class AgentService:
return prompt
def _buildToolRegistry(self, config: AgentConfig) -> ToolRegistry:
- """Build a tool registry with core tools and ActionToolAdapter tools."""
+ """Build a tool registry with core tools, ActionToolAdapter tools, and toolbox-activated tools."""
registry = ToolRegistry()
- _registerCoreTools(registry, self.services)
+ registerCoreTools(registry, self.services)
try:
from modules.workflows.processing.core.actionExecutor import ActionExecutor
@@ -311,8 +274,130 @@ class AgentService:
except Exception as e:
logger.warning(f"Could not register action tools: {e}")
+ self._activateToolboxes(registry, config)
+ self._registerRequestToolbox(registry)
+
return registry
+ def _activateToolboxes(self, registry: ToolRegistry, config: AgentConfig) -> None:
+ """Activate toolboxes dynamically based on user connections and config.
+
+        Tools that belong only to inactive toolboxes are unregistered from
+        the registry, so the agent loop sees just the active toolboxes' tools.
+ The 'workflow' toolbox is special: its tools are registered from
+ workflowTools module because they have dedicated handlers.
+ """
+ try:
+ from modules.serviceCenter.services.serviceAgent.toolboxRegistry import getToolboxRegistry
+ tbRegistry = getToolboxRegistry()
+
+ userConnections: List[str] = []
+ try:
+ connectionService = self._getService("connection")
+ if connectionService and hasattr(connectionService, "getConnections"):
+ connections = connectionService.getConnections() or []
+ userConnections = [c.get("authority", "") for c in connections if c.get("authority")]
+ except Exception as e:
+ logger.debug("Could not resolve user connections for toolbox activation: %s", e)
+
+ activeToolboxes = tbRegistry.getActiveToolboxes(userConnections)
+ activatedIds = [tb.id for tb in activeToolboxes]
+ logger.info("Toolbox activation: connections=%s -> active toolboxes=%s", userConnections, activatedIds)
+
+ activeToolNames: set = set()
+ for tb in activeToolboxes:
+ activeToolNames.update(tb.tools)
+
+ for tb in activeToolboxes:
+ if tb.id == "workflow":
+ try:
+ from modules.serviceCenter.services.serviceAgent.workflowTools import getWorkflowToolDefinitions
+ from modules.serviceCenter.services.serviceAgent.datamodelAgent import ToolDefinition
+ wfDefs = getWorkflowToolDefinitions()
+ for rawDef in wfDefs:
+ handler = rawDef.get("handler")
+ defFields = {k: v for k, v in rawDef.items() if k != "handler"}
+ toolDef = ToolDefinition(**defFields)
+ registry.registerFromDefinition(toolDef, handler)
+ logger.info("Registered %d workflow tools from toolbox", len(wfDefs))
+ except Exception as e:
+ logger.warning("Could not register workflow tools: %s", e)
+
+ inactiveToolNames = set()
+ for tb in tbRegistry.getAllToolboxes():
+ if tb.id not in activatedIds:
+ inactiveToolNames.update(tb.tools)
+ inactiveToolNames -= activeToolNames
+
+ for toolName in inactiveToolNames:
+ registry.unregister(toolName)
+
+ logger.debug("Toolbox activation: %d active tools, %d inactive tools removed", len(activeToolNames), len(inactiveToolNames))
+ except Exception as e:
+ logger.warning("Toolbox activation failed: %s", e)
+
+ def _registerRequestToolbox(self, registry: ToolRegistry) -> None:
+ """Register the requestToolbox meta-tool that lets the agent dynamically activate toolboxes."""
+ try:
+ from modules.serviceCenter.services.serviceAgent.toolboxRegistry import (
+ getToolboxRegistry, buildRequestToolboxDefinition, REQUEST_TOOLBOX_TOOL_NAME,
+ )
+ from modules.serviceCenter.services.serviceAgent.datamodelAgent import ToolResult
+
+ tbRegistry = getToolboxRegistry()
+ allIds = [tb.id for tb in tbRegistry.getAllToolboxes()]
+ registeredNames = set(registry.getToolNames())
+ inactiveIds = [tbId for tbId in allIds if not any(
+ t in registeredNames for t in (tbRegistry.getToolbox(tbId).tools if tbRegistry.getToolbox(tbId) else [])
+ )]
+
+ if not inactiveIds:
+ return
+
+ toolDef = buildRequestToolboxDefinition(inactiveIds)
+
+ async def _handler(args: Dict[str, Any], context: Dict[str, Any] = None) -> ToolResult:
+ toolboxId = args.get("toolboxId", "")
+ reason = args.get("reason", "")
+ tb = tbRegistry.getToolbox(toolboxId)
+ if not tb:
+ return ToolResult(
+ toolCallId="", toolName=REQUEST_TOOLBOX_TOOL_NAME,
+ success=False, error=f"Unknown toolbox: {toolboxId}",
+ )
+ activatedCount = 0
+ for toolName in tb.tools:
+ if registry.isValidTool(toolName):
+ activatedCount += 1
+ continue
+ try:
+ from modules.serviceCenter.services.serviceAgent.actionToolAdapter import ActionToolAdapter
+ adapter = ActionToolAdapter(self._getService("actionExecutor"))
+ adapter.registerAll(registry)
+ if registry.isValidTool(toolName):
+ activatedCount += 1
+ logger.info("requestToolbox: re-registered tool '%s' from toolbox '%s'", toolName, toolboxId)
+ else:
+ logger.warning("requestToolbox: tool '%s' from toolbox '%s' could not be registered", toolName, toolboxId)
+ except Exception as regErr:
+ logger.warning("requestToolbox: failed to register tool '%s': %s", toolName, regErr)
+ logger.info("requestToolbox: activated toolbox '%s' (%d/%d tools). Reason: %s", toolboxId, activatedCount, len(tb.tools), reason)
+ return ToolResult(
+ toolCallId="", toolName=REQUEST_TOOLBOX_TOOL_NAME,
+ success=True,
+ data=f"Toolbox '{tb.label}' activated with {activatedCount} tools. They are now available.",
+ )
+
+ registry.register(
+ name=REQUEST_TOOLBOX_TOOL_NAME,
+ handler=_handler,
+ description=toolDef["description"],
+ parameters=toolDef["parameters"],
+ )
+ logger.info("Registered requestToolbox meta-tool (inactive toolboxes: %s)", inactiveIds)
+ except Exception as e:
+ logger.warning("Could not register requestToolbox meta-tool: %s", e)
+
async def _persistTrace(self, workflowId: str, summaryData: Dict[str, Any]):
"""Persist the agent trace and workflow artifacts in the knowledge store."""
try:
@@ -501,2942 +586,3 @@ def _buildWorkflowHintItems(
countLabel += f" (showing 10 newest)"
items.insert(0, {"key": countLabel, "value": "use listWorkflowHistory to browse"})
return items
-
-
-def _getOrCreateTempFolder(chatService) -> Optional[str]:
- """Return the ID of the root-level 'Temp' folder, creating it if it doesn't exist."""
- try:
- allFolders = chatService.interfaceDbComponent.listFolders()
- tempFolder = next(
- (f for f in allFolders
- if f.get("name") == "Temp" and not f.get("parentId")),
- None,
- )
- if tempFolder:
- return tempFolder.get("id")
- newFolder = chatService.interfaceDbComponent.createFolder("Temp", parentId=None)
- return newFolder.get("id") if newFolder else None
- except Exception as e:
- logger.warning(f"Could not get/create Temp folder: {e}")
- return None
-
-
-def _registerCoreTools(registry: ToolRegistry, services):
- """Register built-in core tools: file operations, search, and folder management."""
- import uuid as _uuid
- from modules.serviceCenter.services.serviceAgent.datamodelAgent import ToolResult
-
- # ---- Read-only tools ----
-
- def _applyOffsetLimit(text: str, offset: int = None, limit: int = None) -> str:
- """Apply line-based offset/limit to text content, returning numbered lines."""
- if offset is None and limit is None:
- return None
- lines = text.split("\n")
- totalLines = len(lines)
- startLine = max(0, (offset or 1) - 1)
- endLine = min(totalLines, startLine + (limit or 200))
- selected = lines[startLine:endLine]
- numbered = "\n".join(f"{i + startLine + 1}|{line}" for i, line in enumerate(selected))
- header = f"[Lines {startLine + 1}-{endLine} of {totalLines} total]\n"
- return header + numbered
-
- async def _readFile(args: Dict[str, Any], context: Dict[str, Any]):
- fileId = args.get("fileId", "")
- offset = args.get("offset")
- limit = args.get("limit")
- if not fileId:
- return ToolResult(toolCallId="", toolName="readFile", success=False, error="fileId is required")
- try:
- knowledgeService = services.getService("knowledge") if hasattr(services, "getService") else None
-
- # 1) Knowledge Store: return already-extracted text chunks
- if knowledgeService:
- fileStatus = knowledgeService.getFileStatus(fileId)
- if fileStatus == "indexed":
- chunks = knowledgeService._knowledgeDb.getContentChunks(fileId)
- textChunks = [
- c for c in (chunks or [])
- if c.get("contentType") != "image" and c.get("data")
- ]
- if textChunks:
- assembled = "\n\n".join(c["data"] for c in textChunks)
- chunked = _applyOffsetLimit(assembled, offset, limit)
- if chunked is not None:
- return ToolResult(toolCallId="", toolName="readFile", success=True, data=chunked)
- if len(assembled) > _MAX_TOOL_RESULT_CHARS:
- assembled = assembled[:_MAX_TOOL_RESULT_CHARS] + f"\n\n[Truncated – showing first {_MAX_TOOL_RESULT_CHARS} chars of {len(assembled)}. Use offset/limit to read specific sections.]"
- return ToolResult(
- toolCallId="", toolName="readFile", success=True,
- data=assembled,
- )
- elif fileStatus in ("processing", "embedding", "extracted"):
- return ToolResult(
- toolCallId="", toolName="readFile", success=True,
- data=f"[File {fileId} is currently being processed (status: {fileStatus}). Try again shortly.]",
- )
-
- # 2) Not indexed yet: try on-demand extraction
- chatService = services.chat
- fileInfo = chatService.getFileInfo(fileId)
- if not fileInfo:
- return ToolResult(toolCallId="", toolName="readFile", success=True, data="File not found.")
-
- fileName = fileInfo.get("fileName", fileId)
- mimeType = fileInfo.get("mimeType", "")
-
- _BINARY_TYPES = ("application/pdf", "image/", "application/vnd.", "application/zip",
- "application/x-zip", "application/x-tar", "application/x-7z",
- "application/msword", "application/octet-stream",
- "message/rfc822")
- isBinary = any(mimeType.startswith(t) for t in _BINARY_TYPES)
-
- rawBytes = chatService.getFileData(fileId)
- if not rawBytes:
- return ToolResult(toolCallId="", toolName="readFile", success=True, data="File data not accessible.")
-
- if not isBinary:
- isBinary = _looksLikeBinary(rawBytes)
-
- if isBinary:
- try:
- from modules.serviceCenter.services.serviceExtraction.subRegistry import ExtractorRegistry, ChunkerRegistry
- from modules.serviceCenter.services.serviceExtraction.subPipeline import runExtraction
- from modules.datamodels.datamodelExtraction import ExtractionOptions
-
- extracted = runExtraction(
- ExtractorRegistry(), ChunkerRegistry(),
- rawBytes, fileName, mimeType, ExtractionOptions(),
- )
-
- contentObjects = []
- for part in extracted.parts:
- tg = (part.typeGroup or "").lower()
- ct = "image" if tg == "image" else "text"
- if not part.data or not part.data.strip():
- continue
- contentObjects.append({
- "contentObjectId": part.id,
- "contentType": ct,
- "data": part.data,
- "contextRef": {
- "containerPath": fileName,
- "location": part.label or "file",
- **(part.metadata or {}),
- },
- })
-
- if contentObjects:
- if knowledgeService:
- try:
- userId = context.get("userId", "")
- _fiId, _mId = _resolveFileScope(fileId, context)
- await knowledgeService.indexFile(
- fileId=fileId, fileName=fileName, mimeType=mimeType,
- userId=userId, contentObjects=contentObjects,
- featureInstanceId=_fiId,
- mandateId=_mId,
- )
- except Exception:
- pass
-
- joined = ""
- if knowledgeService:
- _chunks = knowledgeService._knowledgeDb.getContentChunks(fileId)
- _textChunks = [
- c for c in (_chunks or [])
- if c.get("contentType") != "image" and c.get("data")
- ]
- if _textChunks:
- joined = "\n\n".join(c["data"] for c in _textChunks)
- if not joined:
- textParts = [o["data"] for o in contentObjects if o["contentType"] != "image"]
- joined = "\n\n".join(textParts) if textParts else ""
- if joined:
- chunked = _applyOffsetLimit(joined, offset, limit)
- if chunked is not None:
- return ToolResult(toolCallId="", toolName="readFile", success=True, data=chunked)
- if len(joined) > _MAX_TOOL_RESULT_CHARS:
- joined = joined[:_MAX_TOOL_RESULT_CHARS] + f"\n\n[Truncated – showing first {_MAX_TOOL_RESULT_CHARS} chars of {len(joined)}. Use offset/limit to read specific sections.]"
- return ToolResult(
- toolCallId="", toolName="readFile", success=True,
- data=joined,
- )
- imgCount = sum(1 for o in contentObjects if o["contentType"] == "image")
- return ToolResult(
- toolCallId="", toolName="readFile", success=True,
- data=f"[Extracted {len(contentObjects)} content objects from '{fileName}' "
- f"({imgCount} images, no readable text). "
- f"Use describeImage(fileId='{fileId}') to analyze visual content.]",
- )
- except Exception as extractErr:
- logger.warning(f"readFile extraction failed for {fileId} ({fileName}): {extractErr}")
-
- return ToolResult(
- toolCallId="", toolName="readFile", success=True,
- data=f"[Binary file: '{fileName}', type={mimeType}, size={len(rawBytes)} bytes. "
- f"Text extraction not available. Use describeImage for images.]",
- )
-
- # 3) Text file: decode raw bytes
- for encoding in ("utf-8", "utf-8-sig", "latin-1"):
- try:
- text = rawBytes.decode(encoding)
- if text.strip():
- _fileNeedNeutralize = False
- try:
- from modules.datamodels.datamodelFiles import FileItem as _FI
- from modules.interfaces.interfaceDbManagement import ComponentObjects as _CO
- _fRec = _CO().db._loadRecord(_FI, fileId)
- if _fRec:
- _fG = (lambda k, d=None: _fRec.get(k, d)) if isinstance(_fRec, dict) else (lambda k, d=None: getattr(_fRec, k, d))
- _fileNeedNeutralize = bool(_fG("neutralize", False))
- except Exception:
- pass
- if _fileNeedNeutralize:
- try:
- _nSvc = services.getService("neutralization") if hasattr(services, "getService") else None
- if _nSvc and hasattr(_nSvc, 'processTextAsync'):
- _nResult = await _nSvc.processTextAsync(text, fileId)
- if _nResult and _nResult.get("neutralized_text"):
- text = _nResult["neutralized_text"]
- logger.debug(f"readFile: neutralized text for file {fileId}")
- else:
- logger.warning(f"readFile: neutralization failed for file {fileId}, blocking text (fail-safe)")
- return ToolResult(toolCallId="", toolName="readFile", success=True,
- data="[File requires neutralization but neutralization failed. Content blocked for data protection.]")
- else:
- logger.warning(f"readFile: neutralization required but service unavailable for file {fileId}")
- return ToolResult(toolCallId="", toolName="readFile", success=True,
- data="[File requires neutralization but service unavailable. Content blocked for data protection.]")
- except Exception as _nErr:
- logger.error(f"readFile: neutralization error for file {fileId}: {_nErr}")
- return ToolResult(toolCallId="", toolName="readFile", success=True,
- data="[File requires neutralization but an error occurred. Content blocked for data protection.]")
- chunked = _applyOffsetLimit(text, offset, limit)
- if chunked is not None:
- return ToolResult(toolCallId="", toolName="readFile", success=True, data=chunked)
- if len(text) > _MAX_TOOL_RESULT_CHARS:
- text = text[:_MAX_TOOL_RESULT_CHARS] + f"\n\n[Truncated – showing first {_MAX_TOOL_RESULT_CHARS} chars of {len(text)}. Use offset/limit to read specific sections.]"
- return ToolResult(
- toolCallId="", toolName="readFile", success=True,
- data=text,
- )
- except (UnicodeDecodeError, ValueError):
- continue
-
- return ToolResult(
- toolCallId="", toolName="readFile", success=True,
- data="File is empty or could not be decoded.",
- )
- except Exception as e:
- return ToolResult(toolCallId="", toolName="readFile", success=False, error=str(e))
-
- async def _listFiles(args: Dict[str, Any], context: Dict[str, Any]):
- try:
- chatService = services.chat
- files = chatService.listFiles(
- folderId=args.get("folderId"),
- tags=args.get("tags"),
- search=args.get("search"),
- )
- fileList = "\n".join(
- f"- {f.get('fileName', 'unknown')} (id: {f.get('id', '?')}, "
- f"type: {f.get('mimeType', '?')}, size: {f.get('fileSize', '?')}, "
- f"tags: {f.get('tags', [])}, status: {f.get('status', 'n/a')})"
- for f in files
- ) if files else "No files found."
- return ToolResult(toolCallId="", toolName="listFiles", success=True, data=fileList)
- except Exception as e:
- return ToolResult(toolCallId="", toolName="listFiles", success=False, error=str(e))
-
- async def _searchInFileContent(args: Dict[str, Any], context: Dict[str, Any]):
- import re as _re
- fileId = args.get("fileId", "")
- query = args.get("query", "")
- contextLines = args.get("contextLines", 2)
- if not fileId or not query:
- return ToolResult(toolCallId="", toolName="searchInFileContent", success=False, error="fileId and query are required")
- try:
- chatService = services.chat
- rawBytes = chatService.getFileData(fileId)
- if not rawBytes:
- return ToolResult(toolCallId="", toolName="searchInFileContent", success=False, error="File data not accessible")
- try:
- content = rawBytes.decode("utf-8")
- except UnicodeDecodeError:
- content = rawBytes.decode("latin-1", errors="replace")
-
- lines = content.split("\n")
- pattern = _re.compile(_re.escape(query), _re.IGNORECASE)
- matches = []
- for i, line in enumerate(lines):
- if pattern.search(line):
- start = max(0, i - contextLines)
- end = min(len(lines), i + contextLines + 1)
- snippet = "\n".join(f"{j + 1}|{lines[j]}" for j in range(start, end))
- matches.append(snippet)
-
- if not matches:
- return ToolResult(toolCallId="", toolName="searchInFileContent", success=True,
- data=f"No matches for '{query}' in file.")
-
- shown = matches[:20]
- resultText = f"Found {len(matches)} match(es) for '{query}':\n\n" + "\n---\n".join(shown)
- if len(matches) > 20:
- resultText += f"\n\n... and {len(matches) - 20} more matches"
- return ToolResult(toolCallId="", toolName="searchInFileContent", success=True, data=resultText)
- except Exception as e:
- return ToolResult(toolCallId="", toolName="searchInFileContent", success=False, error=str(e))
-
- async def _listFolders(args: Dict[str, Any], context: Dict[str, Any]):
- try:
- chatService = services.chat
- folders = chatService.listFolders(parentId=args.get("parentId"))
- folderList = "\n".join(
- f"- {f.get('name', 'unnamed')} (id: {f.get('id', '?')})"
- for f in folders
- ) if folders else "No folders found."
- return ToolResult(toolCallId="", toolName="listFolders", success=True, data=folderList)
- except Exception as e:
- return ToolResult(toolCallId="", toolName="listFolders", success=False, error=str(e))
-
- async def _webSearch(args: Dict[str, Any], context: Dict[str, Any]):
- query = args.get("query", "")
- if not query:
- return ToolResult(toolCallId="", toolName="webSearch", success=False, error="query is required")
- try:
- webService = services.getService("web")
- result = await webService.performWebResearch(
- prompt=query,
- urls=[],
- country=None,
- language=args.get("language"),
- )
- summary = result.get("summary", "") if isinstance(result, dict) else str(result)
- return ToolResult(
- toolCallId="", toolName="webSearch", success=True,
- data=summary or str(result)
- )
- except Exception as e:
- return ToolResult(toolCallId="", toolName="webSearch", success=False, error=str(e))
-
- # ---- Write tools ----
-
- async def _tagFile(args: Dict[str, Any], context: Dict[str, Any]):
- fileId = args.get("fileId", "")
- tags = args.get("tags", [])
- if not fileId:
- return ToolResult(toolCallId="", toolName="tagFile", success=False, error="fileId is required")
- try:
- chatService = services.chat
- chatService.interfaceDbComponent.updateFile(fileId, {"tags": tags})
- return ToolResult(
- toolCallId="", toolName="tagFile", success=True,
- data=f"Tags updated to {tags} for file {fileId}"
- )
- except Exception as e:
- return ToolResult(toolCallId="", toolName="tagFile", success=False, error=str(e))
-
- async def _moveFile(args: Dict[str, Any], context: Dict[str, Any]):
- fileId = args.get("fileId", "")
- targetFolderId = args.get("targetFolderId")
- if not fileId:
- return ToolResult(toolCallId="", toolName="moveFile", success=False, error="fileId is required")
- try:
- chatService = services.chat
- chatService.interfaceDbComponent.updateFile(fileId, {"folderId": targetFolderId})
- return ToolResult(
- toolCallId="", toolName="moveFile", success=True,
- data=f"File {fileId} moved to folder {targetFolderId or 'root'}"
- )
- except Exception as e:
- return ToolResult(toolCallId="", toolName="moveFile", success=False, error=str(e))
-
- async def _createFolder(args: Dict[str, Any], context: Dict[str, Any]):
- name = args.get("name", "")
- if not name:
- return ToolResult(toolCallId="", toolName="createFolder", success=False, error="name is required")
- try:
- chatService = services.chat
- folder = chatService.createFolder(name=name, parentId=args.get("parentId"))
- return ToolResult(
- toolCallId="", toolName="createFolder", success=True,
- data=f"Folder '{name}' created (id: {folder.get('id', '?')})"
- )
- except Exception as e:
- return ToolResult(toolCallId="", toolName="createFolder", success=False, error=str(e))
-
- async def _writeFile(args: Dict[str, Any], context: Dict[str, Any]):
- content = args.get("content", "")
- mode = args.get("mode", "create")
- fileId = args.get("fileId", "")
- name = args.get("name", "")
-
- if not content:
- return ToolResult(toolCallId="", toolName="writeFile", success=False, error="content is required")
-
- try:
- chatService = services.chat
- dbMgmt = chatService.interfaceDbComponent
-
- if mode == "append":
- if not fileId:
- return ToolResult(toolCallId="", toolName="writeFile", success=False, error="fileId is required for mode=append")
- file = dbMgmt.getFile(fileId)
- if not file:
- return ToolResult(toolCallId="", toolName="writeFile", success=False, error=f"File {fileId} not found")
- existingData = dbMgmt.getFileData(fileId) or b""
- try:
- existingText = existingData.decode("utf-8")
- except UnicodeDecodeError:
- existingText = existingData.decode("latin-1", errors="replace")
- newContent = existingText + content
- dbMgmt.updateFileData(fileId, newContent.encode("utf-8"))
- dbMgmt.updateFile(fileId, {"fileSize": len(newContent.encode("utf-8"))})
- return ToolResult(
- toolCallId="", toolName="writeFile", success=True,
- data=f"Appended {len(content)} chars to '{file.fileName}' (id: {fileId}, total: {len(newContent)} chars)",
- sideEvents=[{"type": "fileUpdated", "data": {"fileId": fileId, "fileName": file.fileName}}],
- )
-
- if mode == "overwrite":
- if not fileId:
- return ToolResult(toolCallId="", toolName="writeFile", success=False, error="fileId is required for mode=overwrite")
- file = dbMgmt.getFile(fileId)
- if not file:
- return ToolResult(toolCallId="", toolName="writeFile", success=False, error=f"File {fileId} not found")
- dbMgmt.updateFileData(fileId, content.encode("utf-8"))
- dbMgmt.updateFile(fileId, {"fileSize": len(content.encode("utf-8"))})
- return ToolResult(
- toolCallId="", toolName="writeFile", success=True,
- data=f"Overwritten '{file.fileName}' (id: {fileId}, {len(content)} chars)",
- sideEvents=[{"type": "fileUpdated", "data": {"fileId": fileId, "fileName": file.fileName}}],
- )
-
- # mode == "create" (default)
- if not name:
- return ToolResult(toolCallId="", toolName="writeFile", success=False, error="name is required for mode=create")
- fileItem, _ = dbMgmt.saveUploadedFile(content.encode("utf-8"), name)
- fiId = context.get("featureInstanceId") or (services.featureInstanceId if services else "")
- if fiId:
- dbMgmt.updateFile(fileItem.id, {"featureInstanceId": fiId})
- if args.get("folderId"):
- dbMgmt.updateFile(fileItem.id, {"folderId": args["folderId"]})
- if args.get("tags"):
- dbMgmt.updateFile(fileItem.id, {"tags": args["tags"]})
- return ToolResult(
- toolCallId="", toolName="writeFile", success=True,
- data=f"File '{name}' created (id: {fileItem.id})",
- sideEvents=[{
- "type": "fileCreated",
- "data": {
- "fileId": fileItem.id,
- "fileName": name,
- "mimeType": fileItem.mimeType,
- "fileSize": fileItem.fileSize,
- },
- }],
- )
- except Exception as e:
- return ToolResult(toolCallId="", toolName="writeFile", success=False, error=str(e))
-
- # ---- Register all tools ----
-
- registry.register(
- "readFile", _readFile,
- description=(
- "Read the content of a file. Returns full content by default. "
- "For large files, use offset and limit to read specific line ranges. "
- "When truncated, the response tells the total line count so you can paginate."
- ),
- parameters={
- "type": "object",
- "properties": {
- "fileId": {"type": "string", "description": "The file ID to read"},
- "offset": {"type": "integer", "description": "Start reading from this line number (1-based). Omit for full file."},
- "limit": {"type": "integer", "description": "Max number of lines to return (default: all). Use with offset for chunked reading."},
- },
- "required": ["fileId"]
- },
- readOnly=True
- )
-
- registry.register(
- "listFiles", _listFiles,
- description=(
- "List files in the local workspace. Filter by folder, tags, or search term. "
- "For external data sources, use browseDataSource instead."
- ),
- parameters={
- "type": "object",
- "properties": {
- "folderId": {"type": "string", "description": "Filter by folder ID"},
- "tags": {"type": "array", "items": {"type": "string"}, "description": "Filter by tags (any match)"},
- "search": {"type": "string", "description": "Search in file names and descriptions"},
- }
- },
- readOnly=True
- )
-
- registry.register(
- "searchInFileContent", _searchInFileContent,
- description=(
- "Search for text within a file's content. Returns matching lines with context. "
- "Case-insensitive. Use to locate specific text before using replaceInFile, "
- "or to find relevant sections in a large file before reading with offset/limit."
- ),
- parameters={
- "type": "object",
- "properties": {
- "fileId": {"type": "string", "description": "The file ID to search in"},
- "query": {"type": "string", "description": "Text to search for (case-insensitive)"},
- "contextLines": {"type": "integer", "description": "Number of context lines around each match (default: 2)"},
- },
- "required": ["fileId", "query"]
- },
- readOnly=True
- )
-
- registry.register(
- "listFolders", _listFolders,
- description="List folders in the local workspace. For external data sources, use browseDataSource instead.",
- parameters={
- "type": "object",
- "properties": {
- "parentId": {"type": "string", "description": "Parent folder ID (omit for root)"},
- }
- },
- readOnly=True
- )
-
- registry.register(
- "webSearch", _webSearch,
- description="Search the web for general information. Use readUrl to fetch content from a known URL instead.",
- parameters={
- "type": "object",
- "properties": {"query": {"type": "string", "description": "Search query"}},
- "required": ["query"]
- },
- readOnly=True
- )
-
- registry.register(
- "tagFile", _tagFile,
- description="Set or update tags on a file for categorization and filtering via listFiles.",
- parameters={
- "type": "object",
- "properties": {
- "fileId": {"type": "string", "description": "The file ID"},
- "tags": {"type": "array", "items": {"type": "string"}, "description": "Tags to set"},
- },
- "required": ["fileId", "tags"]
- },
- readOnly=False
- )
-
- registry.register(
- "moveFile", _moveFile,
- description="Move a file to a different folder in the local workspace.",
- parameters={
- "type": "object",
- "properties": {
- "fileId": {"type": "string", "description": "The file ID to move"},
- "targetFolderId": {"type": "string", "description": "Target folder ID (null for root)"},
- },
- "required": ["fileId"]
- },
- readOnly=False
- )
-
- registry.register(
- "createFolder", _createFolder,
- description="Create a new folder in the local workspace.",
- parameters={
- "type": "object",
- "properties": {
- "name": {"type": "string", "description": "Folder name"},
- "parentId": {"type": "string", "description": "Parent folder ID (omit for root)"},
- },
- "required": ["name"]
- },
- readOnly=False
- )
-
- registry.register(
- "writeFile", _writeFile,
- description=(
- "Create, append, or overwrite a file. Modes:\n"
- "- create (default): create a new file (name required).\n"
- "- append: append content to an existing file (fileId required). "
- "Use for large content that exceeds a single tool call (~8000 chars per call).\n"
- "- overwrite: replace entire file content (fileId required)."
- ),
- parameters={
- "type": "object",
- "properties": {
- "name": {"type": "string", "description": "File name (required for mode=create)"},
- "content": {"type": "string", "description": "Content to write/append"},
- "mode": {"type": "string", "enum": ["create", "append", "overwrite"], "description": "Write mode (default: create)"},
- "fileId": {"type": "string", "description": "File ID (required for mode=append/overwrite)"},
- "folderId": {"type": "string", "description": "Target folder ID (mode=create only)"},
- "tags": {"type": "array", "items": {"type": "string"}, "description": "Tags (mode=create only)"},
- },
- "required": ["content"]
- },
- readOnly=False
- )
-
- # ---- Phase 1: deleteFile, renameFile, readUrl, translateText ----
-
- async def _deleteFile(args: Dict[str, Any], context: Dict[str, Any]):
- fileId = args.get("fileId", "")
- if not fileId:
- return ToolResult(toolCallId="", toolName="deleteFile", success=False, error="fileId is required")
- try:
- chatService = services.chat
- file = chatService.interfaceDbComponent.getFile(fileId)
- if not file:
- return ToolResult(toolCallId="", toolName="deleteFile", success=False, error=f"File {fileId} not found")
- fileName = file.fileName
- try:
- knowledgeService = services.getService("knowledge")
- if knowledgeService and hasattr(knowledgeService, "removeFile"):
- knowledgeService.removeFile(fileId)
- except Exception:
- pass
- chatService.interfaceDbComponent.deleteFile(fileId)
- return ToolResult(
- toolCallId="", toolName="deleteFile", success=True,
- data=f"File '{fileName}' (id: {fileId}) deleted",
- sideEvents=[{"type": "fileDeleted", "data": {"fileId": fileId, "fileName": fileName}}],
- )
- except Exception as e:
- return ToolResult(toolCallId="", toolName="deleteFile", success=False, error=str(e))
-
- async def _renameFile(args: Dict[str, Any], context: Dict[str, Any]):
- fileId = args.get("fileId", "")
- newName = args.get("newName", "")
- if not fileId or not newName:
- return ToolResult(toolCallId="", toolName="renameFile", success=False, error="fileId and newName are required")
- try:
- chatService = services.chat
- chatService.interfaceDbComponent.updateFile(fileId, {"fileName": newName})
- return ToolResult(
- toolCallId="", toolName="renameFile", success=True,
- data=f"File {fileId} renamed to '{newName}'",
- sideEvents=[{"type": "fileUpdated", "data": {"fileId": fileId, "fileName": newName}}],
- )
- except Exception as e:
- return ToolResult(toolCallId="", toolName="renameFile", success=False, error=str(e))
-
- async def _readUrl(args: Dict[str, Any], context: Dict[str, Any]):
- url = args.get("url", "")
- if not url:
- return ToolResult(toolCallId="", toolName="readUrl", success=False, error="url is required")
- try:
- webService = services.getService("web")
- result = await webService._performWebCrawl(
- instruction="Extract all content from this page",
- urls=[url],
- maxDepth=1,
- maxWidth=1,
- )
- if isinstance(result, list) and result:
- content = "\n\n".join(
- item.get("content", "") or item.get("text", "") or str(item)
- for item in result if item
- )
- elif isinstance(result, dict):
- content = result.get("content", "") or result.get("summary", "") or str(result)
- else:
- content = str(result) if result else "No content retrieved"
- _MAX = 30000
- if len(content) > _MAX:
- content = content[:_MAX] + f"\n\n... (truncated at {_MAX} chars)"
- return ToolResult(toolCallId="", toolName="readUrl", success=True, data=content)
- except Exception as e:
- return ToolResult(toolCallId="", toolName="readUrl", success=False, error=str(e))
-
- async def _translateText(args: Dict[str, Any], context: Dict[str, Any]):
- text = args.get("text", "")
- targetLanguage = args.get("targetLanguage", "")
- if not text or not targetLanguage:
- return ToolResult(toolCallId="", toolName="translateText", success=False, error="text and targetLanguage are required")
- try:
- from modules.interfaces.interfaceVoiceObjects import getVoiceInterface
- mandateId = context.get("mandateId", "")
- voiceInterface = getVoiceInterface(currentUser=None, mandateId=mandateId)
- sourceLanguage = args.get("sourceLanguage", "auto")
- result = await voiceInterface.translateText(text, sourceLanguage=sourceLanguage, targetLanguage=targetLanguage)
- if result and result.get("success"):
- translated = result.get("translated_text", "")
- return ToolResult(toolCallId="", toolName="translateText", success=True, data=translated)
- return ToolResult(toolCallId="", toolName="translateText", success=False, error=result.get("error", "Translation failed"))
- except Exception as e:
- return ToolResult(toolCallId="", toolName="translateText", success=False, error=str(e))
-
- registry.register(
- "deleteFile", _deleteFile,
- description="Permanently delete a file from the local workspace.",
- parameters={
- "type": "object",
- "properties": {
- "fileId": {"type": "string", "description": "The file ID to delete"},
- },
- "required": ["fileId"]
- },
- readOnly=False
- )
-
- registry.register(
- "renameFile", _renameFile,
- description="Rename a file in the local workspace. Include the file extension in the new name.",
- parameters={
- "type": "object",
- "properties": {
- "fileId": {"type": "string", "description": "The file ID to rename"},
- "newName": {"type": "string", "description": "New file name including extension"},
- },
- "required": ["fileId", "newName"]
- },
- readOnly=False
- )
-
- registry.register(
- "readUrl", _readUrl,
- description=(
- "Read and extract content from a specific URL. "
- "Use when the user provides a specific URL to read, or when you need to fetch content from a known web page. "
- "For general information searches, use webSearch instead."
- ),
- parameters={
- "type": "object",
- "properties": {
- "url": {"type": "string", "description": "The URL to read"},
- },
- "required": ["url"]
- },
- readOnly=True
- )
-
- registry.register(
- "translateText", _translateText,
- description=(
- "Translate text to a target language using Google Cloud Translation. "
- "More efficient than AI translation for large text volumes. "
- "Use ISO language codes (e.g. 'en', 'de', 'fr', 'es', 'it', 'pt', 'zh', 'ja', 'ko', 'ar')."
- ),
- parameters={
- "type": "object",
- "properties": {
- "text": {"type": "string", "description": "Text to translate"},
- "targetLanguage": {"type": "string", "description": "Target language ISO code (e.g. 'en', 'de', 'fr')"},
- "sourceLanguage": {"type": "string", "description": "Source language ISO code (default: auto-detect)"},
- },
- "required": ["text", "targetLanguage"]
- },
- readOnly=True
- )
-
- # ---- Phase 2: deleteFolder, renameFolder, moveFolder, copyFile, editFile ----
-
- async def _deleteFolder(args: Dict[str, Any], context: Dict[str, Any]):
- folderId = args.get("folderId", "")
- recursive = args.get("recursive", False)
- if not folderId:
- return ToolResult(toolCallId="", toolName="deleteFolder", success=False, error="folderId is required")
- try:
- chatService = services.chat
- result = chatService.interfaceDbComponent.deleteFolder(folderId, recursive=recursive)
- summary = f"Deleted {result.get('deletedFolders', 1)} folder(s) and {result.get('deletedFiles', 0)} file(s)"
- return ToolResult(
- toolCallId="", toolName="deleteFolder", success=True, data=summary,
- sideEvents=[{"type": "folderDeleted", "data": {"folderId": folderId, **result}}],
- )
- except Exception as e:
- return ToolResult(toolCallId="", toolName="deleteFolder", success=False, error=str(e))
-
- async def _renameFolder(args: Dict[str, Any], context: Dict[str, Any]):
- folderId = args.get("folderId", "")
- newName = args.get("newName", "")
- if not folderId or not newName:
- return ToolResult(toolCallId="", toolName="renameFolder", success=False, error="folderId and newName are required")
- try:
- chatService = services.chat
- chatService.interfaceDbComponent.renameFolder(folderId, newName)
- return ToolResult(
- toolCallId="", toolName="renameFolder", success=True,
- data=f"Folder {folderId} renamed to '{newName}'",
- sideEvents=[{"type": "folderUpdated", "data": {"folderId": folderId, "name": newName}}],
- )
- except Exception as e:
- return ToolResult(toolCallId="", toolName="renameFolder", success=False, error=str(e))
-
- async def _moveFolder(args: Dict[str, Any], context: Dict[str, Any]):
- folderId = args.get("folderId", "")
- targetParentId = args.get("targetParentId")
- if not folderId:
- return ToolResult(toolCallId="", toolName="moveFolder", success=False, error="folderId is required")
- try:
- chatService = services.chat
- chatService.interfaceDbComponent.moveFolder(folderId, targetParentId)
- return ToolResult(
- toolCallId="", toolName="moveFolder", success=True,
- data=f"Folder {folderId} moved to {targetParentId or 'root'}",
- sideEvents=[{"type": "folderUpdated", "data": {"folderId": folderId, "parentId": targetParentId}}],
- )
- except Exception as e:
- return ToolResult(toolCallId="", toolName="moveFolder", success=False, error=str(e))
-
- async def _copyFile(args: Dict[str, Any], context: Dict[str, Any]):
- fileId = args.get("fileId", "")
- if not fileId:
- return ToolResult(toolCallId="", toolName="copyFile", success=False, error="fileId is required")
- try:
- chatService = services.chat
- copiedFile = chatService.interfaceDbComponent.copyFile(
- fileId,
- targetFolderId=args.get("targetFolderId"),
- newFileName=args.get("newFileName"),
- )
- return ToolResult(
- toolCallId="", toolName="copyFile", success=True,
- data=f"File copied as '{copiedFile.fileName}' (id: {copiedFile.id})",
- sideEvents=[{
- "type": "fileCreated",
- "data": {"fileId": copiedFile.id, "fileName": copiedFile.fileName,
- "mimeType": copiedFile.mimeType, "fileSize": copiedFile.fileSize},
- }],
- )
- except Exception as e:
- return ToolResult(toolCallId="", toolName="copyFile", success=False, error=str(e))
-
- async def _replaceInFile(args: Dict[str, Any], context: Dict[str, Any]):
- fileId = args.get("fileId", "")
- oldText = args.get("oldText", "")
- newText = args.get("newText", "")
- replaceAll = args.get("replaceAll", False)
- if not fileId or not oldText:
- return ToolResult(toolCallId="", toolName="replaceInFile", success=False, error="fileId and oldText are required")
- try:
- chatService = services.chat
- dbMgmt = chatService.interfaceDbComponent
- file = dbMgmt.getFile(fileId)
- if not file:
- return ToolResult(toolCallId="", toolName="replaceInFile", success=False, error=f"File {fileId} not found")
- if not dbMgmt.isTextMimeType(file.mimeType):
- return ToolResult(
- toolCallId="", toolName="replaceInFile", success=False,
- error=f"Cannot edit binary file ({file.mimeType}). Only text-based files are supported."
- )
- rawData = dbMgmt.getFileData(fileId)
- if not rawData:
- return ToolResult(toolCallId="", toolName="replaceInFile", success=False, error="File has no content")
- try:
- oldContent = rawData.decode("utf-8")
- except UnicodeDecodeError:
- return ToolResult(toolCallId="", toolName="replaceInFile", success=False, error="File content is not valid UTF-8 text")
-
- count = oldContent.count(oldText)
- if count == 0:
- return ToolResult(
- toolCallId="", toolName="replaceInFile", success=False,
- error="oldText not found in file. Use readFile or searchInFileContent to verify the exact text."
- )
- if count > 1 and not replaceAll:
- return ToolResult(
- toolCallId="", toolName="replaceInFile", success=False,
- error=f"oldText found {count} times. Set replaceAll=true or provide more surrounding context to make it unique."
- )
-
- newContent = oldContent.replace(oldText, newText) if replaceAll else oldContent.replace(oldText, newText, 1)
-
- editId = str(_uuid.uuid4())
- label = f"all {count} occurrences" if replaceAll else "1 occurrence"
- return ToolResult(
- toolCallId="", toolName="replaceInFile", success=True,
- data=f"Edit proposed for '{file.fileName}': replaced {label}. Waiting for user review.",
- sideEvents=[{
- "type": "fileEditProposal",
- "data": {
- "id": editId,
- "fileId": fileId,
- "fileName": file.fileName,
- "mimeType": file.mimeType,
- "oldContent": oldContent,
- "newContent": newContent,
- },
- }],
- )
- except Exception as e:
- return ToolResult(toolCallId="", toolName="replaceInFile", success=False, error=str(e))
-
- registry.register(
- "deleteFolder", _deleteFolder,
- description="Delete a folder from the local workspace. Set recursive=true to delete all contents.",
- parameters={
- "type": "object",
- "properties": {
- "folderId": {"type": "string", "description": "The folder ID to delete"},
- "recursive": {"type": "boolean", "description": "If true, delete folder and all contents (files and subfolders). Default: false"},
- },
- "required": ["folderId"]
- },
- readOnly=False
- )
-
- registry.register(
- "renameFolder", _renameFolder,
- description="Rename a folder in the local workspace.",
- parameters={
- "type": "object",
- "properties": {
- "folderId": {"type": "string", "description": "The folder ID to rename"},
- "newName": {"type": "string", "description": "New folder name"},
- },
- "required": ["folderId", "newName"]
- },
- readOnly=False
- )
-
- registry.register(
- "moveFolder", _moveFolder,
- description="Move a folder to a different parent in the local workspace.",
- parameters={
- "type": "object",
- "properties": {
- "folderId": {"type": "string", "description": "The folder ID to move"},
- "targetParentId": {"type": "string", "description": "Target parent folder ID (null/omit for root)"},
- },
- "required": ["folderId"]
- },
- readOnly=False
- )
-
- registry.register(
- "copyFile", _copyFile,
- description="Create an independent copy of a file in the local workspace.",
- parameters={
- "type": "object",
- "properties": {
- "fileId": {"type": "string", "description": "The file ID to copy"},
- "targetFolderId": {"type": "string", "description": "Target folder for the copy (default: same folder)"},
- "newFileName": {"type": "string", "description": "New file name (default: same name, auto-numbered if duplicate)"},
- },
- "required": ["fileId"]
- },
- readOnly=False
- )
-
- registry.register(
- "replaceInFile", _replaceInFile,
- description=(
- "Replace specific text in an existing file. The edit is shown to the user for "
- "review (accept/reject) before being applied. Provide enough surrounding context "
- "in oldText to make the match unique (at least 2-3 lines). "
- "Use readFile or searchInFileContent first to identify the exact text to replace."
- ),
- parameters={
- "type": "object",
- "properties": {
- "fileId": {"type": "string", "description": "The file ID to edit"},
- "oldText": {"type": "string", "description": "Exact text to find and replace (must be unique unless replaceAll=true)"},
- "newText": {"type": "string", "description": "The replacement text"},
- "replaceAll": {"type": "boolean", "description": "Replace all occurrences (default: false)"},
- },
- "required": ["fileId", "oldText", "newText"]
- },
- readOnly=False
- )
-
- # ---- Connection tools (external data sources) ----
-
- def _buildResolverDb():
- """Build a DB adapter that ConnectorResolver can use to load UserConnections.
- interfaceDbApp has getUserConnectionById; ConnectorResolver expects getUserConnection."""
- chatService = services.chat
- appIf = getattr(chatService, "interfaceDbApp", None)
- if appIf and hasattr(appIf, "getUserConnectionById"):
- class _Adapter:
- def __init__(self, app):
- self._app = app
- def getUserConnection(self, connectionId: str):
- return self._app.getUserConnectionById(connectionId)
- return _Adapter(appIf)
- return getattr(chatService, "interfaceDbComponent", None)
-
- async def _listConnections(args: Dict[str, Any], context: Dict[str, Any]):
- try:
- chatService = services.chat
- connections = chatService.getUserConnections() if hasattr(chatService, "getUserConnections") else []
- if not connections:
- return ToolResult(toolCallId="", toolName="listConnections", success=True, data="No connections available.")
- lines = []
- for conn in connections:
- connId = conn.get("id", "?") if isinstance(conn, dict) else getattr(conn, "id", "?")
- authority = conn.get("authority", "?") if isinstance(conn, dict) else getattr(conn, "authority", "?")
- email = conn.get("externalEmail", "") if isinstance(conn, dict) else getattr(conn, "externalEmail", "")
- lines.append(f"- {authority} ({email}) id: {connId}")
- return ToolResult(toolCallId="", toolName="listConnections", success=True, data="\n".join(lines))
- except Exception as e:
- return ToolResult(toolCallId="", toolName="listConnections", success=False, error=str(e))
-
- async def _uploadToExternal(args: Dict[str, Any], context: Dict[str, Any]):
- connectionId = args.get("connectionId", "")
- service = args.get("service", "")
- path = args.get("path", "")
- fileId = args.get("fileId", "")
- if not connectionId or not service or not path or not fileId:
- return ToolResult(toolCallId="", toolName="uploadToExternal", success=False, error="connectionId, service, path, and fileId are required")
- try:
- from modules.connectors.connectorResolver import ConnectorResolver
- resolver = ConnectorResolver(
- services.getService("security"),
- _buildResolverDb(),
- )
- adapter = await resolver.resolveService(connectionId, service)
- chatService = services.chat
- fileContent = chatService.getFileContent(fileId)
- if not fileContent:
- return ToolResult(toolCallId="", toolName="uploadToExternal", success=False, error="File not found")
- fileData = fileContent.get("data", b"") if isinstance(fileContent, dict) else b""
- if isinstance(fileData, str):
- fileData = fileData.encode("utf-8")
- fileName = fileContent.get("fileName", "file") if isinstance(fileContent, dict) else "file"
- result = await adapter.upload(path, fileData, fileName)
- return ToolResult(toolCallId="", toolName="uploadToExternal", success=True, data=str(result))
- except Exception as e:
- return ToolResult(toolCallId="", toolName="uploadToExternal", success=False, error=str(e))
-
- async def _sendMail(args: Dict[str, Any], context: Dict[str, Any]):
- import base64 as _b64
-
- connectionId = args.get("connectionId", "")
- to = args.get("to", [])
- subject = args.get("subject", "")
- body = args.get("body", "")
- bodyType = "HTML" if args.get("bodyType", "text").lower() == "html" else "Text"
- draft = args.get("draft", False)
- attachmentFileIds = args.get("attachmentFileIds") or []
-
- if not connectionId or not to or not subject:
- return ToolResult(toolCallId="", toolName="sendMail", success=False, error="connectionId, to, and subject are required")
- try:
- graphAttachments: List[Dict[str, Any]] = []
- if attachmentFileIds:
- chatService = services.chat
- dbMgmt = chatService.interfaceDbComponent
- for fid in attachmentFileIds:
- fileRow = dbMgmt.getFile(fid)
- if not fileRow:
- return ToolResult(toolCallId="", toolName="sendMail", success=False, error=f"Attachment file not found: {fid}")
- rawBytes = dbMgmt.getFileData(fid)
- if not rawBytes:
- return ToolResult(toolCallId="", toolName="sendMail", success=False, error=f"Attachment file has no data: {fid}")
- graphAttachments.append({
- "name": fileRow.fileName,
- "contentBytes": _b64.b64encode(rawBytes).decode("ascii"),
- "contentType": getattr(fileRow, "mimeType", "application/octet-stream"),
- })
-
- from modules.connectors.connectorResolver import ConnectorResolver
- resolver = ConnectorResolver(
- services.getService("security"),
- _buildResolverDb(),
- )
- adapter = await resolver.resolveService(connectionId, "outlook")
-
- if draft and hasattr(adapter, "createDraft"):
- result = await adapter.createDraft(
- to=to, subject=subject, body=body, bodyType=bodyType,
- cc=args.get("cc"), attachments=graphAttachments or None,
- )
- return ToolResult(toolCallId="", toolName="sendMail", success=True, data=str(result))
-
- if hasattr(adapter, "sendMail"):
- result = await adapter.sendMail(
- to=to, subject=subject, body=body, bodyType=bodyType,
- cc=args.get("cc"), attachments=graphAttachments or None,
- )
- return ToolResult(toolCallId="", toolName="sendMail", success=True, data=str(result))
- return ToolResult(toolCallId="", toolName="sendMail", success=False, error="Mail not supported by this adapter")
- except Exception as e:
- return ToolResult(toolCallId="", toolName="sendMail", success=False, error=str(e))
-
- _connToolParams = {
- "connectionId": {"type": "string", "description": "UserConnection ID"},
- "service": {"type": "string", "description": "Service name (sharepoint, outlook, drive, etc.)"},
- }
-
- registry.register(
- "listConnections", _listConnections,
- description="List the user's external connections (SharePoint, OneDrive, Outlook, etc.) and their IDs. Use with browseDataSource/uploadToExternal.",
- parameters={"type": "object", "properties": {}},
- readOnly=True,
- )
-
- registry.register(
- "uploadToExternal", _uploadToExternal,
- description=(
- "Upload a local file to an external storage via connectionId+service. "
- "Use listConnections to find available connections."
- ),
- parameters={
- "type": "object",
- "properties": {
- **_connToolParams,
- "path": {"type": "string", "description": "Destination path on the external service"},
- "fileId": {"type": "string", "description": "Local file ID to upload"},
- },
- "required": ["connectionId", "service", "path", "fileId"],
- },
- readOnly=False,
- )
-
- registry.register(
- "sendMail", _sendMail,
- description=(
- "Send or draft an email via a connected mail service (Outlook). "
- "Supports HTML body and file attachments from the workspace. "
- "Set draft=true to save as draft without sending. "
- "Use listConnections to find the connectionId."
- ),
- parameters={
- "type": "object",
- "properties": {
- "connectionId": {"type": "string", "description": "UserConnection ID"},
- "to": {"type": "array", "items": {"type": "string"}, "description": "Recipient email addresses"},
- "subject": {"type": "string", "description": "Email subject"},
- "body": {"type": "string", "description": "Email body — plain text or HTML markup"},
- "bodyType": {"type": "string", "enum": ["text", "html"], "description": "Body format: 'text' (default) or 'html'"},
- "cc": {"type": "array", "items": {"type": "string"}, "description": "CC addresses"},
- "attachmentFileIds": {
- "type": "array", "items": {"type": "string"},
- "description": "File IDs from the workspace to attach (use listFiles to find IDs)",
- },
- "draft": {"type": "boolean", "description": "If true, save as draft in Drafts folder instead of sending"},
- },
- "required": ["connectionId", "to", "subject", "body"],
- },
- readOnly=False,
- )
-
- # ---- DataSource convenience tools ----
- _SOURCE_TYPE_TO_SERVICE = {
- "sharepointFolder": "sharepoint",
- "onedriveFolder": "onedrive",
- "outlookFolder": "outlook",
- "googleDriveFolder": "drive",
- "gmailFolder": "gmail",
- "ftpFolder": "files",
- "clickupList": "clickup",
- }
-
- async def _resolveDataSource(dsId: str):
- """Resolve a DataSource record and return (connectionId, service, path, neutralize) or raise."""
- chatService = services.chat
- ds = chatService.getDataSource(dsId) if hasattr(chatService, "getDataSource") else None
- if not ds:
- raise ValueError(f"DataSource '{dsId}' not found")
- connectionId = ds.get("connectionId", "")
- sourceType = ds.get("sourceType", "")
- path = ds.get("path", "/")
- label = ds.get("label", "")
- neutralize = bool(ds.get("neutralize", False))
- service = _SOURCE_TYPE_TO_SERVICE.get(sourceType, sourceType)
- if not connectionId:
- raise ValueError(f"DataSource '{dsId}' has no connectionId")
- logger.info(f"Resolved DataSource '{dsId}' ({label}): sourceType={sourceType}, service={service}, connectionId={connectionId}, path={path[:80]}, neutralize={neutralize}")
- return connectionId, service, path, neutralize
-
- _MAIL_SERVICES = {"outlook", "gmail"}
-
- async def _browseDataSource(args: Dict[str, Any], context: Dict[str, Any]):
- dsId = args.get("dataSourceId", "")
- subPath = args.get("subPath", "")
- directConnId = args.get("connectionId", "")
- directService = args.get("service", "")
- if not dsId and not (directConnId and directService):
- return ToolResult(toolCallId="", toolName="browseDataSource", success=False,
- error="Provide either dataSourceId OR connectionId+service")
- try:
- if dsId:
- connectionId, service, basePath, _neutralize = await _resolveDataSource(dsId)
- else:
- connectionId, service, basePath = directConnId, directService, args.get("path", "/")
- if subPath:
- if subPath.startswith("/"):
- browsePath = subPath
- else:
- browsePath = f"{basePath.rstrip('/')}/{subPath}"
- else:
- browsePath = basePath
- from modules.connectors.connectorResolver import ConnectorResolver
- resolver = ConnectorResolver(
- services.getService("security"),
- _buildResolverDb(),
- )
- adapter = await resolver.resolveService(connectionId, service)
- entries = await adapter.browse(browsePath, filter=args.get("filter"))
- if not entries:
- return ToolResult(toolCallId="", toolName="browseDataSource", success=True, data="Empty directory.")
- lines = []
- for e in entries:
- prefix = "[DIR]" if e.isFolder else "[FILE]"
- sizeInfo = f" ({e.size} bytes)" if e.size else ""
- lines.append(f"- {prefix} {e.name}{sizeInfo} path: {e.path}")
- result = "\n".join(lines)
- if service in _MAIL_SERVICES:
- result += "\n\nIMPORTANT: These are email subjects only. To read the full email content, use downloadFromDataSource with the path, then readFile on the returned file ID."
- return ToolResult(toolCallId="", toolName="browseDataSource", success=True, data=result)
- except Exception as e:
- return ToolResult(toolCallId="", toolName="browseDataSource", success=False, error=str(e))
-
- async def _searchDataSource(args: Dict[str, Any], context: Dict[str, Any]):
- dsId = args.get("dataSourceId", "")
- directConnId = args.get("connectionId", "")
- directService = args.get("service", "")
- query = args.get("query", "")
- if not query:
- return ToolResult(toolCallId="", toolName="searchDataSource", success=False, error="query is required")
- if not dsId and not (directConnId and directService):
- return ToolResult(toolCallId="", toolName="searchDataSource", success=False,
- error="Provide either dataSourceId OR connectionId+service")
- try:
- if dsId:
- connectionId, service, basePath, _neutralize = await _resolveDataSource(dsId)
- else:
- connectionId, service, basePath = directConnId, directService, args.get("path", "/")
- from modules.connectors.connectorResolver import ConnectorResolver
- resolver = ConnectorResolver(
- services.getService("security"),
- _buildResolverDb(),
- )
- adapter = await resolver.resolveService(connectionId, service)
- entries = await adapter.search(query, path=basePath)
- if not entries:
- return ToolResult(toolCallId="", toolName="searchDataSource", success=True, data="No results found.")
- lines = [f"- {e.name} (path: {e.path})" for e in entries]
- result = "\n".join(lines)
- if service in _MAIL_SERVICES:
- result += "\n\nIMPORTANT: These are email subjects only. To read the full email content, use downloadFromDataSource with the path, then readFile on the returned file ID."
- return ToolResult(toolCallId="", toolName="searchDataSource", success=True, data=result)
- except Exception as e:
- return ToolResult(toolCallId="", toolName="searchDataSource", success=False, error=str(e))
-
- async def _downloadFromDataSource(args: Dict[str, Any], context: Dict[str, Any]):
- dsId = args.get("dataSourceId", "")
- directConnId = args.get("connectionId", "")
- directService = args.get("service", "")
- filePath = args.get("filePath", "")
- fileName = args.get("fileName", "")
- if not filePath:
- return ToolResult(toolCallId="", toolName="downloadFromDataSource", success=False, error="filePath is required")
- if not dsId and not (directConnId and directService):
- return ToolResult(toolCallId="", toolName="downloadFromDataSource", success=False,
- error="Provide either dataSourceId OR connectionId+service")
- try:
- from modules.connectors.connectorResolver import ConnectorResolver
- from modules.connectors.connectorProviderBase import DownloadResult as _DR
- _sourceNeutralize = False
- if dsId:
- connectionId, service, basePath, _sourceNeutralize = await _resolveDataSource(dsId)
- else:
- connectionId, service, basePath = directConnId, directService, "/"
- fullPath = filePath if filePath.startswith("/") else f"{basePath.rstrip('/')}/{filePath}"
- resolver = ConnectorResolver(
- services.getService("security"),
- _buildResolverDb(),
- )
- adapter = await resolver.resolveService(connectionId, service)
- result = await adapter.download(fullPath)
-
- if isinstance(result, _DR):
- fileBytes = result.data
- fileName = result.fileName or fileName
- else:
- fileBytes = result
-
- if not fileBytes:
- return ToolResult(toolCallId="", toolName="downloadFromDataSource", success=False, error="Download returned empty")
-
- if not fileName or "." not in fileName:
- pathSegment = fullPath.split("/")[-1] or "downloaded_file"
- fileName = fileName or pathSegment
- if "." not in fileName:
- try:
- entries = await adapter.browse(basePath)
- for entry in entries:
- if getattr(entry, "path", "") == filePath or getattr(entry, "path", "").endswith(filePath):
- if "." in entry.name:
- fileName = entry.name
- break
- except Exception:
- pass
- if "." not in fileName:
- if fileBytes[:4] == b"%PDF":
- fileName = f"{fileName}.pdf"
- elif fileBytes[:2] == b"PK":
- fileName = f"{fileName}.zip"
- chatService = services.chat
- fileItem, _ = chatService.interfaceDbComponent.saveUploadedFile(fileBytes, fileName)
- fiId = context.get("featureInstanceId") or (services.featureInstanceId if services else "")
- if fiId:
- chatService.interfaceDbComponent.updateFile(fileItem.id, {"featureInstanceId": fiId})
- if _sourceNeutralize:
- chatService.interfaceDbComponent.updateFile(fileItem.id, {"neutralize": True})
- tempFolderId = _getOrCreateTempFolder(chatService)
- if tempFolderId:
- chatService.interfaceDbComponent.updateFile(fileItem.id, {"folderId": tempFolderId})
- ext = fileName.rsplit(".", 1)[-1].lower() if "." in fileName else ""
- hint = "Use readFile to read the text content." if ext in ("doc", "docx", "txt", "csv", "json", "xml", "html", "md", "rtf", "odt", "xls", "xlsx", "pptx", "pdf", "eml", "msg") else "Use readFile to access the content."
- return ToolResult(
- toolCallId="", toolName="downloadFromDataSource", success=True,
- data=f"Downloaded '{fileName}' ({len(fileBytes)} bytes) → local file id: {fileItem.id}. {hint}"
- )
- except Exception as e:
- return ToolResult(toolCallId="", toolName="downloadFromDataSource", success=False, error=str(e))
-
- registry.register(
- "browseDataSource", _browseDataSource,
- description=(
- "Browse files and folders in a data source. Accepts either:\n"
- "- dataSourceId (for attached data sources shown in the prompt), OR\n"
- "- connectionId + service (for direct connection access via listConnections)."
- ),
- parameters={
- "type": "object",
- "properties": {
- "dataSourceId": {"type": "string", "description": "DataSource ID (from attached data sources)"},
- "connectionId": {"type": "string", "description": "UserConnection ID (alternative to dataSourceId)"},
- "service": {"type": "string", "description": "Service name (alternative to dataSourceId, e.g. sharepoint, onedrive)"},
- "path": {"type": "string", "description": "Root path (used with connectionId+service)"},
- "subPath": {"type": "string", "description": "Sub-path within the data source to browse"},
- "filter": {"type": "string", "description": "Filter pattern (e.g. '*.pdf')"},
- },
- },
- readOnly=True,
- )
-
- registry.register(
- "searchDataSource", _searchDataSource,
- description=(
- "Search for files within a data source. Accepts either dataSourceId OR connectionId+service."
- ),
- parameters={
- "type": "object",
- "properties": {
- "dataSourceId": {"type": "string", "description": "DataSource ID"},
- "connectionId": {"type": "string", "description": "UserConnection ID (alternative to dataSourceId)"},
- "service": {"type": "string", "description": "Service name (alternative to dataSourceId)"},
- "path": {"type": "string", "description": "Scope path (used with connectionId+service)"},
- "query": {"type": "string", "description": "Search query"},
- },
- "required": ["query"],
- },
- readOnly=True,
- )
-
- registry.register(
- "downloadFromDataSource", _downloadFromDataSource,
- description=(
- "Download a file or email from a data source into local storage. Returns a local file ID "
- "to read with readFile. Accepts either dataSourceId OR connectionId+service. "
- "For email sources (Outlook, Gmail), browse/search only return subjects -- use this to get full content."
- ),
- parameters={
- "type": "object",
- "properties": {
- "dataSourceId": {"type": "string", "description": "DataSource ID"},
- "connectionId": {"type": "string", "description": "UserConnection ID (alternative to dataSourceId)"},
- "service": {"type": "string", "description": "Service name (alternative to dataSourceId)"},
- "filePath": {"type": "string", "description": "Path of the file to download (from browseDataSource results)"},
- "fileName": {"type": "string", "description": "File name with extension (e.g. 'report.pdf')"},
- },
- "required": ["dataSourceId", "filePath"],
- },
- readOnly=False,
- )
-
- # ---- Document tools (Smart Documents / Container Handling) ----
-
- async def _browseContainer(args: Dict[str, Any], context: Dict[str, Any]):
- fileId = args.get("fileId", "")
- if not fileId:
- return ToolResult(toolCallId="", toolName="browseContainer", success=False, error="fileId is required")
- try:
- knowledgeService = services.getService("knowledge")
- index = knowledgeService.getFileContentIndex(fileId)
- if not index:
- return ToolResult(toolCallId="", toolName="browseContainer", success=True, data="No content index available for this file. It may not have been indexed yet.")
- structure = index.get("structure", {}) if isinstance(index, dict) else {}
- objectSummary = index.get("objectSummary", []) if isinstance(index, dict) else []
- totalObjects = index.get("totalObjects", 0) if isinstance(index, dict) else 0
-
- result = f"File: {index.get('fileName', '?')} ({index.get('mimeType', '?')})\n"
- result += f"Total content objects: {totalObjects}\n"
-
- sections = structure.get("sections", [])
- if sections:
- result += "\nSections:\n"
- for s in sections:
- result += f" [{s.get('id', '?')}] {s.get('title', 'Untitled')} (pages {s.get('startPage', '?')}-{s.get('endPage', '?')})\n"
-
- if structure.get("pageMap"):
- pages = len(structure["pageMap"])
- result += f"\nPages: {pages}\n"
- imgCount = structure.get("imageCount", 0)
- tableCount = structure.get("tableCount", 0)
- if imgCount:
- result += f"Images: {imgCount}\n"
- if tableCount:
- result += f"Tables: {tableCount}\n"
-
- if structure.get("sheetMap"):
- result += "\nSheets:\n"
- for s in structure["sheetMap"]:
- result += f" {s.get('sheetName', '?')} ({s.get('rows', '?')} rows x {s.get('columns', '?')} cols)\n"
-
- if structure.get("slideMap"):
- result += "\nSlides:\n"
- for s in structure["slideMap"]:
- result += f" Slide {s.get('slideIndex', 0) + 1}: {s.get('title', '(no title)')}\n"
-
- return ToolResult(toolCallId="", toolName="browseContainer", success=True, data=result)
- except Exception as e:
- return ToolResult(toolCallId="", toolName="browseContainer", success=False, error=str(e))
-
- async def _readContentObjects(args: Dict[str, Any], context: Dict[str, Any]):
- fileId = args.get("fileId", "")
- if not fileId:
- return ToolResult(toolCallId="", toolName="readContentObjects", success=False, error="fileId is required")
- try:
- knowledgeService = services.getService("knowledge")
- filterDict = {}
- if args.get("pageIndex") is not None:
- filterDict["pageIndex"] = args["pageIndex"]
- if args.get("contentType"):
- filterDict["contentType"] = args["contentType"]
- if args.get("sectionId"):
- filterDict["sectionId"] = args["sectionId"]
-
- objects = await knowledgeService.readContentObjects(fileId, filterDict)
- if not objects:
- return ToolResult(toolCallId="", toolName="readContentObjects", success=True, data="No content objects found with the given filter.")
-
- result = f"Found {len(objects)} content objects:\n\n"
- for obj in objects[:20]:
- data = obj.get("data", "")
- cType = obj.get("contentType", "?")
- ref = obj.get("contextRef", {})
- location = ref.get("location", "") if isinstance(ref, dict) else ""
- preview = data[:300] if cType == "text" else f"[{cType} data, {len(data)} chars]"
- result += f"[{cType}] {location}: {preview}\n\n"
-
- if len(objects) > 20:
- result += f"... and {len(objects) - 20} more objects"
-
- return ToolResult(toolCallId="", toolName="readContentObjects", success=True, data=result)
- except Exception as e:
- return ToolResult(toolCallId="", toolName="readContentObjects", success=False, error=str(e))
-
- async def _extractContainerItem(args: Dict[str, Any], context: Dict[str, Any]):
- fileId = args.get("fileId", "")
- containerPath = args.get("containerPath", "")
- if not fileId or not containerPath:
- return ToolResult(toolCallId="", toolName="extractContainerItem", success=False, error="fileId and containerPath are required")
- try:
- knowledgeService = services.getService("knowledge")
- result = await knowledgeService.extractContainerItem(fileId, containerPath)
- if result:
- return ToolResult(toolCallId="", toolName="extractContainerItem", success=True, data=str(result))
- return ToolResult(toolCallId="", toolName="extractContainerItem", success=False, error=f"Item '{containerPath}' not found in container index for file {fileId}. On-demand extraction is not yet implemented.")
- except Exception as e:
- return ToolResult(toolCallId="", toolName="extractContainerItem", success=False, error=str(e))
-
- async def _summarizeContent(args: Dict[str, Any], context: Dict[str, Any]):
- fileId = args.get("fileId", "")
- if not fileId:
- return ToolResult(toolCallId="", toolName="summarizeContent", success=False, error="fileId is required")
- try:
- knowledgeService = services.getService("knowledge")
- filterDict = {}
- if args.get("sectionId"):
- filterDict["sectionId"] = args["sectionId"]
- if args.get("pageIndex") is not None:
- filterDict["pageIndex"] = args["pageIndex"]
- if args.get("contentType"):
- filterDict["contentType"] = args["contentType"]
-
- objects = await knowledgeService.readContentObjects(fileId, filterDict)
- if not objects:
- return ToolResult(toolCallId="", toolName="summarizeContent", success=True, data="No content found to summarize.")
-
- textParts = [obj.get("data", "") for obj in objects if obj.get("contentType") != "image"]
- combinedText = "\n\n".join(textParts)[:6000]
-
- aiService = services.ai
- from modules.datamodels.datamodelAi import AiCallRequest, AiCallOptions, OperationTypeEnum
- summaryRequest = AiCallRequest(
- prompt=f"Summarize the following content concisely:\n\n{combinedText}",
- options=AiCallOptions(operationType=OperationTypeEnum.DATA_ANALYSE),
- )
- response = await aiService.callAi(summaryRequest)
- return ToolResult(toolCallId="", toolName="summarizeContent", success=True, data=response.content)
- except Exception as e:
- return ToolResult(toolCallId="", toolName="summarizeContent", success=False, error=str(e))
-
- registry.register(
- "browseContainer", _browseContainer,
- description="Browse the structural index of a document (pages, sections, sheets, slides). Use before readContentObjects for targeted reading.",
- parameters={
- "type": "object",
- "properties": {"fileId": {"type": "string", "description": "The file ID to browse"}},
- "required": ["fileId"],
- },
- readOnly=True,
- )
-
- registry.register(
- "readContentObjects", _readContentObjects,
- description="Read extracted content objects from a file, optionally filtered by page, section, or type. Use browseContainer first to see the structure.",
- parameters={
- "type": "object",
- "properties": {
- "fileId": {"type": "string", "description": "The file ID"},
- "pageIndex": {"type": "integer", "description": "Filter by page index"},
- "sectionId": {"type": "string", "description": "Filter by section ID"},
- "contentType": {"type": "string", "description": "Filter by content type (text, image, etc.)"},
- },
- "required": ["fileId"],
- },
- readOnly=True,
- )
-
- registry.register(
- "extractContainerItem", _extractContainerItem,
- description="Extract a specific item from a container file (ZIP, nested file). Use browseContainer to see available items.",
- parameters={
- "type": "object",
- "properties": {
- "fileId": {"type": "string", "description": "The container file ID"},
- "containerPath": {"type": "string", "description": "Path within the container"},
- },
- "required": ["fileId", "containerPath"],
- },
- readOnly=True,
- )
-
- registry.register(
- "summarizeContent", _summarizeContent,
- description="Generate an AI-powered summary of a file's content. Optionally filter by section, page, or content type.",
- parameters={
- "type": "object",
- "properties": {
- "fileId": {"type": "string", "description": "The file ID"},
- "sectionId": {"type": "string", "description": "Optional: summarize only this section"},
- "pageIndex": {"type": "integer", "description": "Optional: summarize only this page"},
- "contentType": {"type": "string", "description": "Optional: filter by content type"},
- },
- "required": ["fileId"],
- },
- readOnly=True,
- )
-
- # ---- Vision tool ----
-
- async def _describeImage(args: Dict[str, Any], context: Dict[str, Any]):
- """Analyse an image using AI vision. Uses Knowledge Store chunks produced by Extractors."""
- fileId = args.get("fileId", "")
- prompt = args.get("prompt", "Describe this image in detail. Extract all visible text, tables, and data.")
- pageIndex = args.get("pageIndex")
-
- if not fileId:
- return ToolResult(toolCallId="", toolName="describeImage", success=False, error="fileId is required")
-
- try:
- import base64 as _b64
-
- imageData = None
- mimeType = "image/png"
-
- knowledgeService = services.getService("knowledge") if hasattr(services, "getService") else None
-
- # 1) Knowledge Store: image chunks already produced by PdfExtractor / ImageExtractor
- if knowledgeService:
- chunks = knowledgeService._knowledgeDb.getContentChunks(fileId)
- imageChunks = [c for c in (chunks or []) if c.get("contentType") == "image"]
- if pageIndex is not None:
- imageChunks = [c for c in imageChunks if c.get("contextRef", {}).get("pageIndex") == pageIndex]
- if imageChunks:
- imageData = imageChunks[0].get("data", "")
- chunkMime = imageChunks[0].get("contextRef", {}).get("mimeType")
- if chunkMime:
- mimeType = chunkMime
-
- # 2) File not yet indexed -> trigger extraction via ExtractionService, then retry
- if not imageData and knowledgeService and not knowledgeService.isFileIndexed(fileId):
- try:
- chatService = services.chat
- fileInfo = chatService.getFileInfo(fileId)
- fileContent = chatService.getFileContent(fileId)
- if fileContent and fileInfo:
- rawData = fileContent.get("data", "")
- if isinstance(rawData, str) and len(rawData) > 100:
- rawBytes = _b64.b64decode(rawData)
- elif isinstance(rawData, bytes):
- rawBytes = rawData
- else:
- rawBytes = None
-
- if rawBytes:
- from modules.serviceCenter.services.serviceExtraction.subRegistry import ExtractorRegistry
- from modules.serviceCenter.services.serviceExtraction.subPipeline import runExtraction
- from modules.datamodels.datamodelExtraction import ExtractionOptions
-
- fileMime = fileInfo.get("mimeType", "application/octet-stream")
- fileName = fileInfo.get("fileName", fileId)
- extracted = runExtraction(
- ExtractorRegistry(), None,
- rawBytes, fileName, fileMime, ExtractionOptions(),
- )
-
- contentObjects = []
- for part in extracted.parts:
- tg = (part.typeGroup or "").lower()
- ct = "image" if tg == "image" else "text"
- if not part.data or not part.data.strip():
- continue
- contentObjects.append({
- "contentObjectId": part.id,
- "contentType": ct,
- "data": part.data,
- "contextRef": {"containerPath": fileName, "location": part.label, **(part.metadata or {})},
- })
-
- if contentObjects:
- _diFiId, _diMId = _resolveFileScope(fileId, context)
- await knowledgeService.indexFile(
- fileId=fileId, fileName=fileName, mimeType=fileMime,
- userId=context.get("userId", ""), contentObjects=contentObjects,
- featureInstanceId=_diFiId,
- mandateId=_diMId,
- )
-
- chunks = knowledgeService._knowledgeDb.getContentChunks(fileId)
- imageChunks = [c for c in (chunks or []) if c.get("contentType") == "image"]
- if pageIndex is not None:
- imageChunks = [c for c in imageChunks if c.get("contextRef", {}).get("pageIndex") == pageIndex]
- if imageChunks:
- imageData = imageChunks[0].get("data", "")
- except Exception as extractErr:
- logger.warning(f"describeImage: on-demand extraction failed: {extractErr}")
-
- # 3) Direct image file (not a container) - use raw file data
- if not imageData:
- chatService = services.chat
- fileContent = chatService.getFileContent(fileId)
- if fileContent:
- fileMimeType = fileContent.get("mimeType", "")
- if fileMimeType.startswith("image/"):
- imageData = fileContent.get("data", "")
- mimeType = fileMimeType
-
- if not imageData:
- chatService = services.chat
- fileInfo = chatService.getFileInfo(fileId) if hasattr(chatService, "getFileInfo") else None
- fileName = fileInfo.get("fileName", fileId) if fileInfo else fileId
- fileMime = fileInfo.get("mimeType", "unknown") if fileInfo else "unknown"
- return ToolResult(toolCallId="", toolName="describeImage", success=False,
- error=f"No image data found in '{fileName}' (type: {fileMime}). "
- f"This file likely contains text, not images. Use readFile(fileId=\"{fileId}\") to access its text content.")
-
- try:
- rawHead = _b64.b64decode(imageData[:32])
- if rawHead[:3] == b"\xff\xd8\xff":
- mimeType = "image/jpeg"
- elif rawHead[:8] == b"\x89PNG\r\n\x1a\n":
- mimeType = "image/png"
- elif rawHead[:4] == b"GIF8":
- mimeType = "image/gif"
- elif rawHead[:4] == b"RIFF" and rawHead[8:12] == b"WEBP":
- mimeType = "image/webp"
- except Exception:
- pass
- dataUrl = f"data:{mimeType};base64,{imageData}"
- from modules.datamodels.datamodelAi import AiCallRequest, AiCallOptions, OperationTypeEnum as OTE
-
- _opType = OTE.IMAGE_ANALYSE
- try:
- from modules.datamodels.datamodelFiles import FileItem as _FileItemModel
- from modules.interfaces.interfaceDbManagement import ComponentObjects as _CO
- _fRow = _CO().db._loadRecord(_FileItemModel, fileId)
- if _fRow:
- _fGet = (lambda k, d=None: _fRow.get(k, d)) if isinstance(_fRow, dict) else (lambda k, d=None: getattr(_fRow, k, d))
- if bool(_fGet("neutralize", False)):
- _opType = OTE.NEUTRALIZATION_IMAGE
- logger.info(f"describeImage: file {fileId} has neutralize=True, using NEUTRALIZATION_IMAGE (internal models only)")
- except Exception:
- pass
-
- visionRequest = AiCallRequest(
- prompt=prompt,
- options=AiCallOptions(operationType=_opType),
- messages=[{"role": "user", "content": [
- {"type": "text", "text": prompt},
- {"type": "image_url", "image_url": {"url": dataUrl}},
- ]}],
- )
- visionResponse = await services.ai.callAi(visionRequest)
-
- if visionResponse.errorCount > 0:
- return ToolResult(toolCallId="", toolName="describeImage", success=False, error=visionResponse.content)
- return ToolResult(toolCallId="", toolName="describeImage", success=True, data=visionResponse.content)
-
- except Exception as e:
- return ToolResult(toolCallId="", toolName="describeImage", success=False, error=str(e))
-
- registry.register(
- "describeImage", _describeImage,
- description="Analyze an image using AI vision. Works with image files and images extracted from PDFs/DOCX/PPTX. Use for OCR, data extraction, and visual analysis.",
- parameters={
- "type": "object",
- "properties": {
- "fileId": {"type": "string", "description": "The file ID containing the image or document with images"},
- "prompt": {"type": "string", "description": "What to look for in the image (default: describe everything)"},
- "pageIndex": {"type": "integer", "description": "Filter images by page index (0-based, for multi-page documents)"},
- },
- "required": ["fileId"],
- },
- readOnly=True,
- )
-
- # ---- Document rendering tool ----
-
- def _markdownToDocumentJson(markdown: str, title: str, language: str = "de") -> Dict[str, Any]:
- """Convert markdown content to the standard document JSON format expected by renderers."""
- import re as _re
-
- sections = []
- order = 0
- lines = markdown.split("\n")
- i = 0
-
- def _nextId():
- nonlocal order
- order += 1
- return f"s_{order}"
-
- while i < len(lines):
- line = lines[i]
-
- # --- Headings ---
- headingMatch = _re.match(r'^(#{1,6})\s+(.+)', line)
- if headingMatch:
- level = len(headingMatch.group(1))
- text = headingMatch.group(2).strip()
- sections.append({
- "id": _nextId(), "content_type": "heading", "order": order,
- "elements": [{"content": {"text": text, "level": level}}],
- })
- i += 1
- continue
-
- # --- Fenced code blocks ---
- codeMatch = _re.match(r'^```(\w*)', line)
- if codeMatch:
- lang = codeMatch.group(1) or "text"
- codeLines = []
- i += 1
- while i < len(lines) and not lines[i].startswith("```"):
- codeLines.append(lines[i])
- i += 1
- i += 1
- sections.append({
- "id": _nextId(), "content_type": "code_block", "order": order,
- "elements": [{"content": {"code": "\n".join(codeLines), "language": lang}}],
- })
- continue
-
- # --- Tables ---
- tableMatch = _re.match(r'^\|(.+)\|$', line)
- if tableMatch and (i + 1) < len(lines) and _re.match(r'^\|[\s\-:|]+\|$', lines[i + 1]):
- headerCells = [c.strip() for c in tableMatch.group(1).split("|")]
- i += 2
- rows = []
- while i < len(lines) and _re.match(r'^\|(.+)\|$', lines[i]):
- rowCells = [c.strip() for c in lines[i][1:-1].split("|")]
- rows.append(rowCells)
- i += 1
- sections.append({
- "id": _nextId(), "content_type": "table", "order": order,
- "elements": [{"content": {"headers": headerCells, "rows": rows}}],
- })
- continue
-
- # --- Bullet / numbered lists ---
- listMatch = _re.match(r'^(\s*)([-*+]|\d+[.)]) (.+)', line)
- if listMatch:
- isNumbered = bool(_re.match(r'\d+[.)]', listMatch.group(2)))
- items = []
- while i < len(lines) and _re.match(r'^(\s*)([-*+]|\d+[.)]) (.+)', lines[i]):
- m = _re.match(r'^(\s*)([-*+]|\d+[.)]) (.+)', lines[i])
- items.append({"text": m.group(3).strip()})
- i += 1
- sections.append({
- "id": _nextId(), "content_type": "bullet_list", "order": order,
- "elements": [{"content": {"items": items, "list_type": "numbered" if isNumbered else "bullet"}}],
- })
- continue
-
- # --- Empty lines (skip) ---
- if not line.strip():
- i += 1
- continue
-
- # --- Images:  or  ---
- imgMatch = _re.match(r'^!\[([^\]]*)\]\(([^)]+)\)', line)
- if imgMatch:
- altText = imgMatch.group(1).strip() or "Image"
- src = imgMatch.group(2).strip()
- fileId = ""
- if src.startswith("file:"):
- fileId = src[5:]
- sections.append({
- "id": _nextId(), "content_type": "image", "order": order,
- "elements": [{
- "content": {
- "altText": altText,
- "base64Data": "",
- "_fileRef": fileId,
- "_srcUrl": src if not fileId else "",
- }
- }],
- })
- i += 1
- continue
-
- # --- Paragraph (collect consecutive non-empty lines) ---
- paraLines = []
- while i < len(lines) and lines[i].strip() and not _re.match(r'^(#{1,6}\s|```|\|.+\||!\[|(\s*)([-*+]|\d+[.)]) )', lines[i]):
- paraLines.append(lines[i])
- i += 1
- if paraLines:
- sections.append({
- "id": _nextId(), "content_type": "paragraph", "order": order,
- "elements": [{"content": {"text": " ".join(paraLines)}}],
- })
- continue
-
- i += 1
-
- if not sections:
- sections.append({
- "id": _nextId(), "content_type": "paragraph", "order": order,
- "elements": [{"content": {"text": markdown.strip() or "(empty)"}}],
- })
-
- return {
- "metadata": {
- "split_strategy": "single_document",
- "source_documents": [],
- "extraction_method": "agent_rendering",
- "title": title,
- "language": language,
- },
- "documents": [{
- "id": "doc_1",
- "title": title,
- "sections": sections,
- }],
- }
-
- async def _renderDocument(args: Dict[str, Any], context: Dict[str, Any]):
- """Render agent-produced markdown content into any document format via the RendererRegistry."""
- import re as _re
- sourceFileId = (args.get("sourceFileId") or "").strip()
- content = args.get("content", "")
- if not isinstance(content, str):
- content = str(content) if content is not None else ""
- outputFormat = args.get("outputFormat", "pdf")
- title = args.get("title", "Document")
- language = args.get("language", "de")
-
- if sourceFileId:
- try:
- dbMgmt = services.chat.interfaceDbComponent
- fileRow = dbMgmt.getFile(sourceFileId)
- if not fileRow:
- return ToolResult(
- toolCallId="",
- toolName="renderDocument",
- success=False,
- error=f"sourceFileId not found: {sourceFileId}",
- )
- rawBytes = dbMgmt.getFileData(sourceFileId)
- if not rawBytes:
- return ToolResult(
- toolCallId="",
- toolName="renderDocument",
- success=False,
- error=f"sourceFileId has no data: {sourceFileId}",
- )
- try:
- content = rawBytes.decode("utf-8")
- except UnicodeDecodeError:
- content = rawBytes.decode("latin-1", errors="replace")
- except Exception as e:
- return ToolResult(
- toolCallId="",
- toolName="renderDocument",
- success=False,
- error=f"Could not read sourceFileId: {e}",
- )
-
- if not (content or "").strip():
- return ToolResult(
- toolCallId="",
- toolName="renderDocument",
- success=False,
- error=(
- "Provide non-empty `content` (markdown) or `sourceFileId` (id of a .md/.txt from writeFile). "
- "For long documents use writeFile create+append, then renderDocument(sourceFileId=...)."
- ),
- )
-
- modelMaxTokens = context.get("modelMaxOutputTokens", 0)
- _inlineCharLimit = int(modelMaxTokens * 3 * 0.5) if modelMaxTokens > 0 else 6000
- _inlineCharLimit = max(_inlineCharLimit, 3000)
-
- if not sourceFileId and len(content) > _inlineCharLimit:
- return ToolResult(
- toolCallId="",
- toolName="renderDocument",
- success=False,
- error=(
- f"Inline `content` is {len(content)} chars — over the {_inlineCharLimit} char limit "
- f"(derived from model output budget of {modelMaxTokens} tokens). "
- "Large documents must use the file path:\n"
- "1. writeFile(mode='create', name='draft.md', content=)\n"
- "2. writeFile(mode='append', fileId=, content=) — repeat as needed\n"
- "3. renderDocument(sourceFileId=, outputFormat='pdf', title='...')\n"
- "This avoids output truncation entirely."
- ),
- )
-
- try:
- structuredContent = _markdownToDocumentJson(content, title, language)
-
- # Resolve image file references (file:fileId) to base64 data from Knowledge Store
- knowledgeService = None
- try:
- knowledgeService = services.getService("knowledge")
- except Exception:
- pass
- resolvedImages = 0
- for doc in structuredContent.get("documents", []):
- for section in doc.get("sections", []):
- if section.get("content_type") != "image":
- continue
- for element in section.get("elements", []):
- contentObj = element.get("content", {})
- fileRef = contentObj.get("_fileRef", "")
- if not fileRef or contentObj.get("base64Data"):
- continue
- if knowledgeService:
- chunks = knowledgeService._knowledgeDb.getContentChunks(fileRef)
- imageChunks = [c for c in (chunks or []) if c.get("contentType") == "image"]
- if imageChunks:
- contentObj["base64Data"] = imageChunks[0].get("data", "")
- chunkMime = imageChunks[0].get("contextRef", {}).get("mimeType", "image/png")
- contentObj["mimeType"] = chunkMime
- resolvedImages += 1
- if not contentObj.get("base64Data"):
- try:
- rawBytes = services.chat.getFileData(fileRef)
- if rawBytes:
- import base64 as _b64
- contentObj["base64Data"] = _b64.b64encode(rawBytes).decode("ascii")
- contentObj["mimeType"] = "image/png"
- resolvedImages += 1
- except Exception:
- pass
- contentObj.pop("_fileRef", None)
- contentObj.pop("_srcUrl", None)
-
- sectionCount = len(structuredContent.get("documents", [{}])[0].get("sections", []))
- logger.info(f"renderDocument: parsed {sectionCount} sections from markdown ({len(content)} chars), resolved {resolvedImages} image(s), format={outputFormat}")
-
- generationService = services.getService("generation")
- documents = await generationService.renderReport(
- extractedContent=structuredContent,
- outputFormat=outputFormat,
- language=language,
- title=title,
- userPrompt=content,
- )
-
- if not documents:
- return ToolResult(toolCallId="", toolName="renderDocument", success=False, error="Rendering produced no output")
-
- savedFiles = []
- sideEvents = []
- chatService = services.chat
-
- sanitizedTitle = _re.sub(r'[^\w._-]', '_', title, flags=_re.UNICODE).strip('_') or "document"
-
- for doc in documents:
- docData = doc.documentData if hasattr(doc, "documentData") else b""
- docName = doc.filename if hasattr(doc, "filename") else f"{sanitizedTitle}.{outputFormat}"
- docMime = doc.mimeType if hasattr(doc, "mimeType") else "application/octet-stream"
-
- if not docName.lower().endswith(f".{outputFormat}"):
- docName = f"{sanitizedTitle}.{outputFormat}"
-
- fileItem = None
- if hasattr(chatService.interfaceDbComponent, "saveGeneratedFile"):
- fileItem = chatService.interfaceDbComponent.saveGeneratedFile(docData, docName, docMime)
- else:
- fileItem, _ = chatService.interfaceDbComponent.saveUploadedFile(docData, docName)
-
- if fileItem:
- fid = fileItem.id if hasattr(fileItem, "id") else fileItem.get("id", "?")
- fiId = context.get("featureInstanceId") or (services.featureInstanceId if services else "")
- if fiId:
- chatService.interfaceDbComponent.updateFile(fid, {"featureInstanceId": fiId})
- tempFolderId = _getOrCreateTempFolder(chatService)
- if tempFolderId:
- chatService.interfaceDbComponent.updateFile(fid, {"folderId": tempFolderId})
- savedFiles.append(f"- {docName} (id: {fid})")
- sideEvents.append({
- "type": "fileCreated",
- "data": {
- "fileId": fid,
- "fileName": docName,
- "mimeType": docMime,
- "fileSize": len(docData),
- },
- })
-
- result = f"Rendered {len(documents)} document(s):\n" + "\n".join(savedFiles)
- return ToolResult(toolCallId="", toolName="renderDocument", success=True, data=result, sideEvents=sideEvents)
-
- except Exception as e:
- logger.error(f"renderDocument failed: {e}")
- return ToolResult(toolCallId="", toolName="renderDocument", success=False, error=str(e))
-
- registry.register(
- "renderDocument", _renderDocument,
- description=(
- "Render markdown into a document file (PDF, DOCX, XLSX, PPTX, CSV, HTML, MD, JSON, TXT). "
- "For long documents: write markdown with writeFile (mode=create then append chunks), then call this tool with "
- "`sourceFileId` only (tiny JSON — avoids model output truncation). For short docs you may pass `content` inline. "
- "Images:  in the markdown."
- ),
- parameters={
- "type": "object",
- "properties": {
- "content": {
- "type": "string",
- "description": "Full markdown inline. Prefer `sourceFileId` when the document is large (many KB).",
- },
- "sourceFileId": {
- "type": "string",
- "description": "Chat file id of markdown saved via writeFile (create+append). Use this instead of `content` for long PDFs.",
- },
- "outputFormat": {"type": "string", "description": "Target format: pdf, docx, xlsx, pptx, csv, html, md, json, txt", "default": "pdf"},
- "title": {"type": "string", "description": "Document title", "default": "Document"},
- "language": {"type": "string", "description": "Document language (ISO 639-1)", "default": "de"},
- },
- },
- readOnly=False,
- )
-
- # ── textToSpeech tool ──────────────────────────────────────────────
- def _stripMarkdownForTts(text: str) -> str:
- """Strip markdown formatting so TTS reads clean speech text."""
- import re as _re
- t = text
- t = _re.sub(r'\*\*(.+?)\*\*', r'\1', t)
- t = _re.sub(r'\*(.+?)\*', r'\1', t)
- t = _re.sub(r'__(.+?)__', r'\1', t)
- t = _re.sub(r'_(.+?)_', r'\1', t)
- t = _re.sub(r'`[^`]+`', lambda m: m.group(0)[1:-1], t)
- t = _re.sub(r'^#{1,6}\s*', '', t, flags=_re.MULTILINE)
- t = _re.sub(r'^\s*[-*+]\s+', '', t, flags=_re.MULTILINE)
- t = _re.sub(r'^\s*\d+\.\s+', '', t, flags=_re.MULTILINE)
- t = _re.sub(r'\[(.+?)\]\(.+?\)', r'\1', t)
- t = _re.sub(r'!\[.*?\]\(.*?\)', '', t)
- t = _re.sub(r'\n{3,}', '\n\n', t)
- return t.strip()
-
- async def _textToSpeech(args: Dict[str, Any], context: Dict[str, Any]):
- """Convert text to speech using Google Cloud TTS, deliver audio via SSE."""
- import base64 as _b64
- text = args.get("text", "")
- language = args.get("language", "auto")
- voiceName = args.get("voiceName")
-
- if not text:
- return ToolResult(toolCallId="", toolName="textToSpeech", success=False, error="text is required")
-
- cleanText = _stripMarkdownForTts(text)
- if not cleanText:
- return ToolResult(toolCallId="", toolName="textToSpeech", success=False, error="text is empty after stripping markdown")
-
- try:
- from modules.interfaces.interfaceVoiceObjects import getVoiceInterface
- mandateId = context.get("mandateId", "")
- voiceInterface = getVoiceInterface(currentUser=None, mandateId=mandateId)
-
- _ISO_TO_BCP47 = {
- "de": "de-DE", "en": "en-US", "fr": "fr-FR", "it": "it-IT",
- "es": "es-ES", "pt": "pt-BR", "nl": "nl-NL", "pl": "pl-PL",
- "ru": "ru-RU", "ja": "ja-JP", "zh": "zh-CN", "ko": "ko-KR",
- "ar": "ar-XA", "hi": "hi-IN", "tr": "tr-TR", "sv": "sv-SE",
- }
-
- if language == "auto":
- try:
- snippet = cleanText[:500]
- detectResult = await voiceInterface.detectLanguage(snippet)
- if detectResult and detectResult.get("success"):
- detected = detectResult.get("language", "de")
- language = _ISO_TO_BCP47.get(detected, detected)
- if "-" not in language:
- language = _ISO_TO_BCP47.get(language, f"{language}-{language.upper()}")
- logger.info(f"textToSpeech: auto-detected language '{detected}' -> '{language}'")
- else:
- language = "de-DE"
- except Exception as detectErr:
- logger.warning(f"textToSpeech: language detection failed: {detectErr}, defaulting to de-DE")
- language = "de-DE"
-
- if not voiceName:
- try:
- from modules.datamodels.datamodelUam import UserVoicePreferences
- from modules.interfaces.interfaceDbApp import getRootInterface
- userId = context.get("userId", "")
- if userId:
- rootIf = getRootInterface()
- prefRecords = rootIf.db.getRecordset(
- UserVoicePreferences,
- recordFilter={"userId": userId}
- )
- if prefRecords:
- allPrefs = [
- r if isinstance(r, dict) else r.model_dump() if hasattr(r, "model_dump") else r
- for r in prefRecords
- ]
- _mid = str(mandateId or "").strip()
- scopedPref = next((p for p in allPrefs if str(p.get("mandateId") or "").strip() == _mid), None)
- globalPref = next((p for p in allPrefs if not str(p.get("mandateId") or "").strip()), None)
-
- def _resolveVoiceFromMap(prefDict, lang):
- vm = (prefDict or {}).get("ttsVoiceMap", {}) or {}
- if not isinstance(vm, dict) or not vm:
- return None
- baseLang = lang.split("-")[0].lower() if isinstance(lang, str) and lang else ""
- langNorm = str(lang or "").strip()
- if langNorm in vm:
- entry = vm[langNorm]
- return entry.get("voiceName") if isinstance(entry, dict) else entry
- if baseLang and baseLang in vm:
- entry = vm[baseLang]
- return entry.get("voiceName") if isinstance(entry, dict) else entry
- if baseLang:
- for mk, mv in vm.items():
- mkn = str(mk).lower()
- if mkn == baseLang or mkn.startswith(f"{baseLang}-"):
- return mv.get("voiceName") if isinstance(mv, dict) else mv
- return None
-
- voiceName = (
- _resolveVoiceFromMap(scopedPref, language)
- or _resolveVoiceFromMap(globalPref, language)
- or _resolveVoiceFromMap(allPrefs[0], language)
- )
- if not voiceName:
- for candidate in [globalPref, scopedPref, allPrefs[0]]:
- if candidate and candidate.get("ttsVoice") and candidate.get("ttsLanguage") == language:
- voiceName = candidate["ttsVoice"]
- break
- if voiceName:
- logger.info(f"textToSpeech: using configured voice '{voiceName}' for language '{language}'")
- except Exception as prefErr:
- logger.debug(f"textToSpeech: could not load voice preferences: {prefErr}")
-
- ttsResult = await voiceInterface.textToSpeech(
- text=cleanText,
- languageCode=language,
- voiceName=voiceName,
- )
-
- if not ttsResult or not ttsResult.get("success"):
- errMsg = ttsResult.get("error", "TTS call failed") if ttsResult else "TTS returned None"
- return ToolResult(toolCallId="", toolName="textToSpeech", success=False, error=errMsg)
-
- audioContent = ttsResult.get("audioContent", "")
- if not audioContent:
- return ToolResult(toolCallId="", toolName="textToSpeech", success=False, error="TTS returned no audio")
-
- if isinstance(audioContent, bytes):
- audioB64 = _b64.b64encode(audioContent).decode("ascii")
- elif isinstance(audioContent, str):
- audioB64 = audioContent
- else:
- audioB64 = str(audioContent)
-
- audioFormat = ttsResult.get("audioFormat", "mp3")
- charCount = len(cleanText)
- usedVoice = voiceName or "default"
- logger.info(f"textToSpeech: generated {audioFormat} audio for {charCount} chars, language={language}, voice={usedVoice}")
-
- return ToolResult(
- toolCallId="", toolName="textToSpeech", success=True,
- data=f"Audio generated ({charCount} characters, language={language}, voice={usedVoice}). Playing in chat.",
- sideEvents=[{
- "type": "voiceResponse",
- "data": {
- "audio": audioB64,
- "format": audioFormat,
- "language": language,
- "charCount": charCount,
- },
- }],
- )
-
- except ImportError:
- return ToolResult(toolCallId="", toolName="textToSpeech", success=False,
- error="Voice interface not available (missing dependency)")
- except Exception as e:
- logger.error(f"textToSpeech failed: {e}")
- return ToolResult(toolCallId="", toolName="textToSpeech", success=False, error=str(e))
-
- registry.register(
- "textToSpeech", _textToSpeech,
- description=(
- "Convert text to speech audio. The audio is played directly in the chat. "
- "Use this when the user asks you to read something aloud, narrate, or speak. "
- "Language is auto-detected from the text content. You do NOT need to specify a language."
- ),
- parameters={
- "type": "object",
- "properties": {
- "text": {"type": "string", "description": "The text to convert to speech. Can include markdown (will be stripped automatically)."},
- "language": {"type": "string", "description": "BCP-47 language code (e.g. de-DE, en-US) or 'auto' for automatic detection", "default": "auto"},
- "voiceName": {"type": "string", "description": "Optional specific voice name. If omitted, uses the configured voice for the detected language."},
- },
- "required": ["text"],
- },
- readOnly=False,
- )
-
- # ── generateImage tool ─────────────────────────────────────────────
-
- async def _generateImage(args: Dict[str, Any], context: Dict[str, Any]):
- """Generate an image from a text prompt using AI (DALL-E)."""
- import re as _re
-
- prompt = (args.get("prompt") or "").strip()
- style = (args.get("style") or "").strip() or None
- title = (args.get("title") or "").strip() or "Generated Image"
-
- if not prompt:
- return ToolResult(toolCallId="", toolName="generateImage", success=False, error="prompt is required")
-
- try:
- from modules.serviceCenter.services.serviceGeneration.paths.imagePath import ImageGenerationPath
-
- imagePath = ImageGenerationPath(services)
- aiResponse = await imagePath.generateImages(
- userPrompt=prompt,
- count=1,
- style=style,
- format="png",
- title=title,
- )
-
- if not aiResponse.documents:
- return ToolResult(toolCallId="", toolName="generateImage", success=False, error="Image generation returned no image data")
-
- sideEvents = []
- savedFiles = []
- chatService = services.chat
- sanitizedTitle = _re.sub(r'[^\w._-]', '_', title, flags=_re.UNICODE).strip('_') or "generated_image"
-
- for doc in aiResponse.documents:
- docData = doc.documentData if hasattr(doc, "documentData") else b""
- docName = doc.documentName if hasattr(doc, "documentName") else f"{sanitizedTitle}.png"
- docMime = doc.mimeType if hasattr(doc, "mimeType") else "image/png"
-
- if not docName.lower().endswith(".png"):
- docName = f"{sanitizedTitle}.png"
-
- fileItem = None
- if hasattr(chatService.interfaceDbComponent, "saveGeneratedFile"):
- fileItem = chatService.interfaceDbComponent.saveGeneratedFile(docData, docName, docMime)
- else:
- fileItem, _ = chatService.interfaceDbComponent.saveUploadedFile(docData, docName)
-
- if fileItem:
- fid = fileItem.id if hasattr(fileItem, "id") else fileItem.get("id", "?")
- fiId = context.get("featureInstanceId") or (services.featureInstanceId if services else "")
- if fiId:
- chatService.interfaceDbComponent.updateFile(fid, {"featureInstanceId": fiId})
- tempFolderId = _getOrCreateTempFolder(chatService)
- if tempFolderId:
- chatService.interfaceDbComponent.updateFile(fid, {"folderId": tempFolderId})
- savedFiles.append(f"- {docName} (id: {fid})")
- sideEvents.append({
- "type": "fileCreated",
- "data": {
- "fileId": fid,
- "fileName": docName,
- "mimeType": docMime,
- "fileSize": len(docData),
- },
- })
-
- result = f"Generated {len(aiResponse.documents)} image(s):\n" + "\n".join(savedFiles)
- return ToolResult(toolCallId="", toolName="generateImage", success=True, data=result, sideEvents=sideEvents)
-
- except Exception as e:
- logger.error(f"generateImage failed: {e}")
- return ToolResult(toolCallId="", toolName="generateImage", success=False, error=str(e))
-
    # Expose the image-generation coroutine as an agent tool.  The description
    # and JSON schema below are the contract shown to the LLM; keep them in
    # sync with _generateImage's argument handling.  readOnly=False because
    # the tool creates files in the workspace.
    registry.register(
        "generateImage", _generateImage,
        description=(
            "Generate an image from a text description using AI (DALL-E). "
            "The generated image is saved as a file in the workspace. "
            "Use this when the user asks to create, generate, draw, or design an image, illustration, icon, logo, diagram, or any visual content. "
            "Provide a detailed, descriptive prompt for best results."
        ),
        parameters={
            "type": "object",
            "properties": {
                "prompt": {"type": "string", "description": "Detailed description of the image to generate. Be specific about subject, composition, colors, style, and mood."},
                "style": {"type": "string", "description": "Optional style modifier (e.g. 'photorealistic', 'watercolor', 'digital art', 'minimalist', 'sketch')"},
                "title": {"type": "string", "description": "Title/filename for the generated image", "default": "Generated Image"},
            },
            "required": ["prompt"],
        },
        readOnly=False,
    )
-
- # ── createChart tool ─────────────────────────────────────────────────
-
    async def _createChart(args: Dict[str, Any], context: Dict[str, Any]):
        """Create a data chart as PNG image using matplotlib.

        Reads chart type, title, labels, datasets and styling options from
        `args`, renders the chart off-screen (Agg backend), saves the PNG
        into the workspace file store, and returns a ToolResult carrying a
        human-readable summary plus a `fileCreated` side event.

        Errors (bad data, matplotlib failures, storage failures) are caught
        and returned as a failed ToolResult rather than raised.
        """
        import re as _re

        # All argument access is defensive: the LLM may omit keys or pass null.
        # chartType is lower-cased so the schema's "horizontalBar" matches the
        # "horizontalbar" comparisons below.
        chartType = (args.get("chartType") or "bar").strip().lower()
        title = (args.get("title") or "Chart").strip()
        labels = args.get("labels") or []
        datasets = args.get("datasets") or []
        xLabel = (args.get("xLabel") or "").strip()
        yLabel = (args.get("yLabel") or "").strip()
        # Clamp figure size to the documented 4-20 x 3-14 inch range.
        width = min(max(args.get("width") or 10, 4), 20)
        height = min(max(args.get("height") or 6, 3), 14)
        colors = args.get("colors") or None

        if not datasets:
            return ToolResult(toolCallId="", toolName="createChart", success=False, error="datasets is required (list of {label, values})")

        try:
            # Agg = headless rendering backend; must be selected before pyplot
            # is imported.  Silence matplotlib's chatty DEBUG/INFO logging.
            import matplotlib
            matplotlib.use("Agg")
            import logging as _mpllog
            _mpllog.getLogger("matplotlib").setLevel(_mpllog.WARNING)
            import matplotlib.pyplot as plt
            import io

            # Google-style default palette; custom colors are only used when
            # the caller supplied at least one color per dataset.
            _DEFAULT_COLORS = [
                "#4285F4", "#EA4335", "#FBBC04", "#34A853", "#FF6D01",
                "#46BDC6", "#7B61FF", "#F538A0", "#00ACC1", "#AB47BC",
            ]
            usedColors = colors if colors and len(colors) >= len(datasets) else _DEFAULT_COLORS

            fig, ax = plt.subplots(figsize=(width, height))
            fig.patch.set_facecolor("#FFFFFF")
            ax.set_facecolor("#FAFAFA")

            if chartType in ("pie", "donut"):
                # Pie/donut charts plot only the FIRST dataset.
                # NOTE(review): if len(labels) != len(values) matplotlib will
                # raise; that is caught below and surfaced as a tool error.
                values = datasets[0].get("values", []) if datasets else []
                explode = [0.02] * len(values)
                wedges, texts, autotexts = ax.pie(
                    values, labels=labels, autopct="%1.1f%%",
                    colors=usedColors[:len(values)], explode=explode,
                    textprops={"fontsize": 9},
                )
                if chartType == "donut":
                    # White circle over the center turns the pie into a donut.
                    ax.add_artist(plt.Circle((0, 0), 0.55, fc="white"))
                ax.set_title(title, fontsize=14, fontweight="bold", pad=16)

            else:
                import numpy as _np
                # X positions: one slot per label, or per longest dataset when
                # no labels were given.
                x = _np.arange(len(labels)) if labels else _np.arange(max(len(d.get("values", [])) for d in datasets))
                barWidth = 0.8 / max(len(datasets), 1)

                for i, ds in enumerate(datasets):
                    dsLabel = ds.get("label", f"Series {i+1}")
                    values = ds.get("values", [])
                    color = usedColors[i % len(usedColors)]

                    if chartType == "bar":
                        # Offset groups multiple series side-by-side per slot.
                        offset = (i - len(datasets) / 2 + 0.5) * barWidth
                        ax.bar(x + offset, values, barWidth, label=dsLabel, color=color, edgecolor="white", linewidth=0.5)
                    elif chartType == "horizontalbar":
                        offset = (i - len(datasets) / 2 + 0.5) * barWidth
                        ax.barh(x + offset, values, barWidth, label=dsLabel, color=color, edgecolor="white", linewidth=0.5)
                    elif chartType == "line":
                        # x is truncated to the series length so short series
                        # do not raise a shape-mismatch error.
                        ax.plot(x[:len(values)], values, marker="o", markersize=5, label=dsLabel, color=color, linewidth=2)
                    elif chartType == "area":
                        ax.fill_between(x[:len(values)], values, alpha=0.3, color=color)
                        ax.plot(x[:len(values)], values, label=dsLabel, color=color, linewidth=2)
                    elif chartType == "scatter":
                        ax.scatter(x[:len(values)], values, label=dsLabel, color=color, s=50, edgecolors="white", linewidth=0.5)
                    else:
                        # Unknown type: fall back to a plain bar chart.
                        ax.bar(x, values, label=dsLabel, color=color)

                if labels:
                    if chartType == "horizontalbar":
                        ax.set_yticks(x)
                        ax.set_yticklabels(labels, fontsize=9)
                    else:
                        ax.set_xticks(x)
                        # Rotate labels when there are many, to avoid overlap.
                        ax.set_xticklabels(labels, fontsize=9, rotation=45 if len(labels) > 6 else 0, ha="right" if len(labels) > 6 else "center")

                ax.set_title(title, fontsize=14, fontweight="bold", pad=12)
                if xLabel:
                    ax.set_xlabel(xLabel, fontsize=10)
                if yLabel:
                    ax.set_ylabel(yLabel, fontsize=10)
                if len(datasets) > 1:
                    ax.legend(fontsize=9, framealpha=0.9)
                ax.grid(axis="y", alpha=0.3, linestyle="--")
                ax.spines["top"].set_visible(False)
                ax.spines["right"].set_visible(False)

            # Render to an in-memory PNG; close the figure to free matplotlib
            # state (Agg figures are not garbage-collected automatically).
            plt.tight_layout()
            buf = io.BytesIO()
            fig.savefig(buf, format="png", dpi=150, bbox_inches="tight")
            plt.close(fig)
            pngData = buf.getvalue()

            # Persist the PNG; sanitize the title so it is filesystem-safe.
            chatService = services.chat
            sanitizedTitle = _re.sub(r'[^\w._-]', '_', title, flags=_re.UNICODE).strip('_') or "chart"
            fileName = f"{sanitizedTitle}.png"

            # Prefer the dedicated generated-file API when the DB component
            # provides it; fall back to the generic upload path otherwise.
            if hasattr(chatService.interfaceDbComponent, "saveGeneratedFile"):
                fileItem = chatService.interfaceDbComponent.saveGeneratedFile(pngData, fileName, "image/png")
            else:
                fileItem, _ = chatService.interfaceDbComponent.saveUploadedFile(pngData, fileName)

            # fileItem may be an object or a dict depending on the save path.
            fid = fileItem.id if hasattr(fileItem, "id") else fileItem.get("id", "?") if isinstance(fileItem, dict) else "?"
            fiId = context.get("featureInstanceId") or (services.featureInstanceId if services else "")
            if fiId and fid != "?":
                chatService.interfaceDbComponent.updateFile(fid, {"featureInstanceId": fiId})
            tempFolderId = _getOrCreateTempFolder(chatService)
            if tempFolderId and fid != "?":
                chatService.interfaceDbComponent.updateFile(fid, {"folderId": tempFolderId})

            sideEvents = [{"type": "fileCreated", "data": {
                "fileId": fid, "fileName": fileName,
                "mimeType": "image/png", "fileSize": len(pngData),
            }}]
            # NOTE(review): the data string below ends with
            # "Embed in documents with: " and no embed syntax — looks
            # truncated; confirm the intended markup and restore it.
            return ToolResult(
                toolCallId="", toolName="createChart", success=True,
                data=f"Chart saved as '{fileName}' (id: {fid}, {len(pngData)} bytes). "
                     f"Embed in documents with: ",
                sideEvents=sideEvents,
            )

        except Exception as e:
            logger.error(f"createChart failed: {e}", exc_info=True)
            return ToolResult(toolCallId="", toolName="createChart", success=False, error=str(e))
-
    # Tool registration for _createChart.  The schema must match the keys the
    # handler reads (chartType, title, labels, datasets, xLabel, yLabel,
    # colors, width, height).
    # NOTE(review): the description's "via renderDocument: ." appears to be
    # missing the embed syntax after the colon — confirm and restore it.
    registry.register(
        "createChart", _createChart,
        description=(
            "Create a data chart/graph as a PNG image using matplotlib. "
            "Supported types: bar, horizontalBar, line, area, scatter, pie, donut. "
            "The chart is saved as a file in the workspace. "
            "Use the returned fileId to embed in documents via renderDocument: . "
            "Provide structured data with labels and datasets."
        ),
        parameters={
            "type": "object",
            "properties": {
                "chartType": {
                    "type": "string",
                    "enum": ["bar", "horizontalBar", "line", "area", "scatter", "pie", "donut"],
                    "description": "Chart type (default: bar)",
                },
                "title": {"type": "string", "description": "Chart title"},
                "labels": {
                    "type": "array", "items": {"type": "string"},
                    "description": "X-axis labels / category names",
                },
                "datasets": {
                    "type": "array",
                    "items": {
                        "type": "object",
                        "properties": {
                            "label": {"type": "string", "description": "Series name (legend)"},
                            "values": {"type": "array", "items": {"type": "number"}, "description": "Data values"},
                        },
                        "required": ["values"],
                    },
                    "description": "Data series to plot",
                },
                "xLabel": {"type": "string", "description": "X-axis label"},
                "yLabel": {"type": "string", "description": "Y-axis label"},
                "colors": {
                    "type": "array", "items": {"type": "string"},
                    "description": "Custom hex colors for series (e.g. ['#4285F4', '#EA4335'])",
                },
                "width": {"type": "number", "description": "Figure width in inches (4-20, default 10)"},
                "height": {"type": "number", "description": "Figure height in inches (3-14, default 6)"},
            },
            "required": ["datasets"],
        },
        readOnly=False,
    )
-
- # ── Phase 3: speechToText, detectLanguage, neutralizeData, executeCode ──
-
- async def _speechToText(args: Dict[str, Any], context: Dict[str, Any]):
- fileId = args.get("fileId", "")
- if not fileId:
- return ToolResult(toolCallId="", toolName="speechToText", success=False, error="fileId is required")
- try:
- chatService = services.chat
- audioData = chatService.interfaceDbComponent.getFileData(fileId)
- if not audioData:
- return ToolResult(toolCallId="", toolName="speechToText", success=False, error=f"No data found for file {fileId}")
- from modules.interfaces.interfaceVoiceObjects import getVoiceInterface
- mandateId = context.get("mandateId", "")
- voiceInterface = getVoiceInterface(currentUser=None, mandateId=mandateId)
- language = args.get("language", "de-DE")
- result = await voiceInterface.speechToText(audioData, language=language)
- if result and result.get("success"):
- transcript = result.get("text", "")
- confidence = result.get("confidence", 0)
- return ToolResult(
- toolCallId="", toolName="speechToText", success=True,
- data=f"Transcript (confidence: {confidence:.0%}):\n{transcript}"
- )
- return ToolResult(toolCallId="", toolName="speechToText", success=False, error=result.get("error", "Transcription failed"))
- except Exception as e:
- return ToolResult(toolCallId="", toolName="speechToText", success=False, error=str(e))
-
- async def _detectLanguage(args: Dict[str, Any], context: Dict[str, Any]):
- text = args.get("text", "")
- if not text:
- return ToolResult(toolCallId="", toolName="detectLanguage", success=False, error="text is required")
- try:
- from modules.interfaces.interfaceVoiceObjects import getVoiceInterface
- mandateId = context.get("mandateId", "")
- voiceInterface = getVoiceInterface(currentUser=None, mandateId=mandateId)
- result = await voiceInterface.detectLanguage(text)
- if result and result.get("success"):
- lang = result.get("language", "unknown")
- return ToolResult(toolCallId="", toolName="detectLanguage", success=True, data=f"Detected language: {lang}")
- return ToolResult(toolCallId="", toolName="detectLanguage", success=False, error=result.get("error", "Detection failed"))
- except Exception as e:
- return ToolResult(toolCallId="", toolName="detectLanguage", success=False, error=str(e))
-
- async def _neutralizeData(args: Dict[str, Any], context: Dict[str, Any]):
- text = args.get("text", "")
- fileId = args.get("fileId", "")
- if not text and not fileId:
- return ToolResult(toolCallId="", toolName="neutralizeData", success=False, error="text or fileId is required")
- try:
- neutralizationService = services.getService("neutralization")
- if not neutralizationService:
- return ToolResult(toolCallId="", toolName="neutralizeData", success=False, error="Neutralization service not available")
- if not neutralizationService.interfaceDbComponent:
- neutralizationService.interfaceDbComponent = services.chat.interfaceDbComponent
- if text:
- result = await neutralizationService.processTextAsync(text, fileId or None)
- else:
- result = neutralizationService.processFile(fileId)
- if result:
- neutralized = result.get("neutralized_text", "") or result.get("result", str(result))
- return ToolResult(toolCallId="", toolName="neutralizeData", success=True, data=neutralized)
- return ToolResult(toolCallId="", toolName="neutralizeData", success=False, error="Neutralization returned no result")
- except Exception as e:
- return ToolResult(toolCallId="", toolName="neutralizeData", success=False, error=str(e))
-
- async def _executeCode(args: Dict[str, Any], context: Dict[str, Any]):
- code = args.get("code", "")
- language = args.get("language", "python")
- if not code:
- return ToolResult(toolCallId="", toolName="executeCode", success=False, error="code is required")
- if language != "python":
- return ToolResult(toolCallId="", toolName="executeCode", success=False, error=f"Language '{language}' not supported. Only 'python' is available.")
- try:
- from modules.serviceCenter.services.serviceAgent.sandboxExecutor import executePython
- result = await executePython(code)
- if result.get("success"):
- output = result.get("output", "(no output)")
- return ToolResult(toolCallId="", toolName="executeCode", success=True, data=output)
- error = result.get("error", "Execution failed")
- tb = result.get("traceback", "")
- return ToolResult(toolCallId="", toolName="executeCode", success=False, error=f"{error}\n{tb}" if tb else error)
- except Exception as e:
- return ToolResult(toolCallId="", toolName="executeCode", success=False, error=str(e))
-
    # Registrations for the Phase-3 tools.  Each schema is the LLM-facing
    # contract and must match the keys the corresponding handler reads.
    registry.register(
        "speechToText", _speechToText,
        description="Transcribe an audio file to text using speech recognition. Returns the transcript with confidence score.",
        parameters={
            "type": "object",
            "properties": {
                "fileId": {"type": "string", "description": "Audio file ID from the workspace"},
                "language": {"type": "string", "description": "BCP-47 language code (e.g. 'de-DE', 'en-US'). Default: 'de-DE'"},
            },
            "required": ["fileId"]
        },
        readOnly=True
    )

    registry.register(
        "detectLanguage", _detectLanguage,
        description="Detect the language of a text snippet. Returns ISO 639-1 code (e.g. 'de', 'en').",
        parameters={
            "type": "object",
            "properties": {
                "text": {"type": "string", "description": "Text to analyze"},
            },
            "required": ["text"]
        },
        readOnly=True
    )

    # Note: neither text nor fileId is marked required here; the handler
    # itself enforces "text or fileId".
    registry.register(
        "neutralizeData", _neutralizeData,
        description="Anonymize text or file content by replacing personal data (names, addresses, etc.) with placeholders. Non-destructive -- returns the anonymized copy.",
        parameters={
            "type": "object",
            "properties": {
                "text": {"type": "string", "description": "Text to anonymize"},
                "fileId": {"type": "string", "description": "File ID to anonymize (alternative to text)"},
            },
        },
        readOnly=True
    )

    registry.register(
        "executeCode", _executeCode,
        description=(
            "Execute Python code in a sandboxed environment for calculations and data analysis. "
            "Available modules: math, statistics, json, csv, re, datetime, collections, itertools, functools, decimal, fractions, random. "
            "No file system, network, or OS access. Max 30s execution time. "
            "Use print() to produce output."
        ),
        parameters={
            "type": "object",
            "properties": {
                "code": {"type": "string", "description": "Python code to execute"},
                "language": {"type": "string", "description": "Programming language (only 'python' supported)", "default": "python"},
            },
            "required": ["code"]
        },
        readOnly=True
    )
-
- # ---- Feature Data Sub-Agent tool ----
-
    async def _queryFeatureInstance(args: Dict[str, Any], context: Dict[str, Any]):
        """Delegate a question to the Feature Data Sub-Agent.

        Resolves the target feature instance, determines which of its data
        tables the sub-agent may see (optionally narrowed by configured
        FeatureDataSource records), opens a dedicated connection to the
        feature's own database, and runs the sub-agent.  The connection is
        always closed afterwards.  All failures are returned as a failed
        ToolResult rather than raised.
        """
        featureInstanceId = args.get("featureInstanceId", "")
        question = args.get("question", "")
        if not featureInstanceId or not question:
            return ToolResult(
                toolCallId="", toolName="queryFeatureInstance",
                success=False, error="featureInstanceId and question are required",
            )
        try:
            # Imports are deferred so this heavy dependency chain is only
            # paid when the tool is actually invoked.
            from modules.serviceCenter.services.serviceAgent.featureDataAgent import runFeatureDataAgent
            from modules.datamodels.datamodelFeatureDataSource import FeatureDataSource
            from modules.interfaces.interfaceDbApp import getRootInterface

            rootIf = getRootInterface()
            instance = rootIf.getFeatureInstance(featureInstanceId)
            if not instance:
                return ToolResult(
                    toolCallId="", toolName="queryFeatureInstance",
                    success=False, error=f"Feature instance {featureInstanceId} not found",
                )

            featureCode = instance.featureCode
            mandateId = instance.mandateId or ""
            instanceLabel = instance.label or ""
            userId = context.get("userId", "")
            # The workspace the chat runs in (distinct from the queried instance).
            workspaceInstanceId = context.get("featureInstanceId", "")

            rootDbConn = rootIf.db if hasattr(rootIf, "db") else None
            if rootDbConn is None:
                return ToolResult(
                    toolCallId="", toolName="queryFeatureInstance",
                    success=False, error="No database connector available",
                )

            # Data-source records configured for this instance/workspace pair;
            # empty means "expose all tables of the feature".
            featureDataSources = rootDbConn.getRecordset(
                FeatureDataSource,
                recordFilter={"featureInstanceId": featureInstanceId, "workspaceInstanceId": workspaceInstanceId},
            )

            # If ANY configured source demands neutralization, every AI call
            # made by the sub-agent is forced through neutralization below.
            _anySourceNeutralize = any(
                bool(ds.get("neutralize", False) if isinstance(ds, dict) else getattr(ds, "neutralize", False))
                for ds in (featureDataSources or [])
            )

            from modules.security.rbacCatalog import getCatalogService
            catalog = getCatalogService()
            tableFilters = {}
            if not featureDataSources:
                selectedTables = catalog.getDataObjects(featureCode)
            else:
                # NOTE(review): this branch assumes featureDataSources rows
                # are dicts (ds.get / ds["tableName"]), while the neutralize
                # check above also tolerates model objects — confirm what
                # getRecordset actually returns.
                allObjs = {o["meta"]["table"]: o for o in catalog.getDataObjects(featureCode) if "meta" in o and "table" in o.get("meta", {})}
                selectedTables = [allObjs[ds["tableName"]] for ds in featureDataSources if ds.get("tableName") in allObjs]
                # Per-table record filters restrict what the sub-agent may read.
                for ds in featureDataSources:
                    rf = ds.get("recordFilter")
                    if rf and isinstance(rf, dict) and ds.get("tableName"):
                        tableFilters[ds["tableName"]] = rf

            if not selectedTables:
                return ToolResult(
                    toolCallId="", toolName="queryFeatureInstance",
                    success=False, error=f"No data tables available for feature '{featureCode}'",
                )

            # Each feature has its own database named poweron_<featurecode>.
            from modules.connectors.connectorDbPostgre import DatabaseConnector
            from modules.shared.configuration import APP_CONFIG
            featureDbName = f"poweron_{featureCode.lower()}"
            featureDbConn = DatabaseConnector(
                dbHost=APP_CONFIG.get("DB_HOST", "localhost"),
                dbDatabase=featureDbName,
                dbUser=APP_CONFIG.get("DB_USER"),
                dbPassword=APP_CONFIG.get("DB_PASSWORD_SECRET"),
                dbPort=int(APP_CONFIG.get("DB_PORT", 5432)),
                userId=userId or "agent",
            )

            aiService = services.ai if hasattr(services, "ai") else None
            if aiService is None:
                return ToolResult(
                    toolCallId="", toolName="queryFeatureInstance",
                    success=False, error="AI service not available for sub-agent",
                )

            # Wrapper handed to the sub-agent: tags requests for
            # neutralization when any source demands it.
            async def _subAgentAiCall(req):
                if _anySourceNeutralize:
                    req.requireNeutralization = True
                return await aiService.callAi(req)

            try:
                answer = await runFeatureDataAgent(
                    question=question,
                    featureInstanceId=featureInstanceId,
                    featureCode=featureCode,
                    selectedTables=selectedTables,
                    mandateId=mandateId,
                    userId=userId,
                    aiCallFn=_subAgentAiCall,
                    dbConnector=featureDbConn,
                    instanceLabel=instanceLabel,
                    tableFilters=tableFilters,
                )
            finally:
                # Best-effort close of the per-call DB connection.
                try:
                    featureDbConn.close()
                except Exception:
                    pass

            return ToolResult(
                toolCallId="", toolName="queryFeatureInstance",
                success=True, data=answer,
            )
        except Exception as e:
            logger.error(f"queryFeatureInstance failed: {e}", exc_info=True)
            return ToolResult(
                toolCallId="", toolName="queryFeatureInstance",
                success=False, error=str(e),
            )
-
    # Registration for the Feature Data Sub-Agent delegate tool.
    registry.register(
        "queryFeatureInstance", _queryFeatureInstance,
        description=(
            "Query data from a feature instance (e.g. Trustee, CommCoach). "
            "Delegates to a specialized sub-agent that knows the feature's data schema "
            "and can browse/query its tables. Use this when the user has attached "
            "feature data sources or asks about feature-specific data."
        ),
        parameters={
            "type": "object",
            "properties": {
                "featureInstanceId": {"type": "string", "description": "ID of the feature instance to query"},
                "question": {"type": "string", "description": "What data to find or analyze from this feature instance"},
            },
            "required": ["featureInstanceId", "question"]
        },
        readOnly=True
    )
-
- # ---- Cross-workflow tools ----
-
- async def _listWorkflowHistory(args: Dict[str, Any], context: Dict[str, Any]) -> ToolResult:
- """List all chat workflows in this workspace with metadata."""
- import json as _json
- try:
- chatService = services.chat
- chatInterface = chatService.interfaceDbChat
- allWorkflows = chatInterface.getWorkflows() or []
-
- allWorkflows.sort(
- key=lambda w: w.get("sysCreatedAt") or w.get("startedAt") or 0,
- reverse=True,
- )
- allWorkflows = allWorkflows[:50]
-
- items = []
- for wf in allWorkflows:
- wfId = wf.get("id", "")
- name = wf.get("name") or "(unnamed)"
- createdAt = wf.get("sysCreatedAt") or wf.get("startedAt") or 0
- lastActivity = wf.get("lastActivity") or createdAt
-
- msgs = chatInterface.getMessages(wfId) or []
- messageCount = len(msgs)
- lastPreview = ""
- if msgs:
- lastMsg = msgs[-1] if isinstance(msgs[-1], dict) else (
- msgs[-1].model_dump() if hasattr(msgs[-1], "model_dump") else {}
- )
- content = lastMsg.get("message") or lastMsg.get("content") or ""
- lastPreview = content[:150]
-
- items.append({
- "id": wfId,
- "name": name,
- "createdAt": createdAt,
- "lastActivity": lastActivity,
- "messageCount": messageCount,
- "lastMessagePreview": lastPreview,
- })
-
- return ToolResult(
- toolCallId="", toolName="listWorkflowHistory",
- success=True, data=_json.dumps(items, ensure_ascii=False),
- )
- except Exception as e:
- return ToolResult(
- toolCallId="", toolName="listWorkflowHistory",
- success=False, error=str(e),
- )
-
    # Registration for the workflow-history listing tool (no parameters).
    registry.register(
        "listWorkflowHistory", _listWorkflowHistory,
        description=(
            "List all chat conversations/workflows in this workspace. "
            "Returns id, name, createdAt, lastActivity, messageCount, and a preview "
            "of the last message for each workflow. Use this to discover previous "
            "conversations when the user asks about past chats or wants a summary "
            "across conversations."
        ),
        parameters={
            "type": "object",
            "properties": {},
        },
        readOnly=True,
    )
-
- async def _readWorkflowMessages(args: Dict[str, Any], context: Dict[str, Any]) -> ToolResult:
- """Read messages from a specific workflow."""
- import json as _json
- targetWorkflowId = args.get("workflowId", "")
- limit = int(args.get("limit", 20))
- offset = int(args.get("offset", 0))
-
- if not targetWorkflowId:
- return ToolResult(
- toolCallId="", toolName="readWorkflowMessages",
- success=False, error="workflowId is required",
- )
-
- try:
- chatService = services.chat
- chatInterface = chatService.interfaceDbChat
- allMsgs = chatInterface.getMessages(targetWorkflowId) or []
-
- sliced = allMsgs[offset:offset + limit]
- items = []
- for msg in sliced:
- raw = msg if isinstance(msg, dict) else (
- msg.model_dump() if hasattr(msg, "model_dump") else {}
- )
- content = raw.get("message") or raw.get("content") or ""
- if len(content) > 2000:
- content = content[:2000] + "..."
- items.append({
- "role": raw.get("role", ""),
- "message": content,
- "publishedAt": raw.get("publishedAt") or raw.get("sysCreatedAt") or 0,
- })
-
- header = f"Workflow {targetWorkflowId}: {len(allMsgs)} total messages"
- if offset > 0 or len(allMsgs) > offset + limit:
- header += f" (showing {offset + 1}-{offset + len(sliced)})"
-
- return ToolResult(
- toolCallId="", toolName="readWorkflowMessages",
- success=True,
- data=header + "\n" + _json.dumps(items, ensure_ascii=False),
- )
- except Exception as e:
- return ToolResult(
- toolCallId="", toolName="readWorkflowMessages",
- success=False, error=str(e),
- )
-
    # Registration for the paginated message reader (companion to
    # listWorkflowHistory).
    registry.register(
        "readWorkflowMessages", _readWorkflowMessages,
        description=(
            "Read messages from a specific chat workflow/conversation. "
            "Use this after listWorkflowHistory to read the content of a "
            "specific past conversation. Supports pagination via offset/limit."
        ),
        parameters={
            "type": "object",
            "properties": {
                "workflowId": {"type": "string", "description": "ID of the workflow to read messages from"},
                "limit": {"type": "integer", "description": "Max messages to return (default 20)"},
                "offset": {"type": "integer", "description": "Skip first N messages (default 0)"},
            },
            "required": ["workflowId"],
        },
        readOnly=True,
    )
-
    # Tag core-only tools so restricted toolSets (e.g. "commcoach") exclude them.
    # Tools NOT in this set remain toolSet=None → available to ALL sets.
    # The names must exactly match the strings used in registry.register();
    # unknown names are silently skipped by the getTool() check below.
    _CORE_ONLY_TOOLS = {
        "listFiles", "listFolders", "tagFile", "moveFile", "createFolder",
        "writeFile", "deleteFile", "renameFile", "translateText",
        "deleteFolder", "renameFolder", "moveFolder", "copyFile", "replaceInFile",
        "listConnections", "uploadToExternal", "sendMail", "downloadFromDataSource",
        "browseContainer", "readContentObjects", "extractContainerItem",
        "summarizeContent", "describeImage", "renderDocument",
        "textToSpeech", "generateImage", "createChart",
        "speechToText", "detectLanguage", "neutralizeData", "executeCode",
        "listWorkflowHistory", "readWorkflowMessages",
    }
    for _toolName in _CORE_ONLY_TOOLS:
        _td = registry.getTool(_toolName)
        if _td:
            _td.toolSet = "core"
diff --git a/modules/serviceCenter/services/serviceAgent/toolboxRegistry.py b/modules/serviceCenter/services/serviceAgent/toolboxRegistry.py
new file mode 100644
index 00000000..7646da11
--- /dev/null
+++ b/modules/serviceCenter/services/serviceAgent/toolboxRegistry.py
@@ -0,0 +1,248 @@
+# Copyright (c) 2025 Patrick Motsch
+# All rights reserved.
+"""
+Toolbox Registry for the Agent service.
+Manages thematic tool groupings (toolboxes) and the `requestToolbox` meta-tool.
+"""
+
+import logging
+from typing import Dict, List, Any, Optional, Set
+from pydantic import BaseModel, Field
+
+logger = logging.getLogger(__name__)
+
+
class ToolboxDefinition(BaseModel):
    """Definition of a thematic toolbox.

    A toolbox is a named group of agent tool names that can be activated
    together.  Semantics as consumed by `ToolboxRegistry`: toolboxes with
    `isDefault=True` are active at agent start, and `requiresConnection`
    gates availability on the user's connection authorities.
    """
    id: str = Field(description="Unique toolbox identifier (e.g. 'core', 'email', 'workflow')")
    label: str = Field(description="Human-readable label")
    description: str = Field(default="", description="What this toolbox provides")
    featureCode: Optional[str] = Field(default=None, description="Feature code if toolbox is feature-specific")
    tools: List[str] = Field(default_factory=list, description="Tool names belonging to this toolbox")
    isDefault: bool = Field(default=False, description="If true, toolbox is active by default")
    requiresConnection: Optional[str] = Field(
        default=None,
        description="Connection authority required (e.g. 'microsoft', 'google'). None = always available."
    )
+
+
class ToolboxRegistry:
    """Registry for toolbox definitions. Manages activation and tool lookup.

    Keeps an insertion-ordered mapping of toolbox id -> definition and
    answers queries about defaults, availability (per user connection),
    and tool/toolbox membership.
    """

    def __init__(self) -> None:
        # toolboxId -> definition; dict preserves registration order.
        self._toolboxes: Dict[str, "ToolboxDefinition"] = {}

    def registerToolbox(self, toolbox: "ToolboxDefinition") -> None:
        """Register a toolbox definition (re-registering replaces it)."""
        if toolbox.id in self._toolboxes:
            logger.debug("Toolbox '%s' already registered, updating", toolbox.id)
        self._toolboxes[toolbox.id] = toolbox
        logger.debug("Registered toolbox: %s (%d tools, default=%s)", toolbox.id, len(toolbox.tools), toolbox.isDefault)

    def getToolbox(self, toolboxId: str) -> Optional["ToolboxDefinition"]:
        """Get a toolbox by ID, or None if unknown."""
        return self._toolboxes.get(toolboxId)

    def getAllToolboxes(self) -> List["ToolboxDefinition"]:
        """Get all registered toolboxes in registration order."""
        return list(self._toolboxes.values())

    def getDefaultToolboxes(self) -> List["ToolboxDefinition"]:
        """Get all default toolboxes (active at agent start)."""
        return [tb for tb in self._toolboxes.values() if tb.isDefault]

    # Fix: the original annotation `userConnections: List[str] = None` was an
    # implicit Optional (rejected by PEP 484 / modern type checkers); the
    # default and runtime behavior are unchanged.
    def getActiveToolboxes(self, userConnections: Optional[List[str]] = None) -> List["ToolboxDefinition"]:
        """
        Get toolboxes available to the user based on their connections.
        Toolboxes without requiresConnection are always available.
        Toolboxes with requiresConnection are available only if the user has that connection.
        """
        connectionAuthorities: Set[str] = set(userConnections or [])
        return [
            tb for tb in self._toolboxes.values()
            if tb.requiresConnection is None or tb.requiresConnection in connectionAuthorities
        ]

    def getToolsForToolboxes(self, toolboxIds: List[str]) -> List[str]:
        """Get the sorted union of all tool names for the given toolbox IDs.

        Unknown toolbox IDs are ignored.
        """
        tools: Set[str] = set()
        for tbId in toolboxIds:
            tb = self._toolboxes.get(tbId)
            if tb:
                tools.update(tb.tools)
        return sorted(tools)

    def getToolboxForTool(self, toolName: str) -> Optional[str]:
        """Find which toolbox a tool belongs to (first match wins), or None."""
        for tb in self._toolboxes.values():
            if toolName in tb.tools:
                return tb.id
        return None

    def toApiResponse(self, userConnections: Optional[List[str]] = None) -> List[Dict[str, Any]]:
        """Serialize the toolboxes available to this user for an API response."""
        available = self.getActiveToolboxes(userConnections)
        return [
            {
                "id": tb.id,
                "label": tb.label,
                "description": tb.description,
                "toolCount": len(tb.tools),
                "isDefault": tb.isDefault,
                "requiresConnection": tb.requiresConnection,
            }
            for tb in available
        ]
+
+
# Module-level singleton, created at import time.  No locking is performed
# on access; registration happens via _registerDefaultToolboxes() below.
_toolboxRegistry = ToolboxRegistry()


def getToolboxRegistry() -> ToolboxRegistry:
    """Get the global toolbox registry singleton."""
    return _toolboxRegistry
+
+
def _registerDefaultToolboxes() -> None:
    """Register the default set of toolboxes.

    Three always-available default toolboxes (core, ai, datasources),
    connection-gated toolboxes (email, sharepoint, clickup, jira) that only
    appear when the user has the matching connection authority, and
    feature-specific toolboxes (workflow, trustee).
    """
    defaults = [
        # Always active: basic file/web/search tools.
        ToolboxDefinition(
            id="core",
            label="Core Tools",
            description="Basic agent tools: search, read, write, web",
            isDefault=True,
            tools=[
                "readFile", "listFiles", "searchInFileContent", "listFolders",
                "webSearch", "readUrl", "writeFile", "deleteFile", "renameFile",
                "copyFile", "createFolder", "deleteFolder", "moveFile", "moveFolder",
                "renameFolder", "tagFile", "replaceInFile", "translateText",
                "detectLanguage", "queryFeatureInstance",
            ],
        ),
        # Always active: AI-powered generation/analysis tools.
        ToolboxDefinition(
            id="ai",
            label="AI Tools",
            description="AI-powered analysis and generation",
            isDefault=True,
            tools=[
                "summarizeContent", "describeImage", "generateImage",
                "textToSpeech", "speechToText", "renderDocument",
                "createChart", "executeCode", "neutralizeData",
            ],
        ),
        # Always active: external data-source access.
        ToolboxDefinition(
            id="datasources",
            label="Data Sources",
            description="Access external data sources and databases",
            isDefault=True,
            tools=[
                "listConnections", "browseDataSource", "searchDataSource",
                "downloadFromDataSource", "uploadToExternal",
                "browseContainer", "readContentObjects", "extractContainerItem",
            ],
        ),
        # Connection-gated: only offered when the user has the authority
        # named in requiresConnection (see ToolboxRegistry.getActiveToolboxes).
        ToolboxDefinition(
            id="email",
            label="Email",
            description="Read and send emails via Outlook/Gmail",
            requiresConnection="microsoft",
            isDefault=False,
            tools=[
                "sendMail",
                "outlook_readEmails", "outlook_searchEmails",
                "outlook_composeAndDraftReply", "outlook_sendDraft",
            ],
        ),
        ToolboxDefinition(
            id="sharepoint",
            label="SharePoint",
            description="Access SharePoint sites, lists, and files",
            requiresConnection="microsoft",
            isDefault=False,
            tools=[
                "sharepoint_findDocuments", "sharepoint_readDocuments",
                "sharepoint_upload",
            ],
        ),
        ToolboxDefinition(
            id="clickup",
            label="ClickUp",
            description="Manage ClickUp tasks and projects",
            requiresConnection="clickup",
            isDefault=False,
            tools=[
                "clickup_searchTasks", "clickup_createTask", "clickup_updateTask",
            ],
        ),
        ToolboxDefinition(
            id="jira",
            label="Jira",
            description="Manage Jira issues and projects",
            requiresConnection="jira",
            isDefault=False,
            tools=[
                "jira_connect", "jira_exportTickets", "jira_importTickets",
            ],
        ),
        # Feature-specific toolboxes (featureCode ties them to a feature).
        ToolboxDefinition(
            id="workflow",
            label="Workflow",
            description="Graph manipulation tools for the visual editor",
            featureCode="graphicalEditor",
            isDefault=False,
            tools=[
                "readWorkflowGraph", "addNode", "removeNode", "connectNodes",
                "setNodeParameter", "listAvailableNodeTypes", "validateGraph",
                "listWorkflowHistory", "readWorkflowMessages",
            ],
        ),
        ToolboxDefinition(
            id="trustee",
            label="Trustee / Accounting",
            description="Trustee accounting tools: refresh data from external system (e.g. Abacus), query positions and journal entries",
            featureCode="trustee",
            isDefault=False,
            tools=[
                "trustee_refreshAccountingData",
            ],
        ),
    ]
    for tb in defaults:
        _toolboxRegistry.registerToolbox(tb)


# Populate the registry at import time so it is ready before any agent run.
_registerDefaultToolboxes()
+
+
REQUEST_TOOLBOX_TOOL_NAME = "requestToolbox"


def buildRequestToolboxDefinition(availableToolboxIds: List[str]) -> dict:
    """Construct the tool-definition dict for the `requestToolbox` meta-tool.

    `availableToolboxIds` becomes the enum of valid `toolboxId` values, so
    the model can only request toolboxes that actually exist for this run.
    """
    # Schemas built up-front for readability; assembled into the final
    # definition below.
    toolboxIdSchema = {
        "type": "string",
        "enum": availableToolboxIds,
        "description": "ID of the toolbox to activate",
    }
    reasonSchema = {
        "type": "string",
        "description": "Brief reason why this toolbox is needed",
    }
    description = (
        "Request additional specialized tools for the current task. "
        "Call this when you need tools from a specific toolbox that is not yet active. "
        "After calling, the requested tools will be available in the next round."
    )
    return {
        "name": REQUEST_TOOLBOX_TOOL_NAME,
        "description": description,
        "parameters": {
            "type": "object",
            "properties": {
                "toolboxId": toolboxIdSchema,
                "reason": reasonSchema,
            },
            "required": ["toolboxId"],
        },
    }
diff --git a/modules/serviceCenter/services/serviceAgent/workflowTools.py b/modules/serviceCenter/services/serviceAgent/workflowTools.py
new file mode 100644
index 00000000..a63abb65
--- /dev/null
+++ b/modules/serviceCenter/services/serviceAgent/workflowTools.py
@@ -0,0 +1,479 @@
+# Copyright (c) 2025 Patrick Motsch
+# All rights reserved.
+"""
+Workflow Toolbox - AI-assisted graph manipulation tools for the GraphicalEditor.
+Tools: readWorkflowGraph, addNode, removeNode, connectNodes, setNodeParameter,
+ listAvailableNodeTypes, validateGraph, listWorkflowHistory, readWorkflowMessages.
+"""
+
+import logging
+import uuid
+from typing import Dict, Any, List, Optional
+
+from modules.serviceCenter.services.serviceAgent.datamodelAgent import ToolResult
+
+logger = logging.getLogger(__name__)
+
+TOOLBOX_ID = "workflow"
+
+
+async def _readWorkflowGraph(params: Dict[str, Any], context: Any) -> ToolResult:
+ """Read the current workflow graph (nodes and connections)."""
+ try:
+ workflowId = params.get("workflowId")
+ instanceId = params.get("instanceId")
+ if not workflowId or not instanceId:
+ return ToolResult(success=False, error="workflowId and instanceId required")
+
+ from modules.features.graphicalEditor.interfaceFeatureGraphicalEditor import getGraphicalEditorInterface
+ user = getattr(context, "user", None)
+ mandateId = getattr(context, "mandateId", "") or ""
+ iface = getGraphicalEditorInterface(user, mandateId, instanceId)
+ wf = iface.getWorkflow(workflowId)
+ if not wf:
+ return ToolResult(success=False, error=f"Workflow {workflowId} not found")
+
+ graph = wf.get("graph", {})
+ nodes = graph.get("nodes", [])
+ connections = graph.get("connections", [])
+ return ToolResult(
+ success=True,
+ data={
+ "workflowId": workflowId,
+ "label": wf.get("label", ""),
+ "nodeCount": len(nodes),
+ "connectionCount": len(connections),
+ "nodes": [{"id": n.get("id"), "type": n.get("type"), "title": n.get("title", "")} for n in nodes],
+ "connections": connections,
+ },
+ )
+ except Exception as e:
+ logger.exception("readWorkflowGraph failed: %s", e)
+ return ToolResult(success=False, error=str(e))
+
+
+async def _addNode(params: Dict[str, Any], context: Any) -> ToolResult:
+ """Add a node to the workflow graph."""
+ try:
+ workflowId = params.get("workflowId")
+ instanceId = params.get("instanceId")
+ nodeType = params.get("nodeType")
+ if not workflowId or not instanceId or not nodeType:
+ return ToolResult(success=False, error="workflowId, instanceId, and nodeType required")
+
+ from modules.features.graphicalEditor.interfaceFeatureGraphicalEditor import getGraphicalEditorInterface
+ user = getattr(context, "user", None)
+ mandateId = getattr(context, "mandateId", "") or ""
+ iface = getGraphicalEditorInterface(user, mandateId, instanceId)
+ wf = iface.getWorkflow(workflowId)
+ if not wf:
+ return ToolResult(success=False, error=f"Workflow {workflowId} not found")
+
+ graph = dict(wf.get("graph", {}))
+ nodes = list(graph.get("nodes", []))
+
+ nodeId = params.get("nodeId") or str(uuid.uuid4())[:8]
+ title = params.get("title", "")
+ nodeParams = params.get("parameters", {})
+ position = params.get("position", {"x": len(nodes) * 200, "y": 100})
+
+ newNode = {
+ "id": nodeId,
+ "type": nodeType,
+ "title": title,
+ "parameters": nodeParams,
+ "position": position,
+ }
+ nodes.append(newNode)
+ graph["nodes"] = nodes
+
+ iface.updateWorkflow(workflowId, {"graph": graph})
+ return ToolResult(
+ success=True,
+ data={"nodeId": nodeId, "nodeType": nodeType, "message": f"Node '{title or nodeType}' added"},
+ )
+ except Exception as e:
+ logger.exception("addNode failed: %s", e)
+ return ToolResult(success=False, error=str(e))
+
+
+async def _removeNode(params: Dict[str, Any], context: Any) -> ToolResult:
+ """Remove a node and its connections from the workflow graph."""
+ try:
+ workflowId = params.get("workflowId")
+ instanceId = params.get("instanceId")
+ nodeId = params.get("nodeId")
+ if not workflowId or not instanceId or not nodeId:
+ return ToolResult(success=False, error="workflowId, instanceId, and nodeId required")
+
+ from modules.features.graphicalEditor.interfaceFeatureGraphicalEditor import getGraphicalEditorInterface
+ user = getattr(context, "user", None)
+ mandateId = getattr(context, "mandateId", "") or ""
+ iface = getGraphicalEditorInterface(user, mandateId, instanceId)
+ wf = iface.getWorkflow(workflowId)
+ if not wf:
+ return ToolResult(success=False, error=f"Workflow {workflowId} not found")
+
+ graph = dict(wf.get("graph", {}))
+ nodes = [n for n in graph.get("nodes", []) if n.get("id") != nodeId]
+ connections = [
+ c for c in graph.get("connections", [])
+ if c.get("source") != nodeId and c.get("target") != nodeId
+ ]
+ graph["nodes"] = nodes
+ graph["connections"] = connections
+
+ iface.updateWorkflow(workflowId, {"graph": graph})
+ return ToolResult(success=True, data={"nodeId": nodeId, "message": f"Node {nodeId} removed"})
+ except Exception as e:
+ logger.exception("removeNode failed: %s", e)
+ return ToolResult(success=False, error=str(e))
+
+
+async def _connectNodes(params: Dict[str, Any], context: Any) -> ToolResult:
+ """Connect two nodes in the workflow graph."""
+ try:
+ workflowId = params.get("workflowId")
+ instanceId = params.get("instanceId")
+ sourceId = params.get("sourceId")
+ targetId = params.get("targetId")
+ if not workflowId or not instanceId or not sourceId or not targetId:
+ return ToolResult(success=False, error="workflowId, instanceId, sourceId, and targetId required")
+
+ from modules.features.graphicalEditor.interfaceFeatureGraphicalEditor import getGraphicalEditorInterface
+ user = getattr(context, "user", None)
+ mandateId = getattr(context, "mandateId", "") or ""
+ iface = getGraphicalEditorInterface(user, mandateId, instanceId)
+ wf = iface.getWorkflow(workflowId)
+ if not wf:
+ return ToolResult(success=False, error=f"Workflow {workflowId} not found")
+
+ graph = dict(wf.get("graph", {}))
+ connections = list(graph.get("connections", []))
+ newConn = {
+ "source": sourceId,
+ "target": targetId,
+ "sourceOutput": params.get("sourceOutput", 0),
+ "targetInput": params.get("targetInput", 0),
+ }
+ connections.append(newConn)
+ graph["connections"] = connections
+
+ iface.updateWorkflow(workflowId, {"graph": graph})
+ return ToolResult(success=True, data={"connection": newConn, "message": f"Connected {sourceId} -> {targetId}"})
+ except Exception as e:
+ logger.exception("connectNodes failed: %s", e)
+ return ToolResult(success=False, error=str(e))
+
+
+async def _setNodeParameter(params: Dict[str, Any], context: Any) -> ToolResult:
+ """Set a parameter on a node."""
+ try:
+ workflowId = params.get("workflowId")
+ instanceId = params.get("instanceId")
+ nodeId = params.get("nodeId")
+ paramName = params.get("parameterName")
+ paramValue = params.get("parameterValue")
+ if not workflowId or not instanceId or not nodeId or not paramName:
+ return ToolResult(success=False, error="workflowId, instanceId, nodeId, and parameterName required")
+
+ from modules.features.graphicalEditor.interfaceFeatureGraphicalEditor import getGraphicalEditorInterface
+ user = getattr(context, "user", None)
+ mandateId = getattr(context, "mandateId", "") or ""
+ iface = getGraphicalEditorInterface(user, mandateId, instanceId)
+ wf = iface.getWorkflow(workflowId)
+ if not wf:
+ return ToolResult(success=False, error=f"Workflow {workflowId} not found")
+
+ graph = dict(wf.get("graph", {}))
+ nodes = list(graph.get("nodes", []))
+ found = False
+ for n in nodes:
+ if n.get("id") == nodeId:
+ nodeParams = dict(n.get("parameters", {}))
+ nodeParams[paramName] = paramValue
+ n["parameters"] = nodeParams
+ found = True
+ break
+
+ if not found:
+ return ToolResult(success=False, error=f"Node {nodeId} not found in graph")
+
+ graph["nodes"] = nodes
+ iface.updateWorkflow(workflowId, {"graph": graph})
+ return ToolResult(success=True, data={"nodeId": nodeId, "parameter": paramName, "message": f"Parameter '{paramName}' set"})
+ except Exception as e:
+ logger.exception("setNodeParameter failed: %s", e)
+ return ToolResult(success=False, error=str(e))
+
+
+async def _listAvailableNodeTypes(params: Dict[str, Any], context: Any) -> ToolResult:
+ """List all available node types for the flow builder."""
+ try:
+ from modules.features.graphicalEditor.nodeDefinitions import STATIC_NODE_TYPES
+ nodeTypes = [
+ {"id": n.get("id"), "category": n.get("category"), "label": n.get("label", {}).get("en", n.get("id"))}
+ for n in STATIC_NODE_TYPES
+ ]
+ return ToolResult(success=True, data={"nodeTypes": nodeTypes, "count": len(nodeTypes)})
+ except Exception as e:
+ logger.exception("listAvailableNodeTypes failed: %s", e)
+ return ToolResult(success=False, error=str(e))
+
+
+async def _validateGraph(params: Dict[str, Any], context: Any) -> ToolResult:
+ """Validate a workflow graph for common issues."""
+ try:
+ workflowId = params.get("workflowId")
+ instanceId = params.get("instanceId")
+ if not workflowId or not instanceId:
+ return ToolResult(success=False, error="workflowId and instanceId required")
+
+ from modules.features.graphicalEditor.interfaceFeatureGraphicalEditor import getGraphicalEditorInterface
+ user = getattr(context, "user", None)
+ mandateId = getattr(context, "mandateId", "") or ""
+ iface = getGraphicalEditorInterface(user, mandateId, instanceId)
+ wf = iface.getWorkflow(workflowId)
+ if not wf:
+ return ToolResult(success=False, error=f"Workflow {workflowId} not found")
+
+ graph = wf.get("graph", {})
+ nodes = graph.get("nodes", [])
+ connections = graph.get("connections", [])
+ issues: List[str] = []
+
+ nodeIds = {n.get("id") for n in nodes}
+ if not nodes:
+ issues.append("Graph has no nodes")
+
+ hasTrigger = any(n.get("type", "").startswith("trigger.") for n in nodes)
+ if not hasTrigger:
+ issues.append("No trigger node found")
+
+ for c in connections:
+ if c.get("source") not in nodeIds:
+ issues.append(f"Connection source '{c.get('source')}' not found")
+ if c.get("target") not in nodeIds:
+ issues.append(f"Connection target '{c.get('target')}' not found")
+
+ connectedNodes = set()
+ for c in connections:
+ connectedNodes.add(c.get("source"))
+ connectedNodes.add(c.get("target"))
+ orphans = [n.get("id") for n in nodes if n.get("id") not in connectedNodes and not n.get("type", "").startswith("trigger.")]
+ if orphans:
+ issues.append(f"Orphan nodes (not connected): {', '.join(orphans)}")
+
+ return ToolResult(
+ success=True,
+ data={
+ "valid": len(issues) == 0,
+ "issues": issues,
+ "nodeCount": len(nodes),
+ "connectionCount": len(connections),
+ },
+ )
+ except Exception as e:
+ logger.exception("validateGraph failed: %s", e)
+ return ToolResult(success=False, error=str(e))
+
+
+async def _listWorkflowHistory(params: Dict[str, Any], context: Any) -> ToolResult:
+ """List versions (history) for a workflow."""
+ try:
+ workflowId = params.get("workflowId", "")
+ instanceId = params.get("instanceId", "")
+ from modules.features.graphicalEditor.interfaceFeatureGraphicalEditor import getGraphicalEditorInterface
+ user = getattr(context, "user", None)
+ mandateId = getattr(context, "mandateId", "") or ""
+ iface = getGraphicalEditorInterface(user, mandateId, instanceId)
+ versions = iface.getVersions(workflowId)
+ return ToolResult(
+ success=True,
+ data={
+ "workflowId": workflowId,
+ "versions": [
+ {
+ "id": v.get("id"),
+ "versionNumber": v.get("versionNumber"),
+ "status": v.get("status"),
+ "publishedAt": v.get("publishedAt"),
+ "publishedBy": v.get("publishedBy"),
+ }
+ for v in versions
+ ],
+ },
+ )
+ except Exception as e:
+ logger.exception("listWorkflowHistory failed: %s", e)
+ return ToolResult(success=False, error=str(e))
+
+
+async def _readWorkflowMessages(params: Dict[str, Any], context: Any) -> ToolResult:
+ """Read recent run logs/messages for a workflow."""
+ try:
+ workflowId = params.get("workflowId", "")
+ instanceId = params.get("instanceId", "")
+ from modules.features.graphicalEditor.interfaceFeatureGraphicalEditor import getGraphicalEditorInterface
+ user = getattr(context, "user", None)
+ mandateId = getattr(context, "mandateId", "") or ""
+ iface = getGraphicalEditorInterface(user, mandateId, instanceId)
+ from modules.features.graphicalEditor.datamodelFeatureGraphicalEditor import AutoRun
+ runs = iface.db.getRecordset(AutoRun, recordFilter={"workflowId": workflowId}) or []
+ runSummaries = []
+        for r in sorted(runs, key=lambda x: (x.get("startedAt") is not None, x.get("startedAt") or 0), reverse=True)[:10]:
+ runSummaries.append({
+ "runId": r.get("id"),
+ "status": r.get("status"),
+ "startedAt": r.get("startedAt"),
+ "completedAt": r.get("completedAt"),
+ "error": r.get("error"),
+ })
+ return ToolResult(
+ success=True,
+ data={"workflowId": workflowId, "recentRuns": runSummaries},
+ )
+ except Exception as e:
+ logger.exception("readWorkflowMessages failed: %s", e)
+ return ToolResult(success=False, error=str(e))
+
+
+def getWorkflowToolDefinitions() -> List[Dict[str, Any]]:
+ """Return tool definitions for registration in the ToolRegistry."""
+ return [
+ {
+ "name": "readWorkflowGraph",
+ "handler": _readWorkflowGraph,
+ "description": "Read the current workflow graph (nodes and connections)",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "workflowId": {"type": "string", "description": "Workflow ID"},
+ "instanceId": {"type": "string", "description": "Feature instance ID"},
+ },
+ "required": ["workflowId", "instanceId"],
+ },
+ "toolSet": TOOLBOX_ID,
+ },
+ {
+ "name": "addNode",
+ "handler": _addNode,
+ "description": "Add a node to the workflow graph",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "workflowId": {"type": "string"},
+ "instanceId": {"type": "string"},
+ "nodeType": {"type": "string", "description": "Node type (e.g. ai.chat, email.send)"},
+ "title": {"type": "string", "description": "Human-readable title"},
+ "parameters": {"type": "object", "description": "Node parameters"},
+ "position": {"type": "object", "description": "Canvas position {x, y}"},
+ },
+ "required": ["workflowId", "instanceId", "nodeType"],
+ },
+ "toolSet": TOOLBOX_ID,
+ },
+ {
+ "name": "removeNode",
+ "handler": _removeNode,
+ "description": "Remove a node and its connections from the graph",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "workflowId": {"type": "string"},
+ "instanceId": {"type": "string"},
+ "nodeId": {"type": "string", "description": "ID of the node to remove"},
+ },
+ "required": ["workflowId", "instanceId", "nodeId"],
+ },
+ "toolSet": TOOLBOX_ID,
+ },
+ {
+ "name": "connectNodes",
+ "handler": _connectNodes,
+ "description": "Connect two nodes in the graph",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "workflowId": {"type": "string"},
+ "instanceId": {"type": "string"},
+ "sourceId": {"type": "string"},
+ "targetId": {"type": "string"},
+ "sourceOutput": {"type": "integer", "default": 0},
+ "targetInput": {"type": "integer", "default": 0},
+ },
+ "required": ["workflowId", "instanceId", "sourceId", "targetId"],
+ },
+ "toolSet": TOOLBOX_ID,
+ },
+ {
+ "name": "setNodeParameter",
+ "handler": _setNodeParameter,
+ "description": "Set a parameter on a node",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "workflowId": {"type": "string"},
+ "instanceId": {"type": "string"},
+ "nodeId": {"type": "string"},
+ "parameterName": {"type": "string"},
+ "parameterValue": {"description": "Value to set (any type)"},
+ },
+ "required": ["workflowId", "instanceId", "nodeId", "parameterName", "parameterValue"],
+ },
+ "toolSet": TOOLBOX_ID,
+ },
+ {
+ "name": "listAvailableNodeTypes",
+ "handler": _listAvailableNodeTypes,
+ "description": "List all available node types for the flow builder",
+ "parameters": {"type": "object", "properties": {}},
+ "readOnly": True,
+ "toolSet": TOOLBOX_ID,
+ },
+ {
+ "name": "validateGraph",
+ "handler": _validateGraph,
+ "description": "Validate a workflow graph for common issues",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "workflowId": {"type": "string"},
+ "instanceId": {"type": "string"},
+ },
+ "required": ["workflowId", "instanceId"],
+ },
+ "readOnly": True,
+ "toolSet": TOOLBOX_ID,
+ },
+ {
+ "name": "listWorkflowHistory",
+ "handler": _listWorkflowHistory,
+ "description": "List version history for a workflow (AutoVersion entries)",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "workflowId": {"type": "string"},
+ "instanceId": {"type": "string"},
+ },
+ "required": ["workflowId", "instanceId"],
+ },
+ "readOnly": True,
+ "toolSet": TOOLBOX_ID,
+ },
+ {
+ "name": "readWorkflowMessages",
+ "handler": _readWorkflowMessages,
+ "description": "Read recent run logs and status for a workflow",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "workflowId": {"type": "string"},
+ "instanceId": {"type": "string"},
+ },
+ "required": ["workflowId", "instanceId"],
+ },
+ "readOnly": True,
+ "toolSet": TOOLBOX_ID,
+ },
+ ]
diff --git a/modules/serviceCenter/services/serviceAi/subContentExtraction.py b/modules/serviceCenter/services/serviceAi/subContentExtraction.py
index 1d1236da..e050bb67 100644
--- a/modules/serviceCenter/services/serviceAi/subContentExtraction.py
+++ b/modules/serviceCenter/services/serviceAi/subContentExtraction.py
@@ -430,7 +430,7 @@ class ContentExtractor:
# Debug-Log (harmonisiert)
self.services.utils.writeDebugFile(
- json.dumps([part.dict() for part in allContentParts], indent=2, default=str),
+ json.dumps([part.model_dump() for part in allContentParts], indent=2, default=str),
"content_extraction_result"
)
diff --git a/modules/serviceCenter/services/serviceAi/subDocumentIntents.py b/modules/serviceCenter/services/serviceAi/subDocumentIntents.py
index 42dfef14..7a462177 100644
--- a/modules/serviceCenter/services/serviceAi/subDocumentIntents.py
+++ b/modules/serviceCenter/services/serviceAi/subDocumentIntents.py
@@ -105,7 +105,7 @@ class DocumentIntentAnalyzer:
# Debug-Log (harmonisiert)
self.services.utils.writeDebugFile(
- json.dumps([intent.dict() for intent in documentIntents], indent=2),
+ json.dumps([intent.model_dump() for intent in documentIntents], indent=2),
"document_intent_analysis_result"
)
diff --git a/modules/serviceCenter/services/serviceAi/subStructureFilling.py b/modules/serviceCenter/services/serviceAi/subStructureFilling.py
index 6ba32dfd..b31bc32d 100644
--- a/modules/serviceCenter/services/serviceAi/subStructureFilling.py
+++ b/modules/serviceCenter/services/serviceAi/subStructureFilling.py
@@ -18,6 +18,12 @@ from modules.datamodels.datamodelExtraction import ContentPart
from modules.datamodels.datamodelAi import AiCallRequest, AiCallOptions, OperationTypeEnum, PriorityEnum, ProcessingModeEnum
from modules.workflows.processing.shared.stateTools import checkWorkflowStopped
+
+class _AiResponseFallback:
+ """Lightweight wrapper used when AI JSON parsing fails but raw content must be preserved."""
+ def __init__(self, content):
+ self.content = content
+
logger = logging.getLogger(__name__)
@@ -719,12 +725,8 @@ class StructureFiller:
self.services.chat.progressLogUpdate(sectionOperationId, 0.8, "Validating generated content")
- class _AiResponse:
- def __init__(self, content):
- self.content = content
-
responseElements = await self._processAiResponseForSection(
- aiResponse=_AiResponse(aiResponseJson),
+ aiResponse=_AiResponseFallback(aiResponseJson),
contentType=contentType,
operationType=operationType,
sectionId=sectionId,
@@ -1032,17 +1034,10 @@ class StructureFiller:
else:
generatedElements = []
- class AiResponse:
- def __init__(self, content):
- self.content = content
-
- aiResponse = AiResponse(aiResponseJson)
+ aiResponse = _AiResponseFallback(aiResponseJson)
except Exception as parseError:
logger.error(f"Error parsing response from _callAiWithLooping for section {sectionId}: {str(parseError)}")
- class AiResponse:
- def __init__(self, content):
- self.content = content
- aiResponse = AiResponse(aiResponseJson)
+ aiResponse = _AiResponseFallback(aiResponseJson)
generatedElements = []
self.services.chat.progressLogUpdate(sectionOperationId, 0.6, "Processing AI response")
@@ -1200,17 +1195,10 @@ class StructureFiller:
else:
generatedElements = []
- class AiResponse:
- def __init__(self, content):
- self.content = content
-
- aiResponse = AiResponse(aiResponseJson)
+ aiResponse = _AiResponseFallback(aiResponseJson)
except Exception as parseError:
logger.error(f"Error parsing response from _callAiWithLooping for section {sectionId}: {str(parseError)}")
- class AiResponse:
- def __init__(self, content):
- self.content = content
- aiResponse = AiResponse(aiResponseJson)
+ aiResponse = _AiResponseFallback(aiResponseJson)
generatedElements = []
self.services.chat.progressLogUpdate(sectionOperationId, 0.6, "Processing AI response")
@@ -1467,17 +1455,10 @@ class StructureFiller:
else:
generatedElements = []
- class AiResponse:
- def __init__(self, content):
- self.content = content
-
- aiResponse = AiResponse(aiResponseJson)
+ aiResponse = _AiResponseFallback(aiResponseJson)
except Exception as parseError:
logger.error(f"Error parsing response from _callAiWithLooping for section {sectionId}: {str(parseError)}")
- class AiResponse:
- def __init__(self, content):
- self.content = content
- aiResponse = AiResponse(aiResponseJson)
+ aiResponse = _AiResponseFallback(aiResponseJson)
generatedElements = []
self.services.chat.progressLogUpdate(sectionOperationId, 0.6, "Processing AI response")
@@ -2070,7 +2051,7 @@ class StructureFiller:
contentPartInstructions: Dict[str, Any],
contentParts: List[ContentPart],
userPrompt: str,
- language: str = "en",
+ language: str = "de",
outputFormat: str = "txt"
) -> str:
"""Baue Prompt für Chapter-Sections-Struktur-Generierung, querying renderer for accepted section types."""
@@ -2206,7 +2187,7 @@ Return only valid JSON. Do not include any explanatory text outside the JSON.
allSections: Optional[List[Dict[str, Any]]] = None,
sectionIndex: Optional[int] = None,
isAggregation: bool = False,
- language: str = "en",
+ language: str = "de",
outputFormat: str = "txt",
preExtractedText: Optional[str] = None
) -> tuple[str, str]:
diff --git a/modules/serviceCenter/services/serviceAi/subStructureGeneration.py b/modules/serviceCenter/services/serviceAi/subStructureGeneration.py
index 72127c92..3d531756 100644
--- a/modules/serviceCenter/services/serviceAi/subStructureGeneration.py
+++ b/modules/serviceCenter/services/serviceAi/subStructureGeneration.py
@@ -14,6 +14,7 @@ from typing import Dict, Any, List, Optional
from modules.datamodels.datamodelExtraction import ContentPart
from modules.datamodels.datamodelAi import AiCallOptions, OperationTypeEnum, PriorityEnum, ProcessingModeEnum
from modules.workflows.processing.shared.stateTools import checkWorkflowStopped
+from modules.shared.i18nRegistry import normalizePrimaryLanguageTag
logger = logging.getLogger(__name__)
@@ -262,18 +263,17 @@ CRITICAL:
# Validation 3.5 & 3.6: Document language
# Use validated currentUserLanguage (always valid, validated during user intention analysis)
# Access via _getUserLanguage() which uses self.services.currentUserLanguage
- userPromptLanguage = self._getUserLanguage() # Uses validated currentUserLanguage infrastructure
-
- if "language" not in doc or not isinstance(doc["language"], str) or len(doc["language"]) != 2:
- # AI didn't return language or invalid format - use validated currentUserLanguage
+ userPromptLanguage = normalizePrimaryLanguageTag(self._getUserLanguage(), "de")
+
+ raw_lang = doc.get("language")
+ if not isinstance(raw_lang, str) or not str(raw_lang).strip():
doc["language"] = userPromptLanguage
if "language" not in doc:
logger.warning(f"Document {doc.get('id')} missing language - using currentUserLanguage: {userPromptLanguage}")
else:
- logger.warning(f"Document {doc.get('id')} has invalid language format from AI: {doc['language']}, using currentUserLanguage")
+ logger.warning(f"Document {doc.get('id')} has invalid language format from AI: {doc.get('language')}, using currentUserLanguage")
else:
- # AI returned valid language format - normalize
- doc["language"] = doc["language"].lower().strip()[:2]
+ doc["language"] = normalizePrimaryLanguageTag(str(raw_lang), userPromptLanguage)
logger.debug(f"Document {doc.get('id')} using AI-determined language: {doc['language']}")
# Validation 3.7: Document missing 'chapters' field
diff --git a/modules/serviceCenter/services/serviceChat/mainServiceChat.py b/modules/serviceCenter/services/serviceChat/mainServiceChat.py
index f3a74b1e..b436d3e3 100644
--- a/modules/serviceCenter/services/serviceChat/mainServiceChat.py
+++ b/modules/serviceCenter/services/serviceChat/mainServiceChat.py
@@ -371,30 +371,36 @@ class ChatService:
return None
def getUserConnectionFromConnectionReference(self, connectionReference: str) -> Optional[UserConnection]:
- """Get UserConnection from reference string (handles new format without UUID)"""
+ """Get UserConnection from reference string.
+
+ Supported formats:
+ - connection:{authority}:{username} [status:..., token:...]
+ - A raw UUID (fallback: lookup by connection ID)
+ """
try:
- # Parse reference format: connection:{authority}:{username} [status:..., token:...]
- # Remove state information if present
- base_reference = connectionReference.split(' [')[0]
-
+ base_reference = connectionReference.split(' [')[0].strip()
+
parts = base_reference.split(':')
- if len(parts) != 3 or parts[0] != "connection":
- return None
-
- authority = parts[1]
- username = parts[2]
-
- # Get user connections through AppObjects interface
+ if len(parts) == 3 and parts[0] == "connection":
+ authority = parts[1]
+ username = parts[2]
+ user_connections = self.interfaceDbApp.getUserConnections(self.user.id)
+ for conn in user_connections:
+ connAuthority = conn.authority.value if hasattr(conn.authority, "value") else str(conn.authority)
+ if connAuthority == authority and conn.externalUsername == username:
+ return conn
+
+ # Fallback: treat the reference as a connection ID (UUID)
user_connections = self.interfaceDbApp.getUserConnections(self.user.id)
-
- # Find matching connection by authority and username (no UUID needed)
for conn in user_connections:
- if conn.authority.value == authority and conn.externalUsername == username:
+ connId = conn.get("id") if isinstance(conn, dict) else getattr(conn, "id", None)
+ if connId and str(connId) == base_reference:
return conn
+
return None
-
+
except Exception as e:
- logger.error(f"Error parsing connection reference: {str(e)}")
+ logger.error(f"Error parsing connection reference '{connectionReference}': {str(e)}")
return None
def getFreshConnectionToken(self, connectionId: str):
diff --git a/modules/serviceCenter/services/serviceExtraction/subRegistry.py b/modules/serviceCenter/services/serviceExtraction/subRegistry.py
index cd14b0d7..826eef9d 100644
--- a/modules/serviceCenter/services/serviceExtraction/subRegistry.py
+++ b/modules/serviceCenter/services/serviceExtraction/subRegistry.py
@@ -139,6 +139,40 @@ class ExtractorRegistry:
return self._map[ext]
return self._fallback
+ def getExtensionToMimeMap(self) -> Dict[str, str]:
+ """Build a map from file extension (without dot) to primary MIME type.
+
+ Iterates all registered extractors and pairs each declared extension
+ with the first MIME type declared by the same extractor. Specialized
+ extractors (Pdf, Docx, …) are processed first so their mapping wins
+ over broad extractors like TextExtractor.
+ """
+ extMap: Dict[str, str] = {}
+ seen: set = set()
+
+ # Collect unique extractor instances (same instance registered under many keys)
+ extractors: list[Extractor] = []
+ for ext in self._map.values():
+ eid = id(ext)
+ if eid not in seen:
+ seen.add(eid)
+ extractors.append(ext)
+
+ # Specialized (few extensions) first so they win over broad ones
+ extractors.sort(key=lambda e: len(e.getSupportedExtensions()))
+
+ for ext in extractors:
+ extensions = ext.getSupportedExtensions()
+ mimeTypes = ext.getSupportedMimeTypes()
+ if not extensions or not mimeTypes:
+ continue
+ primaryMime = mimeTypes[0]
+ for rawExt in extensions:
+ key = rawExt.lstrip('.').lower()
+ if key not in extMap:
+ extMap[key] = primaryMime
+ return extMap
+
def getAllSupportedFormats(self) -> Dict[str, Dict[str, list[str]]]:
"""
Get all supported formats from all registered extractors.
diff --git a/modules/serviceCenter/services/serviceGeneration/mainServiceGeneration.py b/modules/serviceCenter/services/serviceGeneration/mainServiceGeneration.py
index 99da173e..b9377404 100644
--- a/modules/serviceCenter/services/serviceGeneration/mainServiceGeneration.py
+++ b/modules/serviceCenter/services/serviceGeneration/mainServiceGeneration.py
@@ -99,11 +99,7 @@ class GenerationService:
if mime_type == "application/json":
# Erstelle ActionDocument-Format mit validationMetadata und documentData
if hasattr(document_data, 'model_dump'):
- # Pydantic v2
document_data_dict = document_data.model_dump()
- elif hasattr(document_data, 'dict'):
- # Pydantic v1
- document_data_dict = document_data.dict()
elif isinstance(document_data, dict):
document_data_dict = document_data
elif isinstance(document_data, str):
diff --git a/modules/serviceCenter/services/serviceGeneration/subStructureGenerator.py b/modules/serviceCenter/services/serviceGeneration/subStructureGenerator.py
index 62e72c69..c2438fc0 100644
--- a/modules/serviceCenter/services/serviceGeneration/subStructureGenerator.py
+++ b/modules/serviceCenter/services/serviceGeneration/subStructureGenerator.py
@@ -130,8 +130,8 @@ class StructureGenerator:
# Convert ContentParts to dict format for JSON serialization
contentPartsList = []
for part in contentParts:
- if hasattr(part, 'dict'):
- partDict = part.dict()
+ if hasattr(part, 'model_dump'):
+ partDict = part.model_dump()
elif isinstance(part, dict):
partDict = part
else:
diff --git a/modules/serviceCenter/services/serviceKnowledge/mainServiceKnowledge.py b/modules/serviceCenter/services/serviceKnowledge/mainServiceKnowledge.py
index 9404a567..378c83cf 100644
--- a/modules/serviceCenter/services/serviceKnowledge/mainServiceKnowledge.py
+++ b/modules/serviceCenter/services/serviceKnowledge/mainServiceKnowledge.py
@@ -146,12 +146,15 @@ class KnowledgeService:
# 3. Chunk text content objects and create embeddings
textObjects = [o for o in contentObjects if o.get("contentType") == "text"]
- if _shouldNeutralize and textObjects:
- _neutralizedObjects = []
+ _neutralSvc = None
+ if _shouldNeutralize:
try:
_neutralSvc = self._getService("neutralization")
except Exception:
- _neutralSvc = None
+ logger.warning(f"Neutralization service unavailable for file {fileId}")
+
+ if _shouldNeutralize and textObjects:
+ _neutralizedObjects = []
if _neutralSvc:
for _obj in textObjects:
_textContent = (_obj.get("data", "") or "").strip()
@@ -201,7 +204,7 @@ class KnowledgeService:
# 4. Store non-text content objects (images, etc.) without embedding
nonTextObjects = [o for o in contentObjects if o.get("contentType") != "text"]
- if _shouldNeutralize and nonTextObjects:
+ if _shouldNeutralize and nonTextObjects and _neutralSvc:
import base64 as _b64
_filteredNonText = []
for _obj in nonTextObjects:
diff --git a/modules/serviceCenter/services/serviceMessaging/subscriptions/subSubscriptionGraphicalEditorRunFailed.py b/modules/serviceCenter/services/serviceMessaging/subscriptions/subSubscriptionGraphicalEditorRunFailed.py
new file mode 100644
index 00000000..2d77fd5b
--- /dev/null
+++ b/modules/serviceCenter/services/serviceMessaging/subscriptions/subSubscriptionGraphicalEditorRunFailed.py
@@ -0,0 +1,71 @@
+# Copyright (c) 2025 Patrick Motsch
+# All rights reserved.
+"""
+Subscription handler for GraphicalEditor workflow run failures.
+Sends email notifications to subscribed users when a workflow run fails.
+"""
+
+from typing import List
+from modules.datamodels.datamodelMessaging import (
+ MessagingEventParameters,
+ MessagingSubscriptionExecutionResult,
+ MessagingSubscriptionRegistration,
+ MessagingChannel,
+)
+
+
def execute(
    eventParameters: MessagingEventParameters,
    registrations: List[MessagingSubscriptionRegistration],
    messagingService,
) -> MessagingSubscriptionExecutionResult:
    """
    Subscription function for GraphicalEditor run failures.
    Sends email/SMS to registered users when a workflow run fails.
    """
    data = eventParameters.triggerData or {}
    workflowId = data.get("workflowId", "Unknown")
    workflowLabel = data.get("workflowLabel", workflowId)
    runId = data.get("runId", "Unknown")
    error = data.get("error", "Unknown error")
    mandateId = data.get("mandateId", "")  # currently unused; kept for parity with trigger payload

    emailSubject = f"Workflow fehlgeschlagen: {workflowLabel}"
    emailMessage = (
        f"Ein Workflow-Run ist fehlgeschlagen.\n\n"
        f"Workflow: {workflowLabel}\n"
        f"Workflow-ID: {workflowId}\n"
        f"Run-ID: {runId}\n"
        f"Fehler: {error}\n\n"
        f"Bitte prüfen Sie den Workflow im Grafischen Editor."
    )
    smsMessage = f"Workflow '{workflowLabel}' fehlgeschlagen: {error[:100]}"

    # Dispatch plan: all email recipients first, then all SMS recipients.
    deliveries = (
        (emailSubject, emailMessage,
         [r for r in registrations if r.channel == MessagingChannel.EMAIL]),
        ("", smsMessage,
         [r for r in registrations if r.channel == MessagingChannel.SMS]),
    )

    delivered = 0
    for subject, message, channelRegs in deliveries:
        for reg in channelRegs:
            sendResult = messagingService.sendMessage(
                subject=subject,
                message=message,
                registration=reg,
            )
            if sendResult.success:
                delivered += 1

    return MessagingSubscriptionExecutionResult(
        success=True,
        messagesSent=delivered,
    )
diff --git a/modules/serviceCenter/services/serviceSubscription/mainServiceSubscription.py b/modules/serviceCenter/services/serviceSubscription/mainServiceSubscription.py
index 89e20112..681070b0 100644
--- a/modules/serviceCenter/services/serviceSubscription/mainServiceSubscription.py
+++ b/modules/serviceCenter/services/serviceSubscription/mainServiceSubscription.py
@@ -248,12 +248,13 @@ class SubscriptionService:
activeUsers = self._interface.countActiveUsers(mandateId)
activeInstances = self._interface.countActiveFeatureInstances(mandateId)
+ billableModules = max(0, activeInstances - plan.includedModules)
lineItems = []
if priceMapping.stripePriceIdUsers:
lineItems.append({"price": priceMapping.stripePriceIdUsers, "quantity": max(activeUsers, 1)})
- if priceMapping.stripePriceIdInstances and activeInstances > 0:
- lineItems.append({"price": priceMapping.stripePriceIdInstances, "quantity": activeInstances})
+ if priceMapping.stripePriceIdInstances and billableModules > 0:
+ lineItems.append({"price": priceMapping.stripePriceIdInstances, "quantity": billableModules})
if not returnUrl:
raise ValueError("returnUrl is required for paid subscription checkout")
@@ -546,7 +547,7 @@ def _notifySubscriptionChange(
try:
from modules.shared.notifyMandateAdmins import notifyMandateAdmins
- planLabel = (plan.title.get("de") or plan.title.get("en") or plan.planKey) if plan else "—"
+ planLabel = (plan.title or plan.planKey) if plan else "\u2014"
platformHint = f"Plattform: {platformUrl}" if platformUrl else ""
rawHtmlBlock: Optional[str] = None
@@ -641,11 +642,12 @@ def _buildInvoiceSummaryHtml(
subInterface = getSubRootInterface()
userCount = subInterface.countActiveUsers(mandateId)
instanceCount = subInterface.countActiveFeatureInstances(mandateId)
+ billableModules = max(0, instanceCount - plan.includedModules)
userPrice = plan.pricePerUserCHF
instancePrice = plan.pricePerFeatureInstanceCHF
userTotal = userCount * userPrice
- instanceTotal = instanceCount * instancePrice
+ instanceTotal = billableModules * instancePrice
netTotal = userTotal + instanceTotal
periodLabel = {"MONTHLY": "Monatlich", "YEARLY": "Jährlich"}.get(plan.billingPeriod, plan.billingPeriod)
@@ -660,10 +662,10 @@ def _buildInvoiceSummaryHtml(
f'{userCount} × {_chf(userPrice)} '
f'{_chf(userTotal)} \n'
)
- if instancePrice > 0:
+ if instancePrice > 0 and billableModules > 0:
rows += (
- f'Feature-Instanzen '
- f'{instanceCount} × {_chf(instancePrice)} '
+ f'Module ({instanceCount} total, {plan.includedModules} inkl.) '
+ f'{billableModules} × {_chf(instancePrice)} '
f'{_chf(instanceTotal)} \n'
)
@@ -807,8 +809,8 @@ class SubscriptionCapacityException(Exception):
) + _SUBSCRIPTION_LIMITS_UI_HINT_DE
elif resourceType == "featureInstances":
self.message = (
- f"Es sind höchstens {maxAllowed} aktive Feature-Instanzen erlaubt (derzeit {currentCount}). "
- f"Bitte Abonnement erweitern oder eine Instanz entfernen."
+ f"Es sind höchstens {maxAllowed} aktive Module erlaubt (derzeit {currentCount}). "
+ f"Bitte Abonnement erweitern oder ein Modul entfernen."
) + _SUBSCRIPTION_LIMITS_UI_HINT_DE
elif resourceType == "dataVolumeMB":
self.message = (
diff --git a/modules/serviceCenter/services/serviceSubscription/stripeBootstrap.py b/modules/serviceCenter/services/serviceSubscription/stripeBootstrap.py
index 869ab52f..d26ef50e 100644
--- a/modules/serviceCenter/services/serviceSubscription/stripeBootstrap.py
+++ b/modules/serviceCenter/services/serviceSubscription/stripeBootstrap.py
@@ -3,10 +3,10 @@
"""
Auto-provision Stripe Products and Prices from the built-in plan catalog.
-Creates separate Stripe Products for user licenses and feature instances
+Creates separate Stripe Products for user licenses and modules
so that invoice line items show clear, descriptive names:
- "Benutzer-Lizenzen"
- - "Feature-Instanzen"
+ - "Module"
Idempotent — safe to call on every startup.
@@ -279,7 +279,7 @@ def bootstrapStripePrices() -> None:
reconciledInstances = _reconcilePrice(
stripe, mapping.stripeProductIdInstances, mapping.stripePriceIdInstances,
- plan.pricePerFeatureInstanceCHF, interval, f"{planKey} — Feature-Instanz",
+ plan.pricePerFeatureInstanceCHF, interval, f"{planKey} — Modul",
intervalCount,
)
if reconciledInstances != mapping.stripePriceIdInstances:
@@ -320,7 +320,7 @@ def bootstrapStripePrices() -> None:
productIdUsers = _findStripeProduct(stripe, planKey, "users")
if not productIdUsers:
productIdUsers = _createStripeProduct(
- stripe, "Benutzer-Lizenzen", f"Benutzer-Lizenzen für {plan.title.get('de', planKey)}",
+ stripe, "Benutzer-Lizenzen", f"Benutzer-Lizenzen für {plan.title or planKey}",
planKey, "users",
)
userCents = int(round(plan.pricePerUserCHF * 100))
@@ -338,7 +338,7 @@ def bootstrapStripePrices() -> None:
productIdInstances = _findStripeProduct(stripe, planKey, "instances")
if not productIdInstances:
productIdInstances = _createStripeProduct(
- stripe, "Feature-Instanzen", f"Feature-Instanzen für {plan.title.get('de', planKey)}",
+ stripe, "Module", f"Module für {plan.title or planKey}",
planKey, "instances",
)
instCents = int(round(plan.pricePerFeatureInstanceCHF * 100))
@@ -348,7 +348,7 @@ def bootstrapStripePrices() -> None:
if not priceIdInstances:
priceIdInstances = _createStripePrice(
stripe, productIdInstances, plan.pricePerFeatureInstanceCHF, interval,
- f"{planKey} — Feature-Instanz",
+ f"{planKey} — Modul",
intervalCount,
)
_archiveOtherRecurringPrices(
diff --git a/modules/shared/attributeUtils.py b/modules/shared/attributeUtils.py
index 239e214d..46333cc0 100644
--- a/modules/shared/attributeUtils.py
+++ b/modules/shared/attributeUtils.py
@@ -25,81 +25,81 @@ class AttributeDefinition(BaseModel):
description: Optional[str] = None
required: bool = False
default: Any = None
- options: Optional[Union[str, List[Any]]] = None # Can be a string reference (e.g., "user.role") or a list of options
+ options: Optional[Union[str, List[Any]]] = None
validation: Optional[Dict[str, Any]] = None
ui: Optional[Dict[str, Any]] = None
- # New frontend metadata fields
readonly: bool = False
editable: bool = True
visible: bool = True
order: int = 0
placeholder: Optional[str] = None
+ fkSource: Optional[str] = None
+ fkDisplayField: Optional[str] = None
-# Global registry for model labels
-MODEL_LABELS: Dict[str, Dict[str, Dict[str, str]]] = {}
+def _getModelLabelEntry(modelName: str) -> Dict[str, Any]:
+ """Resolve label data produced by @i18nModel (see modules.shared.i18nRegistry.MODEL_LABELS)."""
+ try:
+ from modules.shared.i18nRegistry import MODEL_LABELS as i18nModelLabels
+ except ImportError:
+ return {}
+ return i18nModelLabels.get(modelName) or {}
-def registerModelLabels(modelName: str, modelLabel: Dict[str, str], labels: Dict[str, Dict[str, str]]):
+def getModelLabels(modelName: str) -> Dict[str, str]:
+ """Get labels for a model's attributes in the specified language.
+
+ Reads @i18nModel registration (German base strings); resolves via resolveText().
"""
- Register labels for a model's attributes and the model itself.
-
- Args:
- modelName: Name of the model class
- modelLabel: Dictionary mapping language codes to model labels
- e.g. {"en": "Prompt", "fr": "Invite"}
- labels: Dictionary mapping attribute names to their translations
- e.g. {"name": {"en": "Name", "fr": "Nom"}}
- """
- MODEL_LABELS[modelName] = {"model": modelLabel, "attributes": labels}
-
-
-def getModelLabels(modelName: str, language: str = "en") -> Dict[str, str]:
- """
- Get labels for a model's attributes in the specified language.
-
- Args:
- modelName: Name of the model class
- language: Language code (default: "en")
-
- Returns:
- Dictionary mapping attribute names to their labels in the specified language
- """
- modelData = MODEL_LABELS.get(modelName, {})
+ modelData = _getModelLabelEntry(modelName)
attributeLabels = modelData.get("attributes", {})
- return {
- attr: translations.get(language, translations.get("en", attr))
- for attr, translations in attributeLabels.items()
- }
+ from modules.shared.i18nRegistry import resolveText
+ result: Dict[str, str] = {}
+ for attr, translations in attributeLabels.items():
+ resolved = resolveText(translations)
+ result[attr] = resolved if resolved else f"[{attr}]"
+ return result
-def _mergedAttributeLabels(modelClass: Type[BaseModel], userLanguage: str) -> Dict[str, str]:
+def _resolveOptionLabels(options):
+ """Resolve frontend_options label values via resolveText().
+
+ CRITICAL: deep-copy so the shared json_schema_extra dicts are never mutated.
+ Without the copy, each request would re-translate the already-translated
+ label, wrapping it in another layer of ``[…]`` brackets.
+ """
+ if not isinstance(options, list):
+ return options
+ import copy
+ from modules.shared.i18nRegistry import resolveText
+ resolved = copy.deepcopy(options)
+ for opt in resolved:
+ if not isinstance(opt, dict) or "label" not in opt:
+ continue
+ opt["label"] = resolveText(opt["label"])
+ return resolved
+
+
+def _mergedAttributeLabels(modelClass: Type[BaseModel]) -> Dict[str, str]:
"""Merge attribute labels from model MRO (base classes first, subclass overrides)."""
try:
baseIdx = modelClass.__mro__.index(BaseModel)
except ValueError:
- return getModelLabels(modelClass.__name__, userLanguage)
+ return getModelLabels(modelClass.__name__)
merged: Dict[str, str] = {}
for cls in reversed(modelClass.__mro__[:baseIdx]):
- merged.update(getModelLabels(cls.__name__, userLanguage))
+ merged.update(getModelLabels(cls.__name__))
return merged
-def getModelLabel(modelName: str, language: str = "en") -> str:
- """
- Get the label for a model in the specified language.
-
- Args:
- modelName: Name of the model class
- language: Language code (default: "en")
-
- Returns:
- Model label in the specified language, or model name if no label exists
- """
- modelData = MODEL_LABELS.get(modelName, {})
+def getModelLabel(modelName: str) -> str:
+ """Get the label for a model via resolveText()."""
+ modelData = _getModelLabelEntry(modelName)
modelLabel = modelData.get("model", {})
- return modelLabel.get(language, modelLabel.get("en", modelName))
+ from modules.shared.i18nRegistry import resolveText
+ resolved = resolveText(modelLabel)
+ return resolved if resolved else f"[{modelName}]"
def getModelAttributeDefinitions(modelClass: Type[BaseModel] = None, userLanguage: str = "en") -> Dict[str, Any]:
@@ -118,8 +118,8 @@ def getModelAttributeDefinitions(modelClass: Type[BaseModel] = None, userLanguag
attributes = []
model_name = modelClass.__name__
- labels = _mergedAttributeLabels(modelClass, userLanguage)
- model_label = getModelLabel(model_name, userLanguage)
+ labels = _mergedAttributeLabels(modelClass)
+ model_label = getModelLabel(model_name)
# Pydantic v2 only
fields = modelClass.model_fields
@@ -257,7 +257,7 @@ def getModelAttributeDefinitions(modelClass: Type[BaseModel] = None, userLanguag
"visible": frontend_visible,
"order": len(attributes),
"readonly": frontend_readonly,
- "options": frontend_options,
+ "options": _resolveOptionLabels(frontend_options),
"default": field_default,
}
diff --git a/modules/shared/frontendTypes.py b/modules/shared/frontendTypes.py
index 9b6d04e7..9d73ee03 100644
--- a/modules/shared/frontendTypes.py
+++ b/modules/shared/frontendTypes.py
@@ -10,7 +10,7 @@ Custom types support dynamic option loading via API endpoints.
"""
from enum import Enum
-from typing import Dict, Any, Optional
+from typing import Dict, Optional
class FrontendType(str, Enum):
@@ -56,46 +56,50 @@ class FrontendType(str, Enum):
SHAREPOINT_FOLDER = "sharepointFolder"
"""SharePoint folder selector - requires connectionReference parameter in same action to load folders"""
-
- # Additional custom types can be added here as needed
- # Examples:
- # OUTLOOK_FOLDER = "outlookFolder"
- # JIRA_PROJECT = "jiraProject"
+
+ SHAREPOINT_FILE = "sharepointFile"
+ """SharePoint file selector - requires connectionReference parameter"""
+
+ CLICKUP_LIST = "clickupList"
+ """ClickUp list selector - requires connectionReference parameter"""
+
+ CLICKUP_TASK = "clickupTask"
+ """ClickUp task selector - requires connectionReference parameter"""
+
+ # Complex Structure Types (for graph editor node configs)
+ CASE_LIST = "caseList"
+ """Case list editor for flow.switch cases"""
+
+ FIELD_BUILDER = "fieldBuilder"
+ """Field builder for input.form field definitions"""
+
+ KEY_VALUE_ROWS = "keyValueRows"
+ """Key-value row editor for task update entries"""
+
+ CRON = "cron"
+ """Cron expression builder"""
+
+ CONDITION = "condition"
+ """Structured condition builder for flow.ifElse"""
+
+ MAPPING_TABLE = "mappingTable"
+ """Mapping table editor for data.transform"""
+
+ FILTER_EXPRESSION = "filterExpression"
+ """Filter expression builder for data.filter"""
# Mapping of custom types to their API endpoint for dynamic options
CUSTOM_TYPE_OPTIONS_API: Dict[FrontendType, str] = {
FrontendType.USER_CONNECTION: "user.connection",
- FrontendType.DOCUMENT_REFERENCE: "workflow.documentReference", # To be implemented
- FrontendType.WORKFLOW_ACTION: "workflow.action", # To be implemented
- FrontendType.SHAREPOINT_FOLDER: "sharepoint.folder", # Dynamic - requires connectionReference
+ FrontendType.DOCUMENT_REFERENCE: "workflow.documentReference",
+ FrontendType.WORKFLOW_ACTION: "workflow.action",
+ FrontendType.SHAREPOINT_FOLDER: "sharepoint.folder",
+ FrontendType.SHAREPOINT_FILE: "sharepoint.file",
+ FrontendType.CLICKUP_LIST: "clickup.list",
+ FrontendType.CLICKUP_TASK: "clickup.task",
}
-# Mapping of custom types to their description
-CUSTOM_TYPE_DESCRIPTIONS: Dict[FrontendType, Dict[str, str]] = {
- FrontendType.USER_CONNECTION: {
- "en": "User Connection",
- "fr": "Connexion utilisateur",
- "de": "Benutzerverbindung"
- },
- FrontendType.DOCUMENT_REFERENCE: {
- "en": "Document Reference",
- "fr": "Référence de document",
- "de": "Dokumentreferenz"
- },
- FrontendType.WORKFLOW_ACTION: {
- "en": "Workflow Action",
- "fr": "Action de workflow",
- "de": "Workflow-Aktion"
- },
- FrontendType.SHAREPOINT_FOLDER: {
- "en": "SharePoint Folder",
- "fr": "Dossier SharePoint",
- "de": "SharePoint-Ordner"
- },
-}
-
-
def getOptionsApiEndpoint(frontendType: FrontendType) -> Optional[str]:
"""
Get the API endpoint for fetching dynamic options for a custom frontend type.
@@ -121,37 +125,3 @@ def isCustomType(frontendType: FrontendType) -> bool:
"""
return frontendType in CUSTOM_TYPE_OPTIONS_API
-
-def getCustomTypeDescription(frontendType: FrontendType, language: str = "en") -> Optional[str]:
- """
- Get the description for a custom frontend type.
-
- Args:
- frontendType: The frontend type to get description for
- language: Language code (default: "en")
-
- Returns:
- Description string or None if not a custom type
- """
- descriptions = CUSTOM_TYPE_DESCRIPTIONS.get(frontendType)
- if not descriptions:
- return None
- return descriptions.get(language, descriptions.get("en"))
-
-
-def registerCustomType(
- frontendType: FrontendType,
- optionsApiEndpoint: str,
- description: Dict[str, str]
-) -> None:
- """
- Register a new custom frontend type.
-
- Args:
- frontendType: The frontend type enum value
- optionsApiEndpoint: API endpoint for fetching options (e.g., "custom.type")
- description: Multilingual description dict (e.g., {"en": "Description", "fr": "Description"})
- """
- CUSTOM_TYPE_OPTIONS_API[frontendType] = optionsApiEndpoint
- CUSTOM_TYPE_DESCRIPTIONS[frontendType] = description
-
diff --git a/modules/shared/i18nRegistry.py b/modules/shared/i18nRegistry.py
new file mode 100644
index 00000000..f681b19f
--- /dev/null
+++ b/modules/shared/i18nRegistry.py
@@ -0,0 +1,706 @@
+# Copyright (c) 2025 Patrick Motsch
+# All rights reserved.
+"""
+Gateway i18n registry: t(), @i18nModel, boot-sync, in-memory cache.
+
+All UI-visible texts in the gateway (HTTPException details, model labels,
+API messages) are tagged with t() and registered at import time.
+At boot, the registry is synced to the xx base set in the DB.
+At runtime, t() returns the cached translation for the current request language.
+"""
+
+from __future__ import annotations
+
+import logging
+from contextvars import ContextVar
+from dataclasses import dataclass, field as dataclass_field
+from typing import Any, Dict, List, Optional, Type
+
+from pydantic import BaseModel
+
+logger = logging.getLogger(__name__)
+
+
+def _extractRegistrySourceText(obj: Any) -> str:
+ """Resolve a str or multilingual dict to one canonical registry key string."""
+ if isinstance(obj, str):
+ return obj
+ if isinstance(obj, dict):
+ return obj.get("xx") or next(iter(obj.values()), "") or ""
+ return ""
+
+
+# ---------------------------------------------------------------------------
+# Registry (populated at import time by t() and @i18nModel)
+# ---------------------------------------------------------------------------
+
@dataclass
class _I18nRegistryEntry:
    """One registered i18n key: where it was tagged plus an optional description."""
    context: str  # e.g. "api.<module>", "nav", "rbac.role", "table.<Model>.<field>"
    value: str  # optional human/AI description supplied at registration; often ""
+
+
_REGISTRY: Dict[str, _I18nRegistryEntry] = {}  # German base text -> registration metadata; grows at import time

# ---------------------------------------------------------------------------
# Translation cache (populated at boot by _loadCache)
# ---------------------------------------------------------------------------

_CACHE: Dict[str, Dict[str, str]] = {}  # language code -> {base text -> translated text}

# ---------------------------------------------------------------------------
# Per-request language (set by middleware)
# ---------------------------------------------------------------------------

# ContextVar so each request/task sees its own language; German is the default.
_CURRENT_LANGUAGE: ContextVar[str] = ContextVar("i18n_lang", default="de")

# ---------------------------------------------------------------------------
# Model labels (backwards-compatible with getModelLabels / getModelLabel)
# ---------------------------------------------------------------------------

# ClassName -> {"model": <label>, "attributes": {field -> label}}; filled by @i18nModel.
MODEL_LABELS: Dict[str, Dict[str, Any]] = {}
+
+
+# ---------------------------------------------------------------------------
+# t() -- tag and translate
+# ---------------------------------------------------------------------------
+
def t(key: str, context: str = "api", value: str = "") -> str:
    """Tag a UI-visible text for i18n and return its translation.

    The first call with a given key records it in the module registry
    (import-time tagging); every call then resolves the key for the
    current request language. German ("de") is the base language, so the
    key itself is returned. A missing translation comes back as "[key]"
    so gaps are visible in the UI.
    """
    _REGISTRY.setdefault(key, _I18nRegistryEntry(context=context, value=value))
    currentLang = _CURRENT_LANGUAGE.get()
    if currentLang == "de":
        return key
    return _CACHE.get(currentLang, {}).get(key, f"[{key}]")
+
+
def resolveText(value: Any, lang: Optional[str] = None) -> str:
    """Resolve any value to a translated string for the current request language.

    Accepts str, dict, TextMultilingual, or None:
    - str: translated via t() (treated as i18n key / German plaintext key;
      missing translations use t()'s ``[key]`` fallback)
    - dict: multilingual user content — pick ``lang`` (or the context
      language), then ``xx``, then the first non-empty value
    - object with model_dump(): converted to dict first (TextMultilingual)
    - None/empty: ""

    When ``lang`` is given it temporarily overrides the context language for
    this call only (used by schedulers with an explicit user language).
    """
    if lang is None:
        return _resolveTextImpl(value)
    token = _CURRENT_LANGUAGE.set(lang)
    try:
        return _resolveTextImpl(value)
    finally:
        _CURRENT_LANGUAGE.reset(token)
+
+
def _resolveTextImpl(value: Any) -> str:
    """Resolution core for resolveText(); assumes the context language is set."""
    if value is None:
        return ""
    if isinstance(value, str):
        # Blank strings resolve to ""; anything else is treated as an i18n key.
        return t(value) if value.strip() else ""
    if hasattr(value, "model_dump"):
        value = value.model_dump()
    if not isinstance(value, dict):
        return str(value)
    if not value:
        return ""
    # Prefer the context language, then the "xx" base entry (empty strings
    # are treated as missing), finally the first truthy value in the dict.
    picked = value.get(_CURRENT_LANGUAGE.get()) or value.get("xx")
    if picked:
        return str(picked)
    for candidate in value.values():
        if candidate:
            return str(candidate)
    return ""
+
+
def apiRouteContext(routeModuleName: str):
    """Return a per-route helper that registers + translates HTTPException details.

    Module-level ``detail=routeApiMsg("…")`` expressions run at import time,
    so each key lands in ``_REGISTRY`` eagerly; at request time the helper
    defers to ``t()`` for the active language.
    """
    contextName = f"api.{routeModuleName}"

    def _apiMsg(key: str, value: str = "") -> str:
        _REGISTRY.setdefault(key, _I18nRegistryEntry(context=contextName, value=value))
        return t(key, contextName, value)

    return _apiMsg
+
+
+# ---------------------------------------------------------------------------
+# @i18nModel -- class decorator for Pydantic models
+# ---------------------------------------------------------------------------
+
def i18nModel(modelLabel: str, aiContext: str = ""):
    """Class decorator: registers model and field labels for i18n.

    1. Registers ``t(modelLabel, "table.<Class>", aiContext or docstring)``.
    2. For each field with ``json_schema_extra["label"]``, registers
       ``t(label, "table.<Class>.<field>", field.description)``.
    3. Populates MODEL_LABELS for getModelLabels()/getModelLabel().

    NOTE(review): fields whose ``json_schema_extra`` is not a dict are left
    out of the attribute map entirely (they do not fall back to the field
    name) — confirm this asymmetry is intended.
    """
    def _decorator(cls: Type[BaseModel]) -> Type[BaseModel]:
        className = cls.__name__
        t(modelLabel, f"table.{className}", aiContext or _extractDocstringFirstLine(cls))

        fieldLabels: Dict[str, str] = {}
        for fieldName, fieldInfo in cls.model_fields.items():
            extra = fieldInfo.json_schema_extra
            if not isinstance(extra, dict):
                continue
            label = extra.get("label")
            if not label:
                fieldLabels[fieldName] = fieldName
                continue
            t(label, f"table.{className}.{fieldName}", fieldInfo.description or "")
            fieldLabels[fieldName] = label

        MODEL_LABELS[className] = {
            "model": modelLabel,
            "attributes": fieldLabels,
        }
        return cls
    return _decorator
+
+
+def _extractDocstringFirstLine(cls: type) -> str:
+ doc = cls.__doc__
+ if not doc:
+ return ""
+ return doc.strip().split("\n")[0].strip()
+
+
+# ---------------------------------------------------------------------------
+# Language setter (called by middleware)
+# ---------------------------------------------------------------------------
+
def _setLanguage(lang: str):
    """Set the i18n language for the current request context.

    Called by request middleware. The value lives in a ContextVar, so
    concurrent requests/tasks are isolated; the reset token is discarded,
    i.e. the language persists for the remainder of this context.
    """
    _CURRENT_LANGUAGE.set(lang)
+
+
def _getLanguage() -> str:
    """Return the i18n language for the current request context ("de" by default)."""
    return _CURRENT_LANGUAGE.get()
+
+
def normalizePrimaryLanguageTag(tag: str, fallback: str = "de") -> str:
    """Extract the primary language subtag from ``Accept-Language`` or a BCP 47 tag.

    Region/script parts are dropped (``de-CH`` → ``de``, ``zh-Hans-CN`` → ``zh``).
    Alphabetic primaries of 2–8 letters are accepted, covering ISO 639-1 as
    well as 3-letter ISO 639-2/3 codes such as ``gsw``. Anything unusable
    (empty, non-string, non-alphabetic) yields *fallback*.
    """
    if not isinstance(tag, str) or not tag:
        return fallback
    candidate = tag.split(",")[0].split(";")[0].strip()
    if not candidate:
        return fallback
    primary = candidate.replace("_", "-").split("-")[0].lower()
    if primary.isalpha() and 2 <= len(primary) <= 8:
        return primary
    return fallback
+
+
+# ---------------------------------------------------------------------------
+# Boot: scan route files for routeApiMsg("…") calls → register eagerly
+# ---------------------------------------------------------------------------
+
_ROUTE_API_MSG_RE = None  # compiled lazily on first scan


def _scanRouteApiMsgKeys():
    """Scan gateway route/feature sources for routeApiMsg("…") literals and
    register the keys in _REGISTRY so they appear in the boot DB sync.

    Only files that also contain an apiRouteContext("…") call are scanned;
    the first such call in a file determines the context for all its keys.
    """
    import re
    from pathlib import Path

    global _ROUTE_API_MSG_RE
    if _ROUTE_API_MSG_RE is None:
        # First quoted argument of routeApiMsg(...); tolerates escaped quotes.
        _ROUTE_API_MSG_RE = re.compile(
            r"""routeApiMsg\(\s*(['"])((?:\\.|(?!\1).)+)\1""",
        )

    contextPattern = re.compile(r'''apiRouteContext\(\s*['"]([^'"]+)['"]\s*\)''')
    gatewayRoot = Path(__file__).resolve().parents[1]

    for directory in (gatewayRoot / "routes", gatewayRoot / "features"):
        if not directory.is_dir():
            continue
        for sourceFile in directory.rglob("*.py"):
            try:
                text = sourceFile.read_text(encoding="utf-8", errors="replace")
            except OSError:
                continue
            ctxMatch = contextPattern.search(text)
            if ctxMatch is None:
                continue
            keyContext = f"api.{ctxMatch.group(1)}"
            for match in _ROUTE_API_MSG_RE.finditer(text):
                # NOTE(review): only \' and \" are unescaped here; other escape
                # sequences (e.g. \\) stay literal — confirm route files never use them.
                key = match.group(2).replace("\\'", "'").replace('\\"', '"')
                if key and key not in _REGISTRY:
                    _REGISTRY[key] = _I18nRegistryEntry(context=keyContext, value="")

    logger.info("i18n route scan: %d api.* keys in registry after scan",
                sum(1 for entry in _REGISTRY.values() if entry.context.startswith("api.")))
+
+
def _registerNavLabels():
    """Register all navigation labels from NAVIGATION_SECTIONS as i18n keys.

    Runs at boot before the DB sync so nav labels land in the xx base set
    and become translatable via the Admin UI.
    """
    try:
        from modules.system.mainSystem import NAVIGATION_SECTIONS
    except ImportError:
        logger.warning("i18n: could not import NAVIGATION_SECTIONS for nav label registration")
        return

    registered = 0

    def _add(text: str):
        nonlocal registered
        if text and text not in _REGISTRY:
            _REGISTRY[text] = _I18nRegistryEntry(context="nav", value="")
            registered += 1

    for section in NAVIGATION_SECTIONS:
        _add(section.get("title", ""))
        for item in section.get("items", []):
            _add(item.get("label", ""))
        for subgroup in section.get("subgroups", []):
            _add(subgroup.get("title", ""))
            for item in subgroup.get("items", []):
                _add(item.get("label", ""))

    logger.info("i18n nav labels: registered %d nav keys", registered)
+
+
def _registerFeatureUiLabels():
    """Register FEATURE_LABEL and UI_OBJECTS labels from all feature modules (German i18n keys)."""
    def _addKey(text) -> bool:
        # Register a non-empty string key once; report whether it was new.
        if isinstance(text, str) and text and text not in _REGISTRY:
            _REGISTRY[text] = _I18nRegistryEntry(context="nav", value="")
            return True
        return False

    try:
        from modules.system import mainSystem as _mainSystem
        # System FEATURE_LABEL is registered but (as before) not counted in the log.
        _addKey(getattr(_mainSystem, "FEATURE_LABEL", None))
    except ImportError:
        pass

    featureModulePaths = (
        "modules.features.trustee.mainTrustee",
        "modules.features.graphicalEditor.mainGraphicalEditor",
        "modules.features.commcoach.mainCommcoach",
        "modules.features.teamsbot.mainTeamsbot",
        "modules.features.workspace.mainWorkspace",
        "modules.features.realEstate.mainRealEstate",
        "modules.features.neutralization.mainNeutralization",
        "modules.features.chatbot.mainChatbot",
    )
    added = 0
    for modulePath in featureModulePaths:
        try:
            mod = __import__(modulePath, fromlist=["FEATURE_LABEL", "UI_OBJECTS"])
        except ImportError:
            continue
        if _addKey(getattr(mod, "FEATURE_LABEL", None)):
            added += 1
        for uiObj in getattr(mod, "UI_OBJECTS", []) or []:
            if _addKey(_extractRegistrySourceText(uiObj.get("label"))):
                added += 1
    logger.info("i18n feature UI labels: %d new keys (nav context)", added)
+
+
def _registerRbacLabels():
    """Register DATA_OBJECTS, RESOURCE_OBJECTS labels and TEMPLATE_ROLES descriptions
    from all feature modules and the system module as i18n keys.

    context mapping:
    - DATA_OBJECTS → rbac.data
    - RESOURCE_OBJECTS → rbac.resource
    - TEMPLATE_ROLES[].description (xx source) → rbac.role
    - QUICK_ACTIONS[].label/description (xx source) → rbac.quickaction
    - QUICK_ACTION_CATEGORIES[].label (xx source) → rbac.quickaction
    """
    modulePaths = (
        "modules.system.mainSystem",
        "modules.features.trustee.mainTrustee",
        "modules.features.graphicalEditor.mainGraphicalEditor",
        "modules.features.commcoach.mainCommcoach",
        "modules.features.teamsbot.mainTeamsbot",
        "modules.features.workspace.mainWorkspace",
        "modules.features.realEstate.mainRealEstate",
        "modules.features.neutralization.mainNeutralization",
        "modules.features.chatbot.mainChatbot",
    )

    added = 0

    def _reg(key: str, ctx: str):
        nonlocal added
        if key and key not in _REGISTRY:
            _REGISTRY[key] = _I18nRegistryEntry(context=ctx, value="")
            added += 1

    for modulePath in modulePaths:
        try:
            mod = __import__(modulePath, fromlist=[
                "DATA_OBJECTS", "RESOURCE_OBJECTS", "TEMPLATE_ROLES",
                "QUICK_ACTIONS", "QUICK_ACTION_CATEGORIES",
            ])
        except ImportError:
            continue

        for dataObj in getattr(mod, "DATA_OBJECTS", []) or []:
            _reg(_extractRegistrySourceText(dataObj.get("label")), "rbac.data")

        for resObj in getattr(mod, "RESOURCE_OBJECTS", []) or []:
            _reg(_extractRegistrySourceText(resObj.get("label")), "rbac.resource")

        for role in getattr(mod, "TEMPLATE_ROLES", []) or []:
            _reg(_extractRegistrySourceText(role.get("description")), "rbac.role")

        for qa in getattr(mod, "QUICK_ACTIONS", []) or []:
            _reg(_extractRegistrySourceText(qa.get("label")), "rbac.quickaction")
            _reg(_extractRegistrySourceText(qa.get("description")), "rbac.quickaction")

        for cat in getattr(mod, "QUICK_ACTION_CATEGORIES", []) or []:
            _reg(_extractRegistrySourceText(cat.get("label")), "rbac.quickaction")

    logger.info("i18n rbac labels: %d new keys (rbac.* context)", added)
+
+
def _registerServiceCenterLabels():
    """Register service-center category labels and bootstrap role descriptions."""
    newKeys = 0

    def _add(key: str, ctx: str):
        nonlocal newKeys
        if key and key not in _REGISTRY:
            _REGISTRY[key] = _I18nRegistryEntry(context=ctx, value="")
            newKeys += 1

    try:
        from modules.serviceCenter.registry import IMPORTABLE_SERVICES
    except ImportError:
        pass
    else:
        for svc in IMPORTABLE_SERVICES.values():
            _add(_extractRegistrySourceText(svc.get("label")), "service")

    # German base descriptions created by the DB bootstrap; registered here so
    # they can be translated via the Admin UI like every other i18n key.
    for description in (
        "Administrator - Benutzer und Ressourcen im Mandanten verwalten",
        "Benutzer - Standard-Benutzer mit Zugriff auf eigene Datensätze",
        "Betrachter - Nur-Lese-Zugriff auf Gruppen-Datensätze",
        "System-Administrator - Vollständiger administrativer Zugriff über alle Mandanten",
    ):
        _add(description, "rbac.role")

    logger.info("i18n service/bootstrap labels: %d new keys", newKeys)
+
+
+def _registerNodeLabels():
+ """Register all graph-editor node labels, descriptions, parameter descriptions,
+ output labels, port descriptions, category labels, and entry-point titles."""
+ added = 0
+
+ def _reg(key: str, ctx: str):
+ nonlocal added
+ if key and key not in _REGISTRY:
+ _REGISTRY[key] = _I18nRegistryEntry(context=ctx, value="")
+ added += 1
+
+ try:
+ from modules.features.graphicalEditor.nodeDefinitions import STATIC_NODE_TYPES
+ for nd in STATIC_NODE_TYPES:
+ _reg(_extractRegistrySourceText(nd.get("label")), "node.label")
+ _reg(_extractRegistrySourceText(nd.get("description")), "node.desc")
+
+ for param in nd.get("parameters", []) or []:
+ _reg(_extractRegistrySourceText(param.get("description")), "node.param")
+ _reg(_extractRegistrySourceText(param.get("label")), "node.param")
+
+ outLabels = nd.get("outputLabels")
+ if isinstance(outLabels, dict):
+ sourceList = outLabels.get("xx") or next(iter(outLabels.values()), [])
+ if not isinstance(sourceList, list):
+ sourceList = []
+ for lbl in sourceList:
+ _reg(lbl, "node.output")
+ elif isinstance(outLabels, list):
+ for lbl in outLabels:
+ _reg(lbl, "node.output")
+ except ImportError:
+ pass
+
+ try:
+ from modules.features.graphicalEditor.portTypes import PORT_TYPE_CATALOG
+ for schema in PORT_TYPE_CATALOG.values():
+ for field in getattr(schema, "fields", []) or []:
+ desc = getattr(field, "description", None)
+ if desc:
+ _reg(_extractRegistrySourceText(desc if isinstance(desc, (str, dict)) else None), "port.desc")
+ except ImportError:
+ pass
+
+ _nodeCategoryLabels = [
+ "Trigger", "Eingabe/Mensch", "Ablauf", "Daten", "KI",
+ "Datei", "E-Mail", "SharePoint", "ClickUp", "Treuhand",
+ ]
+ for lbl in _nodeCategoryLabels:
+ _reg(lbl, "node.category")
+
+ _entryPointTitles = ["Jetzt ausführen", "Start"]
+ for lbl in _entryPointTitles:
+ _reg(lbl, "node.entry")
+
+ logger.info("i18n node labels: %d new keys (node.*/port.* context)", added)
+
+
+def _registerDatamodelOptionLabels():
+ """Register all frontend_options labels from Pydantic datamodels and subscription plans."""
+ added = 0
+
+ def _reg(key: str, ctx: str):
+ nonlocal added
+ if key and key not in _REGISTRY:
+ _REGISTRY[key] = _I18nRegistryEntry(context=ctx, value="")
+ added += 1
+
+ _datamodelModules = (
+ "modules.datamodels.datamodelRbac",
+ "modules.datamodels.datamodelChat",
+ "modules.datamodels.datamodelMessaging",
+ "modules.datamodels.datamodelNotification",
+ "modules.datamodels.datamodelUam",
+ "modules.datamodels.datamodelFiles",
+ "modules.datamodels.datamodelDataSource",
+ "modules.datamodels.datamodelFeatureDataSource",
+ "modules.datamodels.datamodelUiLanguage",
+ "modules.features.trustee.datamodelFeatureTrustee",
+ "modules.features.neutralization.datamodelFeatureNeutralizer",
+ )
+
+ for modPath in _datamodelModules:
+ try:
+ mod = __import__(modPath, fromlist=["__all__"])
+ except ImportError:
+ continue
+ for attrName in dir(mod):
+ cls = getattr(mod, attrName, None)
+ if not isinstance(cls, type) or not issubclass(cls, BaseModel):
+ continue
+ for fieldName, fieldInfo in cls.model_fields.items():
+ extra = (fieldInfo.json_schema_extra or {}) if hasattr(fieldInfo, "json_schema_extra") else {}
+ if not isinstance(extra, dict):
+ continue
+ options = extra.get("frontend_options")
+ if not isinstance(options, list):
+ continue
+ ctx = f"option.{cls.__name__}.{fieldName}"
+ for opt in options:
+ if isinstance(opt, dict):
+ _reg(_extractRegistrySourceText(opt.get("label")), ctx)
+
+ try:
+ from modules.datamodels.datamodelSubscription import BUILTIN_PLANS
+ for plan in BUILTIN_PLANS.values():
+ _reg(_extractRegistrySourceText(getattr(plan, "title", None)), "subscription.title")
+ _reg(_extractRegistrySourceText(getattr(plan, "description", None)), "subscription.desc")
+ except (ImportError, AttributeError):
+ pass
+
+ logger.info("i18n datamodel option labels: %d new keys", added)
+
+
+# ---------------------------------------------------------------------------
+# Boot: sync registry to DB
+# ---------------------------------------------------------------------------
+
+async def _syncRegistryToDb():
+ """Boot hook: write all registered keys into UiLanguageSet(xx).
+
+ 1. Scans route files for routeApiMsg("…") to eagerly register api.* keys.
+ 2. Registers navigation labels as nav.* keys.
+ 3. Registers feature UI labels (FEATURE_LABEL, UI_OBJECTS).
+ 4. Registers RBAC labels (DATA/RESOURCE/ROLE/QuickAction).
+ 5. Merges with existing UI keys (context="ui"), only touches gateway keys.
+ """
+ _scanRouteApiMsgKeys()
+ _registerNavLabels()
+ _registerFeatureUiLabels()
+ _registerRbacLabels()
+ _registerServiceCenterLabels()
+ _registerNodeLabels()
+ _registerDatamodelOptionLabels()
+
+ if not _REGISTRY:
+ logger.info("i18n registry: no keys to sync (empty registry)")
+ return
+
+ from modules.datamodels.datamodelUiLanguage import UiLanguageSet
+ from modules.shared.configuration import APP_CONFIG
+ from modules.connectors.connectorDbPostgre import _get_cached_connector
+ from modules.shared.timeUtils import getUtcTimestamp
+
+ db = _get_cached_connector(
+ dbHost=APP_CONFIG.get("DB_HOST", "localhost"),
+ dbDatabase="poweron_management",
+ dbUser=APP_CONFIG.get("DB_USER"),
+ dbPassword=APP_CONFIG.get("DB_PASSWORD_SECRET"),
+ dbPort=int(APP_CONFIG.get("DB_PORT", 5432)),
+ userId="__i18n_boot__",
+ )
+
+ rows = db.getRecordset(UiLanguageSet, recordFilter={"id": "xx"})
+
+ gatewayEntries = [
+ {"context": entry.context, "key": key, "value": entry.value}
+ for key, entry in _REGISTRY.items()
+ ]
+ gatewayKeys = set(_REGISTRY.keys())
+
+ if not rows:
+ now = getUtcTimestamp()
+ rec = {
+ "id": "xx",
+ "label": "Basisset (Meta)",
+ "entries": gatewayEntries,
+ "status": "complete",
+ "isDefault": True,
+ "sysCreatedAt": now,
+ "sysCreatedBy": "__i18n_boot__",
+ "sysModifiedAt": now,
+ "sysModifiedBy": "__i18n_boot__",
+ }
+ db.recordCreate(UiLanguageSet, rec)
+ logger.info("i18n boot-sync: created xx set with %d gateway keys", len(gatewayEntries))
+ return
+
+ row = dict(rows[0])
+ existingEntries: List[dict] = row.get("entries") or []
+ if not isinstance(existingEntries, list):
+ existingEntries = []
+
+ uiEntries = [e for e in existingEntries if e.get("context", "") == "ui"]
+
+ oldGatewayEntries = [
+ e for e in existingEntries
+ if e.get("context", "") != "ui"
+ ]
+ oldGatewayByKey = {e["key"]: e for e in oldGatewayEntries}
+
+ added = 0
+ updated = 0
+ removed = 0
+
+ newGatewayEntries: List[dict] = []
+ for key, entry in _REGISTRY.items():
+ newEntry = {"context": entry.context, "key": key, "value": entry.value}
+ old = oldGatewayByKey.get(key)
+ if old is None:
+ added += 1
+ elif old.get("context") != entry.context or old.get("value") != entry.value:
+ updated += 1
+ newGatewayEntries.append(newEntry)
+
+ removed = len(set(oldGatewayByKey.keys()) - gatewayKeys)
+
+ mergedEntries = uiEntries + newGatewayEntries
+
+ if added == 0 and updated == 0 and removed == 0:
+ logger.info("i18n boot-sync: xx set up-to-date (%d gateway + %d ui keys)", len(newGatewayEntries), len(uiEntries))
+ return
+
+ now = getUtcTimestamp()
+ row["entries"] = mergedEntries
+ if "keys" in row:
+ del row["keys"]
+ row["sysModifiedAt"] = now
+ row["sysModifiedBy"] = "__i18n_boot__"
+ db.recordModify(UiLanguageSet, "xx", row)
+
+ logger.info(
+ "i18n boot-sync: xx updated (+%d added, ~%d updated, -%d removed, total=%d gateway + %d ui)",
+ added, updated, removed, len(newGatewayEntries), len(uiEntries),
+ )
+
+
+# ---------------------------------------------------------------------------
+# Boot: load translation cache
+# ---------------------------------------------------------------------------
+
+async def _loadCache():
+ """Boot hook: load all UiLanguageSets into the in-memory cache.
+
+ After this, t() lookups are O(1) dict access with no DB calls.
+ """
+ from modules.datamodels.datamodelUiLanguage import UiLanguageSet
+ from modules.shared.configuration import APP_CONFIG
+ from modules.connectors.connectorDbPostgre import _get_cached_connector
+
+ db = _get_cached_connector(
+ dbHost=APP_CONFIG.get("DB_HOST", "localhost"),
+ dbDatabase="poweron_management",
+ dbUser=APP_CONFIG.get("DB_USER"),
+ dbPassword=APP_CONFIG.get("DB_PASSWORD_SECRET"),
+ dbPort=int(APP_CONFIG.get("DB_PORT", 5432)),
+ userId="__i18n_cache__",
+ )
+
+ rows = db.getRecordset(UiLanguageSet)
+ _CACHE.clear()
+
+ for row in rows:
+ code = row.get("id", "")
+ if code == "xx":
+ continue
+ entries = row.get("entries")
+ if not isinstance(entries, list):
+ continue
+ langDict: Dict[str, str] = {}
+ for e in entries:
+ key = e.get("key", "")
+ val = e.get("value", "")
+ if key and val:
+ langDict[key] = val
+ if langDict:
+ _CACHE[code] = langDict
+
+ logger.info("i18n cache loaded: %d languages, %d total keys",
+ len(_CACHE), sum(len(v) for v in _CACHE.values()))
diff --git a/modules/shared/jsonContinuation.py b/modules/shared/jsonContinuation.py
index dd71986e..22180b41 100644
--- a/modules/shared/jsonContinuation.py
+++ b/modules/shared/jsonContinuation.py
@@ -59,7 +59,7 @@ OVERLAP_MAX_CHARS: int = 1000
# =============================================================================
-class TokenType(Enum):
+class JsonTokenType(Enum):
"""JSON Token Types"""
OBJECT_START = "{"
OBJECT_END = "}"
@@ -77,9 +77,9 @@ class TokenType(Enum):
@dataclass
-class Token:
+class JsonToken:
"""Represents a JSON token with position info"""
- type: TokenType
+ type: JsonTokenType
value: Any
start_pos: int
end_pos: int
@@ -120,7 +120,7 @@ class JsonTokenizer:
return self.jsonStr[self.pos]
return None
- def readString(self) -> Token:
+ def readString(self) -> JsonToken:
"""Read a JSON string token"""
start_pos = self.pos
self.pos += 1 # Skip opening quote
@@ -142,15 +142,15 @@ class JsonTokenizer:
value = raw[1:-1] # Remove quotes for value
except:
value = raw
- return Token(TokenType.STRING, value, start_pos, self.pos, raw)
+ return JsonToken(JsonTokenType.STRING, value, start_pos, self.pos, raw)
else:
self.pos += 1
# String was truncated
raw = self.jsonStr[start_pos:self.pos]
- return Token(TokenType.TRUNCATED, raw[1:] if len(raw) > 1 else "", start_pos, self.pos, raw)
+ return JsonToken(JsonTokenType.TRUNCATED, raw[1:] if len(raw) > 1 else "", start_pos, self.pos, raw)
- def readNumber(self) -> Token:
+ def readNumber(self) -> JsonToken:
"""Read a JSON number token"""
start_pos = self.pos
@@ -182,54 +182,54 @@ class JsonTokenizer:
except ValueError:
value = raw
- return Token(TokenType.NUMBER, value, start_pos, self.pos, raw)
+ return JsonToken(JsonTokenType.NUMBER, value, start_pos, self.pos, raw)
- def readKeyword(self) -> Token:
+ def readKeyword(self) -> JsonToken:
"""Read true, false, or null"""
start_pos = self.pos
- for keyword, token_type in [('true', TokenType.BOOLEAN),
- ('false', TokenType.BOOLEAN),
- ('null', TokenType.NULL)]:
+ for keyword, token_type in [('true', JsonTokenType.BOOLEAN),
+ ('false', JsonTokenType.BOOLEAN),
+ ('null', JsonTokenType.NULL)]:
if self.jsonStr[self.pos:].startswith(keyword):
self.pos += len(keyword)
value = True if keyword == 'true' else (False if keyword == 'false' else None)
- return Token(token_type, value, start_pos, self.pos, keyword)
+ return JsonToken(token_type, value, start_pos, self.pos, keyword)
# Partial keyword (truncated)
while self.pos < self.length and self.jsonStr[self.pos].isalpha():
self.pos += 1
raw = self.jsonStr[start_pos:self.pos]
- return Token(TokenType.TRUNCATED, raw, start_pos, self.pos, raw)
+ return JsonToken(JsonTokenType.TRUNCATED, raw, start_pos, self.pos, raw)
- def nextToken(self) -> Token:
+ def nextJsonToken(self) -> JsonToken:
"""Get the next token"""
self.skipWhitespace()
if self.pos >= self.length:
- return Token(TokenType.EOF, None, self.pos, self.pos, "")
+ return JsonToken(JsonTokenType.EOF, None, self.pos, self.pos, "")
char = self.jsonStr[self.pos]
startPos = self.pos
if char == '{':
self.pos += 1
- return Token(TokenType.OBJECT_START, '{', startPos, self.pos, '{')
+ return JsonToken(JsonTokenType.OBJECT_START, '{', startPos, self.pos, '{')
elif char == '}':
self.pos += 1
- return Token(TokenType.OBJECT_END, '}', startPos, self.pos, '}')
+ return JsonToken(JsonTokenType.OBJECT_END, '}', startPos, self.pos, '}')
elif char == '[':
self.pos += 1
- return Token(TokenType.ARRAY_START, '[', startPos, self.pos, '[')
+ return JsonToken(JsonTokenType.ARRAY_START, '[', startPos, self.pos, '[')
elif char == ']':
self.pos += 1
- return Token(TokenType.ARRAY_END, ']', startPos, self.pos, ']')
+ return JsonToken(JsonTokenType.ARRAY_END, ']', startPos, self.pos, ']')
elif char == ':':
self.pos += 1
- return Token(TokenType.COLON, ':', startPos, self.pos, ':')
+ return JsonToken(JsonTokenType.COLON, ':', startPos, self.pos, ':')
elif char == ',':
self.pos += 1
- return Token(TokenType.COMMA, ',', startPos, self.pos, ',')
+ return JsonToken(JsonTokenType.COMMA, ',', startPos, self.pos, ',')
elif char == '"':
return self.readString()
elif char == '-' or char.isdigit():
@@ -239,7 +239,7 @@ class JsonTokenizer:
else:
# Unknown character, treat as truncated
self.pos += 1
- return Token(TokenType.TRUNCATED, char, startPos, self.pos, char)
+ return JsonToken(JsonTokenType.TRUNCATED, char, startPos, self.pos, char)
@dataclass
@@ -632,25 +632,25 @@ class JsonAnalyzer:
in_value = False
while True:
- token = tokenizer.nextToken()
+ token = tokenizer.nextJsonToken()
- if token.type == TokenType.EOF:
+ if token.type == JsonTokenType.EOF:
break
- if token.type == TokenType.TRUNCATED:
+ if token.type == JsonTokenType.TRUNCATED:
# Return position before the truncated part
break
- if token.type in (TokenType.OBJECT_START, TokenType.ARRAY_START):
+ if token.type in (JsonTokenType.OBJECT_START, JsonTokenType.ARRAY_START):
stack_depth += 1
in_value = True
- elif token.type in (TokenType.OBJECT_END, TokenType.ARRAY_END):
+ elif token.type in (JsonTokenType.OBJECT_END, JsonTokenType.ARRAY_END):
stack_depth -= 1
last_value_end = token.end_pos
in_value = False
- elif token.type == TokenType.STRING:
+ elif token.type == JsonTokenType.STRING:
# Check if this is a key or a value
saved_pos = tokenizer.pos
tokenizer.skipWhitespace()
@@ -662,11 +662,11 @@ class JsonAnalyzer:
last_value_end = token.end_pos
in_value = False
- elif token.type in (TokenType.NUMBER, TokenType.BOOLEAN, TokenType.NULL):
+ elif token.type in (JsonTokenType.NUMBER, JsonTokenType.BOOLEAN, JsonTokenType.NULL):
last_value_end = token.end_pos
in_value = False
- elif token.type == TokenType.COMMA:
+ elif token.type == JsonTokenType.COMMA:
# After a comma, we've completed a value
last_complete_pos = last_value_end
@@ -714,12 +714,12 @@ class JsonAnalyzer:
tokenizer = JsonTokenizer(self.jsonStr)
while True:
- token = tokenizer.nextToken()
+ token = tokenizer.nextJsonToken()
- if token.type == TokenType.EOF or token.type == TokenType.TRUNCATED:
+ if token.type == JsonTokenType.EOF or token.type == JsonTokenType.TRUNCATED:
break
- if token.type == TokenType.OBJECT_START:
+ if token.type == JsonTokenType.OBJECT_START:
frame = StackFrame(
type="object",
start_pos=token.start_pos,
@@ -727,7 +727,7 @@ class JsonAnalyzer:
)
self.stack.append(frame)
- elif token.type == TokenType.ARRAY_START:
+ elif token.type == JsonTokenType.ARRAY_START:
frame = StackFrame(
type="array",
start_pos=token.start_pos,
@@ -735,24 +735,24 @@ class JsonAnalyzer:
)
self.stack.append(frame)
- elif token.type == TokenType.OBJECT_END:
+ elif token.type == JsonTokenType.OBJECT_END:
if self.stack and self.stack[-1].type == "object":
self.stack.pop()
- elif token.type == TokenType.ARRAY_END:
+ elif token.type == JsonTokenType.ARRAY_END:
if self.stack and self.stack[-1].type == "array":
self.stack.pop()
- elif token.type == TokenType.STRING:
+ elif token.type == JsonTokenType.STRING:
# Could be a key or a value
- self._handleStringToken(token, tokenizer)
+ self._handleStringJsonToken(token, tokenizer)
- elif token.type == TokenType.COMMA:
+ elif token.type == JsonTokenType.COMMA:
# Increment array index
if self.stack and self.stack[-1].type == "array":
self.stack[-1].index += 1
- def _handleStringToken(self, token: Token, tokenizer: JsonTokenizer):
+ def _handleStringJsonToken(self, token: JsonToken, tokenizer: JsonTokenizer):
"""Handle a string token (could be key or value)"""
if self.stack and self.stack[-1].type == "object":
# Check if this is a key (followed by colon)
@@ -995,12 +995,12 @@ class JsonAnalyzer:
current_key = None
while True:
- token = tokenizer.nextToken()
+ token = tokenizer.nextJsonToken()
- if token.type == TokenType.EOF:
+ if token.type == JsonTokenType.EOF:
break
- if token.type == TokenType.TRUNCATED:
+ if token.type == JsonTokenType.TRUNCATED:
# Mark the truncation point
if stack:
current = stack[-1]
@@ -1020,7 +1020,7 @@ class JsonAnalyzer:
})
break
- if token.type == TokenType.OBJECT_START:
+ if token.type == JsonTokenType.OBJECT_START:
obj = {
'type': 'object',
'key': current_key,
@@ -1032,7 +1032,7 @@ class JsonAnalyzer:
stack.append(obj)
current_key = None
- elif token.type == TokenType.ARRAY_START:
+ elif token.type == JsonTokenType.ARRAY_START:
arr = {
'type': 'array',
'key': current_key,
@@ -1044,19 +1044,19 @@ class JsonAnalyzer:
stack.append(arr)
current_key = None
- elif token.type == TokenType.OBJECT_END:
+ elif token.type == JsonTokenType.OBJECT_END:
if len(stack) > 1 and stack[-1].get('type') == 'object':
stack[-1]['end_pos'] = token.end_pos
stack[-1]['complete'] = True
stack.pop()
- elif token.type == TokenType.ARRAY_END:
+ elif token.type == JsonTokenType.ARRAY_END:
if len(stack) > 1 and stack[-1].get('type') == 'array':
stack[-1]['end_pos'] = token.end_pos
stack[-1]['complete'] = True
stack.pop()
- elif token.type == TokenType.STRING:
+ elif token.type == JsonTokenType.STRING:
# Check if it's a key
saved_pos = tokenizer.pos
tokenizer.skipWhitespace()
@@ -1081,7 +1081,7 @@ class JsonAnalyzer:
tokenizer.pos = saved_pos
- elif token.type in (TokenType.NUMBER, TokenType.BOOLEAN, TokenType.NULL):
+ elif token.type in (JsonTokenType.NUMBER, JsonTokenType.BOOLEAN, JsonTokenType.NULL):
value_node = {
'type': 'value',
'key': current_key,
diff --git a/modules/shared/progressLogger.py b/modules/shared/progressLogger.py
index 1b67f73e..dbcb569a 100644
--- a/modules/shared/progressLogger.py
+++ b/modules/shared/progressLogger.py
@@ -149,6 +149,10 @@ class ProgressLogger:
# Parent operation never existed - log warning
logger.debug(f"WARNING: Parent operation '{parentOperationId}' not found in activeOperations when creating log for '{operationId}'. Available operations: {list(self.activeOperations.keys())}. Child operation may appear at root level.")
+ wfId = getattr(workflow, 'id', None) or ""
+ if isinstance(wfId, str) and wfId.startswith("transient-"):
+ return None
+
# Get round number from workflow - include in operationId for unique per-round operations
roundNumber = getattr(workflow, 'currentRound', None) or 1
diff --git a/modules/system/mainSystem.py b/modules/system/mainSystem.py
index a424c973..6f9163d9 100644
--- a/modules/system/mainSystem.py
+++ b/modules/system/mainSystem.py
@@ -11,11 +11,13 @@ Also defines the navigation structure for the frontend.
import logging
from typing import Dict, List, Any, Optional
+from modules.shared.i18nRegistry import t
+
logger = logging.getLogger(__name__)
# System metadata
FEATURE_CODE = "system"
-FEATURE_LABEL = {"en": "System", "de": "System", "fr": "Système"}
+FEATURE_LABEL = "System"
FEATURE_ICON = "mdi-cog"
# =============================================================================
@@ -35,104 +37,132 @@ FEATURE_ICON = "mdi-cog"
# icon: Wird intern gehalten aber NICHT in der API Response zurückgegeben
NAVIGATION_SECTIONS = [
+ # ─── Meine Sicht (with top-level item + subgroups) ───
{
"id": "system",
- "title": {"en": "SYSTEM", "de": "SYSTEM", "fr": "SYSTÈME"},
+ "title": t("Meine Sicht"),
"order": 10,
"items": [
{
"id": "home",
"objectKey": "ui.system.home",
- "label": {"en": "Home", "de": "Übersicht", "fr": "Accueil"},
+ "label": t("Übersicht"),
"icon": "FaHome",
"path": "/",
"order": 10,
"public": True,
},
{
- "id": "store",
- "objectKey": "ui.system.store",
- "label": {"en": "Store", "de": "Store", "fr": "Store"},
- "icon": "FaStore",
- "path": "/store",
+ "id": "integrations",
+ "objectKey": "ui.system.integrations",
+ "label": t("Integrationen"),
+ "icon": "FaProjectDiagram",
+ "path": "/integrations",
"order": 15,
"public": True,
},
- {
- "id": "settings",
- "objectKey": "ui.system.settings",
- "label": {"en": "Settings", "de": "Einstellungen", "fr": "Paramètres"},
- "icon": "FaCog",
- "path": "/settings",
- "order": 20,
- "public": True,
- },
],
- },
- {
- "id": "basedata",
- "title": {"en": "BASE DATA", "de": "BASISDATEN", "fr": "DONNÉES DE BASE"},
- "order": 30,
- "items": [
+ "subgroups": [
+ # ── Basisdaten ──
{
- "id": "prompts",
- "objectKey": "ui.system.prompts",
- "label": {"en": "Prompts", "de": "Prompts", "fr": "Prompts"},
- "icon": "FaLightbulb",
- "path": "/basedata/prompts",
- "order": 10,
- },
- {
- "id": "files",
- "objectKey": "ui.system.files",
- "label": {"en": "Files", "de": "Dateien", "fr": "Fichiers"},
- "icon": "FaRegFileAlt",
- "path": "/basedata/files",
+ "id": "system-basedata",
+ "title": t("Basisdaten"),
"order": 20,
+ "items": [
+ {
+ "id": "connections",
+ "objectKey": "ui.system.connections",
+ "label": t("Verbindungen"),
+ "icon": "FaLink",
+ "path": "/basedata/connections",
+ "order": 10,
+ },
+ {
+ "id": "files",
+ "objectKey": "ui.system.files",
+ "label": t("Dateien"),
+ "icon": "FaRegFileAlt",
+ "path": "/basedata/files",
+ "order": 20,
+ },
+ {
+ "id": "prompts",
+ "objectKey": "ui.system.prompts",
+ "label": t("Prompts"),
+ "icon": "FaLightbulb",
+ "path": "/basedata/prompts",
+ "order": 30,
+ },
+ ],
},
+ # ── Nutzung ──
{
- "id": "connections",
- "objectKey": "ui.system.connections",
- "label": {"en": "Connections", "de": "Verbindungen", "fr": "Connexions"},
- "icon": "FaLink",
- "path": "/basedata/connections",
+ "id": "system-usage",
+ "title": t("Nutzung"),
"order": 30,
- },
- ],
- },
- {
- "id": "billing",
- "title": {"en": "BILLING", "de": "BILLING", "fr": "FACTURATION"},
- "order": 35,
- "items": [
- {
- "id": "billing-transactions",
- "objectKey": "ui.billing.transactions",
- "label": {"en": "Billing", "de": "Billing", "fr": "Facturation"},
- "icon": "FaWallet",
- "path": "/billing/transactions",
- "order": 10,
+ "items": [
+ {
+ "id": "billing-admin",
+ "objectKey": "ui.system.billingAdmin",
+ "label": t("Abrechnung"),
+ "icon": "FaMoneyBillAlt",
+ "path": "/billing/admin",
+ "order": 10,
+ },
+ {
+ "id": "statistics",
+ "objectKey": "ui.system.statistics",
+ "label": t("Statistiken"),
+ "icon": "FaChartBar",
+ "path": "/billing/transactions",
+ "order": 20,
+ },
+ {
+ "id": "automations",
+ "objectKey": "ui.system.automations",
+ "label": t("Automations"),
+ "icon": "FaRobot",
+ "path": "/automations",
+ "order": 30,
+ },
+ {
+ "id": "store",
+ "objectKey": "ui.system.store",
+ "label": t("Store"),
+ "icon": "FaStore",
+ "path": "/store",
+ "order": 40,
+ "public": True,
+ },
+ {
+ "id": "settings",
+ "objectKey": "ui.system.settings",
+ "label": t("Einstellungen"),
+ "icon": "FaCog",
+ "path": "/settings",
+ "order": 50,
+ "public": True,
+ },
+ ],
},
],
},
# ─── Administration (with subgroups) ───
- # Access control is at item level, NOT section level.
- # Groups auto-hide if 0 visible pages for the user.
{
"id": "admin",
- "title": {"en": "ADMINISTRATION", "de": "ADMINISTRATION", "fr": "ADMINISTRATION"},
+ "title": t("Administration"),
"order": 200,
"subgroups": [
# ── Wizards ──
{
"id": "admin-wizards",
- "title": {"en": "Wizards", "de": "Wizards", "fr": "Assistants"},
+ "title": t("Wizards"),
"order": 10,
"items": [
{
"id": "admin-mandate-wizard",
"objectKey": "ui.admin.mandateWizard",
- "label": {"en": "Mandate Wizard", "de": "Mandanten-Wizard", "fr": "Assistant mandat"},
+ "label": t("Mandanten-Wizard"),
"icon": "FaMagic",
"path": "/admin/mandate-wizard",
"order": 10,
@@ -141,7 +171,7 @@ NAVIGATION_SECTIONS = [
{
"id": "admin-invitation-wizard",
"objectKey": "ui.admin.invitationWizard",
- "label": {"en": "Invitation Wizard", "de": "Einladungs-Wizard", "fr": "Assistant d'invitation"},
+ "label": t("Einladungs-Wizard"),
"icon": "FaEnvelopeOpenText",
"path": "/admin/invitation-wizard",
"order": 20,
@@ -152,13 +182,13 @@ NAVIGATION_SECTIONS = [
# ── Users ──
{
"id": "admin-users-group",
- "title": {"en": "Users", "de": "Benutzer", "fr": "Utilisateurs"},
+ "title": t("Benutzer"),
"order": 20,
"items": [
{
"id": "admin-users",
"objectKey": "ui.admin.users",
- "label": {"en": "Users", "de": "Benutzer", "fr": "Utilisateurs"},
+ "label": t("Benutzer"),
"icon": "FaUsers",
"path": "/admin/users",
"order": 10,
@@ -167,7 +197,7 @@ NAVIGATION_SECTIONS = [
{
"id": "admin-invitations",
"objectKey": "ui.admin.invitations",
- "label": {"en": "User Invitations", "de": "Benutzer-Einladungen", "fr": "Invitations utilisateurs"},
+ "label": t("Benutzer-Einladungen"),
"icon": "FaEnvelopeOpenText",
"path": "/admin/invitations",
"order": 20,
@@ -176,28 +206,19 @@ NAVIGATION_SECTIONS = [
{
"id": "admin-user-access-overview",
"objectKey": "ui.admin.userAccessOverview",
- "label": {"en": "User Access Overview", "de": "Benutzer-Zugriffsübersicht", "fr": "Aperçu des accès utilisateur"},
+ "label": t("Benutzer-Zugriffsübersicht"),
"icon": "FaClipboardList",
"path": "/admin/user-access-overview",
"order": 30,
"adminOnly": True,
},
- {
- "id": "admin-billing",
- "objectKey": "ui.admin.billing",
- "label": {"en": "Billing Administration", "de": "Billing-Verwaltung", "fr": "Administration de facturation"},
- "icon": "FaMoneyBillAlt",
- "path": "/admin/billing",
- "order": 40,
- "adminOnly": True,
- },
{
"id": "admin-subscriptions",
"objectKey": "ui.admin.subscriptions",
- "label": {"en": "Subscriptions", "de": "Abonnements", "fr": "Abonnements"},
+ "label": t("Abonnements"),
"icon": "FaFileContract",
"path": "/admin/subscriptions",
- "order": 50,
+ "order": 40,
"adminOnly": True,
},
],
@@ -205,13 +226,13 @@ NAVIGATION_SECTIONS = [
# ── System ──
{
"id": "admin-system-group",
- "title": {"en": "System", "de": "System", "fr": "Système"},
+ "title": t("System"),
"order": 30,
"items": [
{
"id": "admin-roles",
"objectKey": "ui.admin.roles",
- "label": {"en": "Roles", "de": "Rollen", "fr": "Rôles"},
+ "label": t("Rollen"),
"icon": "FaUserTag",
"path": "/admin/mandate-roles",
"order": 10,
@@ -220,7 +241,7 @@ NAVIGATION_SECTIONS = [
{
"id": "admin-mandate-role-permissions",
"objectKey": "ui.admin.mandateRolePermissions",
- "label": {"en": "Role Permissions", "de": "Rollen-Berechtigungen", "fr": "Permissions des rôles"},
+ "label": t("Rollen-Berechtigungen"),
"icon": "FaKey",
"path": "/admin/mandate-role-permissions",
"order": 20,
@@ -229,7 +250,7 @@ NAVIGATION_SECTIONS = [
{
"id": "admin-mandates",
"objectKey": "ui.admin.mandates",
- "label": {"en": "Mandates", "de": "Mandanten", "fr": "Mandats"},
+ "label": t("Mandanten"),
"icon": "FaBuilding",
"path": "/admin/mandates",
"order": 30,
@@ -238,7 +259,7 @@ NAVIGATION_SECTIONS = [
{
"id": "admin-user-mandates",
"objectKey": "ui.admin.userMandates",
- "label": {"en": "Mandate Members", "de": "Mandanten-Mitglieder", "fr": "Membres du mandat"},
+ "label": t("Mandanten-Mitglieder"),
"icon": "FaUserFriends",
"path": "/admin/user-mandates",
"order": 40,
@@ -247,7 +268,7 @@ NAVIGATION_SECTIONS = [
{
"id": "admin-access",
"objectKey": "ui.admin.access",
- "label": {"en": "Access Management", "de": "Zugriffsverwaltung", "fr": "Gestion des accès"},
+ "label": t("Zugriffsverwaltung"),
"icon": "FaBuilding",
"path": "/admin/access",
"order": 50,
@@ -256,7 +277,7 @@ NAVIGATION_SECTIONS = [
{
"id": "admin-feature-instances",
"objectKey": "ui.admin.featureInstances",
- "label": {"en": "Feature Instances", "de": "Feature-Instanzen", "fr": "Instances de features"},
+ "label": t("Feature-Instanzen"),
"icon": "FaCubes",
"path": "/admin/feature-instances",
"order": 60,
@@ -265,43 +286,43 @@ NAVIGATION_SECTIONS = [
{
"id": "admin-feature-roles",
"objectKey": "ui.admin.featureRoles",
- "label": {"en": "Feature Role Templates", "de": "Features Rollen-Vorlagen", "fr": "Modèles de rôles features"},
+ "label": t("Features Rollen-Vorlagen"),
"icon": "FaShieldAlt",
"path": "/admin/feature-roles",
"order": 70,
"adminOnly": True,
"sysAdminOnly": True,
},
- {
- "id": "admin-automation-events",
- "objectKey": "ui.admin.automationEvents",
- "label": {"en": "Automation Events", "de": "Automation Events", "fr": "Événements d'automatisation"},
- "icon": "FaClock",
- "path": "/admin/automation-events",
- "order": 80,
- "adminOnly": True,
- "sysAdminOnly": True,
- },
- {
- "id": "admin-automation-logs",
- "objectKey": "ui.admin.automationLogs",
- "label": {"en": "Execution Logs", "de": "Ausführungsprotokolle", "fr": "Journaux d'exécution"},
- "icon": "FaClipboardList",
- "path": "/admin/automation-logs",
- "order": 85,
- "adminOnly": True,
- "sysAdminOnly": True,
- },
{
"id": "admin-logs",
"objectKey": "ui.admin.logs",
- "label": {"en": "Logs", "de": "Logs", "fr": "Logs"},
+ "label": t("Logs"),
"icon": "FaFileAlt",
"path": "/admin/logs",
"order": 90,
"adminOnly": True,
"sysAdminOnly": True,
},
+ {
+ "id": "admin-languages",
+ "objectKey": "ui.admin.languages",
+ "label": t("UI-Sprachen"),
+ "icon": "FaGlobe",
+ "path": "/admin/languages",
+ "order": 95,
+ "adminOnly": True,
+ "sysAdminOnly": True,
+ },
+ {
+ "id": "admin-demo-config",
+ "objectKey": "ui.admin.demoConfig",
+ "label": t("Demo Config"),
+ "icon": "FaCubes",
+ "path": "/admin/demo-config",
+ "order": 100,
+ "adminOnly": True,
+ "sysAdminOnly": True,
+ },
],
},
],
@@ -369,7 +390,6 @@ UI_OBJECTS = _buildUiObjectsFromNavigation()
# - data.uam.* → User Access Management (mandantenübergreifend)
# - data.chat.* → Chat/AI-Daten (benutzer-eigen, kein Mandantenkontext)
# - data.files.* → Dateien (benutzer-eigen)
-# - data.automation.* → Automation (benutzer-eigen)
# - data.feature.* → Mandanten-/Feature-spezifische Daten (dynamisch)
# =============================================================================
@@ -377,72 +397,66 @@ DATA_OBJECTS = [
# UAM (User Access Management) - mandantenübergreifend
{
"objectKey": "data.uam.UserInDB",
- "label": {"en": "User", "de": "Benutzer", "fr": "Utilisateur"},
+ "label": "Benutzer",
"meta": {"table": "UserInDB", "namespace": "uam"}
},
{
"objectKey": "data.uam.AuthEvent",
- "label": {"en": "Auth Event", "de": "Auth-Ereignis", "fr": "Événement d'auth"},
+ "label": "Auth-Ereignis",
"meta": {"table": "AuthEvent", "namespace": "uam"}
},
{
"objectKey": "data.uam.UserConnection",
- "label": {"en": "Connection", "de": "Verbindung", "fr": "Connexion"},
+ "label": "Verbindung",
"meta": {"table": "UserConnection", "namespace": "uam"}
},
{
"objectKey": "data.uam.Mandate",
- "label": {"en": "Mandate", "de": "Mandant", "fr": "Mandat"},
+ "label": "Mandant",
"meta": {"table": "Mandate", "namespace": "uam"}
},
{
"objectKey": "data.uam.UserMandate",
- "label": {"en": "User Mandate", "de": "Benutzer-Mandant", "fr": "Mandat utilisateur"},
+ "label": "Benutzer-Mandant",
"meta": {"table": "UserMandate", "namespace": "uam"}
},
{
"objectKey": "data.uam.Invitation",
- "label": {"en": "Invitation", "de": "Einladung", "fr": "Invitation"},
+ "label": "Einladung",
"meta": {"table": "Invitation", "namespace": "uam"}
},
{
"objectKey": "data.uam.Role",
- "label": {"en": "Role", "de": "Rolle", "fr": "Rôle"},
+ "label": "Rolle",
"meta": {"table": "Role", "namespace": "uam"}
},
{
"objectKey": "data.uam.AccessRule",
- "label": {"en": "Access Rule", "de": "Zugriffsregel", "fr": "Règle d'accès"},
+ "label": "Zugriffsregel",
"meta": {"table": "AccessRule", "namespace": "uam"}
},
{
"objectKey": "data.uam.FeatureInstance",
- "label": {"en": "Feature Instance", "de": "Feature-Instanz", "fr": "Instance de feature"},
+ "label": "Feature-Instanz",
"meta": {"table": "FeatureInstance", "namespace": "uam"}
},
# Chat - benutzer-eigen, kein Mandantenkontext
{
"objectKey": "data.chat.Prompt",
- "label": {"en": "Prompt", "de": "Prompt", "fr": "Prompt"},
+ "label": "Prompt",
"meta": {"table": "Prompt", "namespace": "chat", "groupDisabled": True}
},
{
"objectKey": "data.chat.ChatWorkflow",
- "label": {"en": "Chat Workflow", "de": "Chat-Workflow", "fr": "Workflow de chat"},
+ "label": "Chat-Workflow",
"meta": {"table": "ChatWorkflow", "namespace": "chat", "groupDisabled": True}
},
# Files - benutzer-eigen
{
"objectKey": "data.files.FileItem",
- "label": {"en": "File", "de": "Datei", "fr": "Fichier"},
+ "label": "Datei",
"meta": {"table": "FileItem", "namespace": "files", "groupDisabled": True}
},
- # Automation - benutzer-eigen
- {
- "objectKey": "data.automation.AutomationDefinition",
- "label": {"en": "Automation", "de": "Automatisierung", "fr": "Automatisation"},
- "meta": {"table": "AutomationDefinition", "namespace": "automation", "groupDisabled": True}
- },
]
# =============================================================================
@@ -450,49 +464,39 @@ DATA_OBJECTS = [
# =============================================================================
RESOURCE_OBJECTS = [
- {
- "objectKey": "resource.store.automation",
- "label": {"en": "Store: Automation", "de": "Store: Automation", "fr": "Store: Automatisation"},
- "meta": {"category": "store", "featureCode": "automation"}
- },
- {
- "objectKey": "resource.store.automation2",
- "label": {"en": "Store: Automation 2", "de": "Store: Automation 2", "fr": "Store: Automatisation 2"},
- "meta": {"category": "store", "featureCode": "automation2"}
- },
{
"objectKey": "resource.store.teamsbot",
- "label": {"en": "Store: Teams Bot", "de": "Store: Teams Bot", "fr": "Store: Teams Bot"},
+ "label": "Store: Teams Bot",
"meta": {"category": "store", "featureCode": "teamsbot"}
},
{
"objectKey": "resource.store.workspace",
- "label": {"en": "Store: AI Workspace", "de": "Store: AI Workspace", "fr": "Store: AI Workspace"},
+ "label": "Store: AI Workspace",
"meta": {"category": "store", "featureCode": "workspace"}
},
{
"objectKey": "resource.store.commcoach",
- "label": {"en": "Store: CommCoach", "de": "Store: CommCoach", "fr": "Store: CommCoach"},
+ "label": "Store: CommCoach",
"meta": {"category": "store", "featureCode": "commcoach"}
},
{
"objectKey": "resource.system.api.auth",
- "label": {"en": "Authentication API", "de": "Authentifizierungs-API", "fr": "API d'authentification"},
+ "label": "Authentifizierungs-API",
"meta": {"endpoint": "/api/auth/*"}
},
{
"objectKey": "resource.system.api.users",
- "label": {"en": "Users API", "de": "Benutzer-API", "fr": "API des utilisateurs"},
+ "label": "Benutzer-API",
"meta": {"endpoint": "/api/users/*"}
},
{
"objectKey": "resource.system.api.mandates",
- "label": {"en": "Mandates API", "de": "Mandanten-API", "fr": "API des mandats"},
+ "label": "Mandanten-API",
"meta": {"endpoint": "/api/mandates/*"}
},
{
"objectKey": "resource.system.api.rbac",
- "label": {"en": "RBAC API", "de": "RBAC-API", "fr": "API RBAC"},
+ "label": "RBAC-API",
"meta": {"endpoint": "/api/rbac/*"}
},
]
@@ -504,13 +508,13 @@ def _discoverAicoreProviderObjects() -> List[Dict[str, Any]]:
Providers are discovered from the model registry at startup.
"""
providerLabels = {
- "anthropic": {"en": "Anthropic (Claude)", "de": "Anthropic (Claude)", "fr": "Anthropic (Claude)"},
- "openai": {"en": "OpenAI (GPT)", "de": "OpenAI (GPT)", "fr": "OpenAI (GPT)"},
- "mistral": {"en": "Mistral (Le Chat)", "de": "Mistral (Le Chat)", "fr": "Mistral (Le Chat)"},
- "perplexity": {"en": "Perplexity", "de": "Perplexity", "fr": "Perplexity"},
- "tavily": {"en": "Tavily (Web Search)", "de": "Tavily (Websuche)", "fr": "Tavily (Recherche Web)"},
- "privatellm": {"en": "Private LLM", "de": "Private LLM", "fr": "LLM Privé"},
- "internal": {"en": "Internal", "de": "Intern", "fr": "Interne"},
+ "anthropic": "Anthropic (Claude)",
+ "openai": "OpenAI (GPT)",
+ "mistral": "Mistral (Le Chat)",
+ "perplexity": "Perplexity",
+ "tavily": "Tavily (Websuche)",
+ "privatellm": "Private LLM",
+ "internal": "Intern",
}
try:
@@ -520,7 +524,7 @@ def _discoverAicoreProviderObjects() -> List[Dict[str, Any]]:
objects = []
for provider in providers:
- label = providerLabels.get(provider, {"en": provider, "de": provider, "fr": provider})
+ label = providerLabels.get(provider, provider)
objects.append({
"objectKey": f"resource.aicore.{provider}",
"label": label,
diff --git a/modules/workflows/automation/__init__.py b/modules/workflows/automation/__init__.py
deleted file mode 100644
index 89022240..00000000
--- a/modules/workflows/automation/__init__.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# Copyright (c) 2025 Patrick Motsch
-# All rights reserved.
-"""
-Workflow feature - handles workflow execution, scheduling, and start/stop operations.
-"""
-
-from .mainWorkflow import chatStart, chatStop, executeAutomation, syncAutomationEvents, createAutomationEventHandler
-
-__all__ = ['chatStart', 'chatStop', 'executeAutomation', 'syncAutomationEvents', 'createAutomationEventHandler']
-
diff --git a/modules/workflows/automation/mainWorkflow.py b/modules/workflows/automation/mainWorkflow.py
deleted file mode 100644
index dc387926..00000000
--- a/modules/workflows/automation/mainWorkflow.py
+++ /dev/null
@@ -1,325 +0,0 @@
-# Copyright (c) 2025 Patrick Motsch
-# All rights reserved.
-"""
-Main workflow service - handles workflow execution, scheduling, and start/stop operations.
-"""
-
-import logging
-import json
-from typing import Dict, Any, Optional
-
-from modules.datamodels.datamodelChat import ChatWorkflow, UserInputRequest, WorkflowModeEnum
-from modules.features.automation.datamodelFeatureAutomation import AutomationDefinition
-from modules.datamodels.datamodelUam import User
-from modules.shared.timeUtils import getUtcTimestamp
-from modules.shared.eventManagement import eventManager
-from modules.features.automation.mainAutomation import getAutomationServices
-from modules.workflows.workflowManager import WorkflowManager
-from .subAutomationUtils import parseScheduleToCron, planToPrompt, replacePlaceholders
-
-logger = logging.getLogger(__name__)
-
-
-async def chatStart(currentUser: User, userInput: UserInputRequest, workflowMode: WorkflowModeEnum, workflowId: Optional[str] = None, mandateId: Optional[str] = None, featureInstanceId: Optional[str] = None, featureCode: Optional[str] = None, services=None) -> ChatWorkflow:
- """
- Starts a new chat or continues an existing one, then launches processing asynchronously.
-
- Args:
- currentUser: Current user
- userInput: User input request
- workflowId: Optional workflow ID to continue existing workflow
- workflowMode: Workflow mode (Dynamic, Automation, etc.)
- mandateId: Mandate ID (required for billing)
- featureInstanceId: Feature instance ID (required for billing)
- featureCode: Feature code (e.g., 'automation')
- services: Pre-built service hub from the calling feature (required). Each feature must pass its own services.
- """
- if services is None:
- raise ValueError("services is required: each feature must pass its own service hub (e.g. getAutomationServices)")
- try:
-
- # Store allowedProviders in services context for model selection
- if hasattr(userInput, 'allowedProviders') and userInput.allowedProviders:
- services.allowedProviders = userInput.allowedProviders
- logger.info(f"AI provider filter active: {userInput.allowedProviders}")
-
- # Store feature code in services (for billing)
- if featureCode:
- services.featureCode = featureCode
-
- workflowManager = WorkflowManager(services)
- workflow = await workflowManager.workflowStart(userInput, workflowMode, workflowId)
- return workflow
- except Exception as e:
- logger.error(f"Error starting chat: {str(e)}")
- raise
-
-async def chatStop(currentUser: User, workflowId: str, mandateId: Optional[str] = None, featureInstanceId: Optional[str] = None, featureCode: Optional[str] = None, services=None) -> ChatWorkflow:
- """Stops a running chat. Caller must pass services from the owning feature."""
- if services is None:
- raise ValueError("services is required: each feature must pass its own service hub (e.g. getAutomationServices)")
- try:
- if featureCode:
- services.featureCode = featureCode
- workflowManager = WorkflowManager(services)
- return await workflowManager.workflowStop(workflowId)
- except Exception as e:
- logger.error(f"Error stopping chat: {str(e)}")
- raise
-
-
-async def executeAutomation(automationId: str, automation, creatorUser: User, services) -> ChatWorkflow:
- """Execute automation workflow with the creator user's context.
-
- The automation object and creatorUser are resolved by the caller (handler)
- using the SysAdmin eventUser. This function does NOT re-load them.
-
- Args:
- automationId: ID of automation to execute
- automation: Pre-loaded automation object (with system fields like sysCreatedBy)
- creatorUser: The user who created the automation (workflow runs in this context)
- services: Services instance (used for interfaceDbApp etc.)
-
- Returns:
- ChatWorkflow instance created by automation execution
- """
- executionStartTime = getUtcTimestamp()
- executionLog = {
- "timestamp": executionStartTime,
- "workflowId": None,
- "status": "running",
- "messages": []
- }
-
- try:
- executionLog["messages"].append(f"Started execution at {executionStartTime}")
-
- # Store allowed providers from automation in services context
- if hasattr(automation, 'allowedProviders') and automation.allowedProviders:
- services.allowedProviders = automation.allowedProviders
- logger.debug(f"Automation {automationId} restricted to providers: {automation.allowedProviders}")
-
- # Context comes EXCLUSIVELY from the automation definition
- automationMandateId = str(automation.mandateId) if automation.mandateId is not None else None
- automationFeatureInstanceId = str(automation.featureInstanceId) if automation.featureInstanceId is not None else None
-
- if not automationMandateId or not automationFeatureInstanceId:
- raise ValueError(f"Automation {automationId} missing mandateId or featureInstanceId")
-
- logger.info(f"Executing automation {automationId} as user {creatorUser.id} with mandateId={automationMandateId}, featureInstanceId={automationFeatureInstanceId}")
-
- # 1. Replace placeholders in template to generate plan
- template = automation.template or ""
- placeholders = automation.placeholders or {}
- planJson = replacePlaceholders(template, placeholders)
- try:
- plan = json.loads(planJson)
- except json.JSONDecodeError as e:
- logger.error(f"Failed to parse plan JSON after placeholder replacement: {str(e)}")
- logger.error(f"Template: {template[:500]}...")
- logger.error(f"Placeholders: {placeholders}")
- logger.error(f"Generated planJson (first 1000 chars): {planJson[:1000]}")
- logger.error(f"Error position: line {e.lineno}, column {e.colno}, char {e.pos}")
- if e.pos is not None:
- start = max(0, e.pos - 100)
- end = min(len(planJson), e.pos + 100)
- logger.error(f"Context around error: ...{planJson[start:end]}...")
- raise ValueError(f"Invalid JSON after placeholder replacement: {str(e)}")
- executionLog["messages"].append("Template placeholders replaced successfully")
- executionLog["messages"].append(f"Using creator user: {creatorUser.id}")
-
- # 2. Create UserInputRequest from plan
- # Embed plan JSON in prompt for TemplateMode to extract
- promptText = planToPrompt(plan)
- planJsonStr = json.dumps(plan)
- # Embed plan as JSON comment so TemplateMode can extract it
- promptWithPlan = f"{promptText}\n\n\n{planJsonStr}\n"
-
- userInput = UserInputRequest(
- prompt=promptWithPlan,
- listFileId=[],
- userLanguage=creatorUser.language or "en"
- )
-
- executionLog["messages"].append("Starting workflow execution")
-
- # 3. Start workflow using chatStart with creator's context
- # mandateId and featureInstanceId come from the automation definition
- # Each feature must pass its own services - no fallback
- creatorServices = getAutomationServices(
- creatorUser,
- mandateId=automationMandateId,
- featureInstanceId=automationFeatureInstanceId,
- )
- workflow = await chatStart(
- currentUser=creatorUser,
- userInput=userInput,
- workflowMode=WorkflowModeEnum.WORKFLOW_AUTOMATION,
- workflowId=None,
- mandateId=automationMandateId,
- featureInstanceId=automationFeatureInstanceId,
- featureCode='automation',
- services=creatorServices,
- )
-
- executionLog["workflowId"] = workflow.id
- executionLog["status"] = "completed"
- executionLog["messages"].append(f"Workflow {workflow.id} started successfully")
- logger.info(f"Started workflow {workflow.id} with plan containing {len(plan.get('tasks', []))} tasks (plan embedded in userInput)")
-
- # Set workflow name with "automated" prefix — use creatorServices from chatStart
- automationLabel = automation.label or "Unknown Automation"
- workflowName = f"automated: {automationLabel}"
- creatorServices.interfaceDbChat.updateWorkflow(workflow.id, {"name": workflowName})
- logger.info(f"Set workflow {workflow.id} name to: {workflowName}")
-
- # Save execution log (bypasses RBAC — system operation, not a user edit)
- executionLogs = list(automation.executionLogs or [])
- executionLogs.append(executionLog)
- # Keep only last 50 executions
- if len(executionLogs) > 50:
- executionLogs = executionLogs[-50:]
-
- services.interfaceDbAutomation._saveExecutionLog(automationId, executionLogs)
-
- return workflow
- except Exception as e:
- # Log error to execution log
- executionLog["status"] = "error"
- executionLog["messages"].append(f"Error: {str(e)}")
-
- # Save execution log even on error (bypasses RBAC — system operation)
- # Use the automation object already passed in (no re-load needed)
- try:
- executionLogs = list(getattr(automation, 'executionLogs', None) or [])
- executionLogs.append(executionLog)
- if len(executionLogs) > 50:
- executionLogs = executionLogs[-50:]
- services.interfaceDbAutomation._saveExecutionLog(automationId, executionLogs)
- except Exception as logError:
- logger.error(f"Error saving execution log: {str(logError)}")
-
- raise
-
-
-def syncAutomationEvents(services, eventUser) -> Dict[str, Any]:
- """Sync scheduler with all active automations.
- All operations (DB reads, scheduler registration) are synchronous.
-
- Args:
- services: Services instance for data access
- eventUser: System-level event user for accessing automations
-
- Returns:
- Dictionary with sync results (synced count and event IDs)
- """
- # Get all automation definitions filtered by RBAC (for current mandate)
- filtered = services.interfaceDbAutomation.getAllAutomationDefinitionsWithRBAC(eventUser)
-
- registeredEvents = {}
-
- for automation in filtered:
- # Handle both dict and object access patterns
- if isinstance(automation, dict):
- automationId = automation.get('id')
- isActive = automation.get('active', False)
- currentEventId = automation.get('eventId')
- schedule = automation.get('schedule')
- else:
- automationId = automation.id
- isActive = automation.active if hasattr(automation, 'active') else False
- currentEventId = automation.eventId if hasattr(automation, 'eventId') else None
- schedule = automation.schedule if hasattr(automation, 'schedule') else None
-
- if not schedule:
- logger.warning(f"Automation {automationId} has no schedule, skipping")
- continue
-
- try:
- # Parse schedule to cron kwargs
- cronKwargs = parseScheduleToCron(schedule)
-
- if isActive:
- newEventId = f"automation.{automationId}"
- handler = createAutomationEventHandler(automationId, eventUser)
-
- # Register with replaceExisting=True (atomically replaces old event)
- eventManager.registerCron(
- jobId=newEventId,
- func=handler,
- cronKwargs=cronKwargs,
- replaceExisting=True
- )
-
- # Update automation with new eventId
- if currentEventId != newEventId:
- services.interfaceDbAutomation.updateAutomationDefinition(
- automationId,
- {"eventId": newEventId}
- )
-
- registeredEvents[automationId] = newEventId
- else:
- # Remove event if exists
- if currentEventId:
- try:
- eventManager.remove(currentEventId)
- services.interfaceDbAutomation.updateAutomationDefinition(
- automationId,
- {"eventId": None}
- )
- except Exception as e:
- logger.warning(f"Error removing event {currentEventId}: {str(e)}")
- except Exception as e:
- logger.error(f"Error syncing automation {automationId}: {str(e)}")
-
- return {
- "synced": len(registeredEvents),
- "events": registeredEvents
- }
-
-
-def createAutomationEventHandler(automationId: str, eventUser):
- """Create event handler function for a specific automation.
-
- Args:
- automationId: ID of automation to create handler for
- eventUser: System-level event user for accessing automations (captured in closure)
-
- Returns:
- Async handler function for scheduled automation execution
- """
- async def handler():
- try:
- if not eventUser:
- logger.error("Event user not available for automation execution")
- return
-
- # Load automation using SysAdmin eventUser (has unrestricted access)
- eventServices = getAutomationServices(eventUser, mandateId=None, featureInstanceId=None)
- automation = eventServices.interfaceDbAutomation.getAutomationDefinition(automationId, includeSystemFields=True)
- if not automation or not getattr(automation, "active", False):
- logger.warning(f"Automation {automationId} not found or not active, skipping execution")
- return
-
- # Get creator user ID from automation's sysCreatedBy system field
- creatorUserId = getattr(automation, "sysCreatedBy", None)
- if not creatorUserId:
- logger.error(f"Automation {automationId} has no creator user (sysCreatedBy missing)")
- return
-
- # Get creator user from database (using SysAdmin access)
- creatorUser = eventServices.interfaceDbApp.getUser(creatorUserId)
- if not creatorUser:
- logger.error(f"Creator user {creatorUserId} not found for automation {automationId}")
- return
-
- # Execute automation — pass automation object and creatorUser directly
- # No re-load needed in executeAutomation
- await executeAutomation(automationId, automation, creatorUser, eventServices)
- logger.info(f"Successfully executed automation {automationId} as user {creatorUserId}")
- except Exception as e:
- logger.error(f"Error executing automation {automationId}: {str(e)}")
-
- return handler
-
diff --git a/modules/workflows/automation/subAutomationSchedule.py b/modules/workflows/automation/subAutomationSchedule.py
deleted file mode 100644
index 18cc4245..00000000
--- a/modules/workflows/automation/subAutomationSchedule.py
+++ /dev/null
@@ -1,65 +0,0 @@
-# Copyright (c) 2025 Patrick Motsch
-# All rights reserved.
-"""
-Automation Lifecycle Manager.
-Handles startup and shutdown of scheduled automations.
-
-Note: This module is NOT for feature container lifecycle - it only manages
-the automation scheduler (loading/syncing scheduled automation events).
-"""
-
-import logging
-from modules.features.automation.mainAutomation import getAutomationServices
-
-logger = logging.getLogger(__name__)
-
-
-def start(eventUser) -> bool:
- """
- Start automation scheduler and sync scheduled events.
- All operations are synchronous (DB access, scheduler registration).
-
- Args:
- eventUser: System-level event user for background operations (provided by app.py)
- """
- if not eventUser:
- logger.warning("Automation: No event user provided, skipping automation sync")
- return True
-
- try:
- from modules.workflows.automation import syncAutomationEvents
- from modules.shared.callbackRegistry import callbackRegistry
-
- # Get services for event user (provides access to interfaces)
- services = getAutomationServices(eventUser, mandateId=None, featureInstanceId=None)
-
- # Register callback for automation changes
- def onAutomationChanged(chatInterface):
- """Callback triggered when automations are created/updated/deleted."""
- eventServices = getAutomationServices(eventUser, mandateId=None, featureInstanceId=None)
- syncAutomationEvents(eventServices, eventUser)
-
- callbackRegistry.register('automation.changed', onAutomationChanged)
- logger.info("Automation: Registered change callback")
-
- # Initial sync on startup
- syncAutomationEvents(services, eventUser)
- logger.info("Automation: Scheduled events synced on startup")
-
- except Exception as e:
- logger.error(f"Automation: Error setting up events on startup: {str(e)}")
- return False
-
- return True
-
-
-def stop(eventUser) -> bool:
- """
- Stop automation scheduler.
-
- Args:
- eventUser: System-level event user (provided by app.py)
- """
- # Callbacks will remain registered (acceptable for shutdown)
- logger.info("Automation: Scheduler stopped (callbacks cleaned up on shutdown)")
- return True
diff --git a/modules/workflows/automation/subAutomationTemplates.py b/modules/workflows/automation/subAutomationTemplates.py
deleted file mode 100644
index eb131f0a..00000000
--- a/modules/workflows/automation/subAutomationTemplates.py
+++ /dev/null
@@ -1,385 +0,0 @@
-# Copyright (c) 2025 Patrick Motsch
-# All rights reserved.
-"""
-Automation templates for workflow definitions.
-
-Contains predefined workflow templates that can be used to create automation definitions.
-"""
-
-from typing import Dict, Any
-
-# Automation templates structure
-AUTOMATION_TEMPLATES: Dict[str, Any] = {
- "sets": [
- {
- "template": {
- "overview": "SharePoint Themen Zusammenfassung",
- "tasks": [
- {
- "id": "Task01",
- "title": "SharePoint Themen Zusammenfassung",
- "description": "Erstellt eine Zusammenfassung aller SharePoint Sites und deren Inhalte",
- "objective": "Erstelle eine Zusammenfassung aller SharePoint Themen (Sites) und deren Inhalte als Word-Dokument",
- "actionList": [
- {
- "execMethod": "sharepoint",
- "execAction": "findDocumentPath",
- "execParameters": {
- "connectionReference": "{{KEY:connectionName}}",
- "searchQuery": "*",
- "maxResults": 100
- },
- "execResultLabel": "sharepoint_sites_found"
- },
- {
- "execMethod": "sharepoint",
- "execAction": "listDocuments",
- "execParameters": {
- "connectionReference": "{{KEY:connectionName}}",
- "pathQuery": "{{KEY:sharepointBasePath}}",
- "includeSubfolders": True
- },
- "execResultLabel": "sharepoint_structure"
- },
- {
- "execMethod": "ai",
- "execAction": "process",
- "execParameters": {
- "aiPrompt": "{{KEY:summaryPrompt}}",
- "documentList": ["sharepoint_sites_found", "sharepoint_structure"],
- "resultType": "docx"
- },
- "execResultLabel": "sharepoint_summary"
- },
- {
- "execMethod": "sharepoint",
- "execAction": "uploadDocument",
- "execParameters": {
- "connectionReference": "{{KEY:connectionName}}",
- "documentList": ["sharepoint_summary"],
- "pathQuery": "{{KEY:sharepointFolderNameDestination}}"
- },
- "execResultLabel": "sharepoint_upload_result"
- }
- ]
- }
- ]
- },
- "parameters": {
- "connectionName": "connection:msft:p.motsch@valueon.ch",
- "sharepointBasePath": "/sites/company-share",
- "sharepointFolderNameDestination": "/sites/company-share/Freigegebene Dokumente/15. Persoenliche Ordner/Patrick Motsch/output",
- "summaryPrompt": "Erstelle eine umfassende Zusammenfassung aller SharePoint Sites und deren Inhalte. Strukturiere das Dokument nach Sites und fasse für jede Site die wichtigsten Themen, Ordnerstrukturen und Dokumente zusammen. Erstelle ein professionelles Word-Dokument mit Überschriften, Abschnitten und einer klaren Gliederung. Berücksichtige alle gefundenen Sites, deren Ordnerstrukturen und dokumentiere die wichtigsten Inhalte pro Site."
- }
- },
- {
- "template": {
- "overview": "Immobilienrecherche Zürich",
- "tasks": [
- {
- "id": "Task02",
- "title": "Immobilienrecherche Zürich",
- "description": "Webrecherche nach Immobilien im Kanton Zürich und Speicherung in Excel",
- "objective": "Immobilienrecherche im Kanton Zürich zum Verkauf (5-20 Mio. CHF) und speichere Ergebnisse in Excel-Liste auf SharePoint",
- "actionList": [
- {
- "execMethod": "ai",
- "execAction": "webResearch",
- "execParameters": {
- "prompt": "{{KEY:immobilienResearchPrompt}}",
- "urlList": ["{{KEY:immobilienResearchUrl}}"]
- },
- "execResultLabel": "immobilien_research_results"
- },
- {
- "execMethod": "ai",
- "execAction": "process",
- "execParameters": {
- "aiPrompt": "{{KEY:excelFormatPrompt}}",
- "documentList": ["immobilien_research_results"],
- "resultType": "xlsx"
- },
- "execResultLabel": "immobilien_excel_list"
- },
- {
- "execMethod": "sharepoint",
- "execAction": "uploadDocument",
- "execParameters": {
- "connectionReference": "{{KEY:connectionName}}",
- "documentList": ["immobilien_excel_list"],
- "pathQuery": "{{KEY:sharepointFolderNameDestination}}"
- },
- "execResultLabel": "immobilien_upload_result"
- }
- ]
- }
- ]
- },
- "parameters": {
- "connectionName": "connection:msft:p.motsch@valueon.ch",
- "sharepointFolderNameDestination": "/sites/company-share/Freigegebene Dokumente/15. Persoenliche Ordner/Patrick Motsch/output",
- "immobilienResearchUrl": ["https://www.homegate.ch", "https://www.immoscout24.ch", "https://www.immowelt.ch"],
- "immobilienResearchPrompt": "Suche nach Immobilien zum Verkauf im Kanton Zürich, Schweiz, im Preisbereich von 5-20 Millionen CHF. Sammle Informationen zu: Ort, Preis, Beschreibung, URL zu Bildern, Verkäufer/Kontaktinformationen.",
- "excelFormatPrompt": "Erstelle eine Excel-Datei mit den recherchierten Immobilien. Jede Immobilie soll eine Zeile sein mit den folgenden Spalten: Ort, Preis (in CHF), Beschreibung, URL zu Bild, Verkäufer. Verwende die Daten aus der Webrecherche."
- }
- },
- {
- "template": {
- "overview": "Spesenbelege Zusammenfassung",
- "tasks": [
- {
- "id": "Task03",
- "title": "Spesenbelege CSV Zusammenfassung",
- "description": "Liest PDF-Spesenbelege aus SharePoint-Ordner und erstellt CSV-Zusammenfassung",
- "objective": "Extrahiere alle PDF-Spesenbelege aus einem SharePoint-Ordner und erstelle eine CSV-Datei mit allen Spesendaten im selben Ordner",
- "actionList": [
- {
- "execMethod": "sharepoint",
- "execAction": "findDocumentPath",
- "execParameters": {
- "connectionReference": "{{KEY:connectionName}}",
- "searchQuery": "{{KEY:sharepointFolderNameSource}}:files:.pdf",
- "maxResults": 100
- },
- "execResultLabel": "sharepoint_pdf_files"
- },
- {
- "execMethod": "sharepoint",
- "execAction": "readDocuments",
- "execParameters": {
- "connectionReference": "{{KEY:connectionName}}",
- "pathObject": "sharepoint_pdf_files"
- },
- "execResultLabel": "spesenbelege_documents"
- },
- {
- "execMethod": "ai",
- "execAction": "process",
- "execParameters": {
- "aiPrompt": "{{KEY:expenseExtractionPrompt}}",
- "documentList": ["spesenbelege_documents"],
- "resultType": "csv"
- },
- "execResultLabel": "spesenbelege_csv"
- },
- {
- "execMethod": "sharepoint",
- "execAction": "uploadDocument",
- "execParameters": {
- "connectionReference": "{{KEY:connectionName}}",
- "documentList": ["spesenbelege_csv"],
- "pathQuery": "{{KEY:sharepointFolderNameDestination}}"
- },
- "execResultLabel": "spesenbelege_upload_result"
- }
- ]
- }
- ]
- },
- "parameters": {
- "connectionName": "connection:msft:p.motsch@valueon.ch",
- "sharepointFolderNameSource": "/sites/company-share/Freigegebene Dokumente/15. Persoenliche Ordner/Patrick Motsch/expenses",
- "sharepointFolderNameDestination": "/sites/company-share/Freigegebene Dokumente/15. Persoenliche Ordner/Patrick Motsch/output",
- "expenseExtractionPrompt": "Verarbeite alle bereitgestellten Dokumente, aber extrahiere nur Daten aus PDF-Spesenbelegen (ignoriere andere Dateitypen). Für jeden gefundenen PDF-Spesenbeleg extrahiere als separaten Datensatz: Datum, Betrag, MWST %, Währung, Kategorie, Beschreibung, Rechnungsnummer, Händler/Verkäufer, Steuerbetrag. Erstelle eine CSV-Datei mit einer Zeile pro Spesenbeleg. Verwende die folgenden Spaltenüberschriften: Datum, Betrag, Währung, Kategorie, Beschreibung, Rechnungsnummer, Händler, Steuerbetrag. Stelle sicher, dass alle Beträge numerisch sind und Datumswerte im Format YYYY-MM-DD vorliegen. Wenn ein Dokument kein Spesenbeleg ist, ignoriere es."
- }
- },
- {
- "template": {
- "overview": "Preprocessing Server Data Update",
- "tasks": [
- {
- "id": "Task04",
- "title": "Trigger Preprocessing Server",
- "description": "Triggers the preprocessing server at customer tenant to update database with configuration",
- "objective": "Call preprocessing server endpoint to update database with provided configuration JSON",
- "actionList": [
- {
- "execMethod": "context",
- "execAction": "triggerPreprocessingServer",
- "execParameters": {
- "endpoint": "{{KEY:endpoint}}",
- "configJson": "{{KEY:configJson}}",
- "authSecretConfigKey": "{{KEY:authSecretConfigKey}}"
- },
- "execResultLabel": "preprocessing_server_result"
- }
- ]
- }
- ]
- },
- "parameters": {
- "endpoint": "https://poweron-althaus-preprocess-prod-e3fegaatc7faency.switzerlandnorth-01.azurewebsites.net/api/v1/dataprocessor/update-db-with-config",
- "authSecretConfigKey": "PREPROCESS_ALTHAUS_CHAT_SECRET",
- "configJson": "{\"tables\":[{\"name\":\"Artikel\",\"powerbi_table_name\":\"Artikel\",\"steps\":[{\"keep\":{\"columns\":[\"I_ID\",\"Artikelbeschrieb\",\"Artikelbezeichnung\",\"Artikelgruppe\",\"Artikelkategorie\",\"Artikelkürzel\",\"Artikelnummer\",\"Einheit\",\"Gesperrt\",\"Keywords\",\"Lieferant\",\"Warengruppe\"]}},{\"fillna\":{\"column\":\"Lieferant\",\"value\":\"Unbekannt\"}}]},{\"name\":\"Einkaufspreis\",\"powerbi_table_name\":\"Einkaufspreis\",\"steps\":[{\"to_numeric\":{\"column\":\"EP_CHF\",\"errors\":\"coerce\"}},{\"dropna\":{\"subset\":[\"EP_CHF\"]}}]}]}"
- }
- },
- {
- "template": {
- "overview": "JIRA to SharePoint Ticket Synchronization",
- "tasks": [
- {
- "id": "Task01",
- "title": "Sync JIRA Tickets to SharePoint",
- "description": "Export JIRA tickets, merge with SharePoint file, upload back, and import changes to JIRA",
- "objective": "Synchronize JIRA tickets with SharePoint file (bidirectional sync)",
- "actionList": [
- {
- "execMethod": "sharepoint",
- "execAction": "findSiteByUrl",
- "execParameters": {
- "connectionReference": "{{KEY:sharepointConnection}}",
- "hostname": "{{KEY:sharepointHostname}}",
- "sitePath": "{{KEY:sharepointSitePath}}"
- },
- "execResultLabel": "sharepoint_site"
- },
- {
- "execMethod": "jira",
- "execAction": "connectJira",
- "execParameters": {
- "apiUsername": "{{KEY:jiraUsername}}",
- "apiTokenConfigKey": "{{KEY:jiraTokenConfigKey}}",
- "apiUrl": "{{KEY:jiraUrl}}",
- "projectCode": "{{KEY:jiraProjectCode}}",
- "issueType": "{{KEY:jiraIssueType}}",
- "taskSyncDefinition": "{{KEY:taskSyncDefinition}}"
- },
- "execResultLabel": "jira_connection"
- },
- {
- "execMethod": "jira",
- "execAction": "exportTicketsAsJson",
- "execParameters": {
- "connectionId": "jira_connection",
- "taskSyncDefinition": "{{KEY:taskSyncDefinition}}"
- },
- "execResultLabel": "jira_exported_tickets"
- },
- {
- "execMethod": "sharepoint",
- "execAction": "downloadFileByPath",
- "execParameters": {
- "connectionReference": "{{KEY:sharepointConnection}}",
- "siteId": "sharepoint_site",
- "filePath": "{{KEY:sharepointMainFolder}}/{{KEY:syncFileName}}"
- },
- "execResultLabel": "existing_file_content"
- },
- {
- "execMethod": "jira",
- "execAction": "parseExcelContent",
- "execParameters": {
- "excelContent": "existing_file_content",
- "skipRows": 3,
- "hasCustomHeaders": True
- },
- "execResultLabel": "existing_parsed_data"
- },
- {
- "execMethod": "jira",
- "execAction": "mergeTicketData",
- "execParameters": {
- "jiraData": "jira_exported_tickets",
- "existingData": "existing_parsed_data",
- "taskSyncDefinition": "{{KEY:taskSyncDefinition}}",
- "idField": "ID"
- },
- "execResultLabel": "merged_ticket_data"
- },
- {
- "execMethod": "sharepoint",
- "execAction": "copyFile",
- "execParameters": {
- "connectionReference": "{{KEY:sharepointConnection}}",
- "siteId": "sharepoint_site",
- "sourceFolder": "{{KEY:sharepointMainFolder}}",
- "sourceFile": "{{KEY:syncFileName}}",
- "destFolder": "{{KEY:sharepointBackupFolder}}",
- "destFile": "backup_{{TIMESTAMP}}_{{KEY:syncFileName}}"
- },
- "execResultLabel": "file_backup"
- },
- {
- "execMethod": "jira",
- "execAction": "createExcelContent",
- "execParameters": {
- "data": "merged_ticket_data",
- "headers": "existing_parsed_data",
- "taskSyncDefinition": "{{KEY:taskSyncDefinition}}"
- },
- "execResultLabel": "new_file_content"
- },
- {
- "execMethod": "sharepoint",
- "execAction": "uploadFile",
- "execParameters": {
- "connectionReference": "{{KEY:sharepointConnection}}",
- "siteId": "sharepoint_site",
- "folderPath": "{{KEY:sharepointMainFolder}}",
- "fileName": "{{KEY:syncFileName}}",
- "content": "new_file_content"
- },
- "execResultLabel": "uploaded_file"
- },
- {
- "execMethod": "sharepoint",
- "execAction": "downloadFileByPath",
- "execParameters": {
- "connectionReference": "{{KEY:sharepointConnection}}",
- "siteId": "sharepoint_site",
- "filePath": "{{KEY:sharepointMainFolder}}/{{KEY:syncFileName}}"
- },
- "execResultLabel": "uploaded_file_content"
- },
- {
- "execMethod": "jira",
- "execAction": "parseExcelContent",
- "execParameters": {
- "excelContent": "uploaded_file_content",
- "skipRows": 3,
- "hasCustomHeaders": True
- },
- "execResultLabel": "import_data"
- },
- {
- "execMethod": "jira",
- "execAction": "importTicketsFromJson",
- "execParameters": {
- "connectionId": "jira_connection",
- "ticketData": "import_data",
- "taskSyncDefinition": "{{KEY:taskSyncDefinition}}"
- },
- "execResultLabel": "import_result"
- }
- ]
- }
- ]
- },
- "parameters": {
- "sharepointConnection": "connection:msft:patrick.motsch@delta.ch",
- "sharepointHostname": "deltasecurityag.sharepoint.com",
- "sharepointSitePath": "SteeringBPM",
- "sharepointMainFolder": "/General/50 Docs hosted by SELISE",
- "sharepointBackupFolder": "/General/50 Docs hosted by SELISE/SyncHistory",
- "syncFileName": "DELTAgroup x SELISE Ticket Exchange List.xlsx",
- "jiraUsername": "p.motsch@valueon.ch",
- "jiraTokenConfigKey": "Feature_SyncDelta_JIRA_DELTA_TOKEN_SECRET",
- "jiraUrl": "https://deltasecurity.atlassian.net",
- "jiraProjectCode": "DCS",
- "jiraIssueType": "Task",
- "taskSyncDefinition": "{\"ID\":[\"get\",[\"key\"]],\"Module Category\":[\"get\",[\"fields\",\"customfield_10058\",\"value\"]],\"Summary\":[\"get\",[\"fields\",\"summary\"]],\"Description\":[\"get\",[\"fields\",\"description\"]],\"References\":[\"get\",[\"fields\",\"customfield_10066\"]],\"Priority\":[\"get\",[\"fields\",\"priority\",\"name\"]],\"Issue Status\":[\"get\",[\"fields\",\"status\",\"name\"]],\"Assignee\":[\"get\",[\"fields\",\"assignee\",\"displayName\"]],\"Issue Created\":[\"get\",[\"fields\",\"created\"]],\"Due Date\":[\"get\",[\"fields\",\"duedate\"]],\"DELTA Comments\":[\"get\",[\"fields\",\"customfield_10167\"]],\"SELISE Ticket References\":[\"put\",[\"fields\",\"customfield_10067\"]],\"SELISE Status Values\":[\"put\",[\"fields\",\"customfield_10065\"]],\"SELISE Comments\":[\"put\",[\"fields\",\"customfield_10168\"]]}"
- }
- }
- ]
-}
-
-
-def getAutomationTemplates() -> Dict[str, Any]:
- """
- Get automation templates.
-
- Returns:
- Dict containing the automation templates structure with 'sets' key.
- """
- return AUTOMATION_TEMPLATES
-
diff --git a/modules/workflows/automation/subAutomationUtils.py b/modules/workflows/automation/subAutomationUtils.py
deleted file mode 100644
index bdac6efb..00000000
--- a/modules/workflows/automation/subAutomationUtils.py
+++ /dev/null
@@ -1,110 +0,0 @@
-# Copyright (c) 2025 Patrick Motsch
-# All rights reserved.
-"""
-Utility functions for automation feature.
-
-Moved from interfaces/interfaceDbChat.py.
-"""
-
-import json
-from typing import Dict, Any
-from datetime import datetime, UTC
-
-
-def parseScheduleToCron(schedule: str) -> Dict[str, Any]:
- """Parse schedule string to cron kwargs for APScheduler"""
- parts = schedule.split()
- if len(parts) != 5:
- raise ValueError(f"Invalid schedule format: {schedule}")
-
- return {
- "minute": parts[0],
- "hour": parts[1],
- "day": parts[2],
- "month": parts[3],
- "day_of_week": parts[4]
- }
-
-
-def planToPrompt(plan: Dict) -> str:
- """Convert plan structure to prompt string for workflow execution"""
- return plan.get("userMessage", plan.get("overview", "Execute automation workflow"))
-
-
-def replacePlaceholders(template: str, placeholders: Dict[str, str]) -> str:
- """Replace placeholders in template with actual values. Placeholder format: {{KEY:PLACEHOLDER_NAME}} or {{TIMESTAMP}}"""
- result = template
-
- # Replace TIMESTAMP placeholder first (calculated placeholder, not from parameters)
- timestampPattern = "{{TIMESTAMP}}"
- if timestampPattern in result:
- timestamp = datetime.now(UTC).strftime("%Y%m%d_%H%M%S")
- result = result.replace(timestampPattern, timestamp)
-
- for placeholderName, value in placeholders.items():
- pattern = f"{{{{KEY:{placeholderName}}}}}"
-
- # Check if placeholder is in an array context like ["{{KEY:...}}"]
- # If value is a JSON array/dict, we should replace the entire ["{{KEY:...}}"] with the array
- arrayPattern = f'["{pattern}"]'
- if arrayPattern in result:
- # Check if value is a JSON array/dict
- isArrayValue = False
- arrayValue = None
-
- if isinstance(value, (list, dict)):
- isArrayValue = True
- arrayValue = json.dumps(value)
- elif isinstance(value, str):
- try:
- parsed = json.loads(value)
- if isinstance(parsed, (list, dict)):
- isArrayValue = True
- arrayValue = value # Already valid JSON string
- except (json.JSONDecodeError, ValueError):
- pass
-
- if isArrayValue:
- # Replace ["{{KEY:...}}"] with the array value
- result = result.replace(arrayPattern, arrayValue)
- continue # Skip the regular replacement below
-
- # Replace occurrences one-by-one to handle mixed contexts
- while pattern in result:
- patternStart = result.find(pattern)
- isQuoted = False
- if patternStart > 0:
- charBefore = result[patternStart - 1]
- patternEnd = patternStart + len(pattern)
- charAfter = result[patternEnd] if patternEnd < len(result) else None
- if charBefore == '"' and charAfter == '"':
- isQuoted = True
-
- if isinstance(value, (list, dict)):
- replacement = json.dumps(value)
- elif isinstance(value, str):
- try:
- parsed = json.loads(value)
- if isinstance(parsed, (list, dict)):
- if isQuoted:
- escaped = json.dumps(value)
- replacement = escaped[1:-1]
- else:
- replacement = value
- else:
- if isQuoted:
- escaped = json.dumps(value)
- replacement = escaped[1:-1]
- else:
- replacement = value
- except (json.JSONDecodeError, ValueError):
- if isQuoted:
- escaped = json.dumps(value)
- replacement = escaped[1:-1]
- else:
- replacement = value
- else:
- replacement = str(value)
- result = result[:patternStart] + replacement + result[patternStart + len(pattern):]
- return result
-
diff --git a/modules/workflows/automation2/executionEngine.py b/modules/workflows/automation2/executionEngine.py
index 3ab08992..b51128f8 100644
--- a/modules/workflows/automation2/executionEngine.py
+++ b/modules/workflows/automation2/executionEngine.py
@@ -1,7 +1,10 @@
# Copyright (c) 2025 Patrick Motsch
# Main execution engine for automation2 graphs.
+import asyncio
import logging
+import time
+import uuid
from datetime import datetime, timezone
from typing import Dict, Any, List, Set, Optional
@@ -19,14 +22,89 @@ from modules.workflows.automation2.executors import (
FlowExecutor,
ActionNodeExecutor,
InputExecutor,
+ DataExecutor,
PauseForHumanTaskError,
PauseForEmailWaitError,
)
-from modules.features.automation2.nodeDefinitions import STATIC_NODE_TYPES
+from modules.features.graphicalEditor.portTypes import _normalizeToSchema
+from modules.features.graphicalEditor.nodeDefinitions import STATIC_NODE_TYPES
from modules.workflows.automation2.runEnvelope import normalize_run_envelope
logger = logging.getLogger(__name__)
+_NODE_DEF_BY_ID: Dict[str, dict] = {}
+
+# Registry of currently executing runs: runId -> context dict.
+# Used by requestRunStop() to set context["_stopped"] = True.
+_activeRunContexts: Dict[str, Dict[str, Any]] = {}
+
+
+def requestRunStop(runId: str) -> bool:
+ """Request a running workflow to stop at the next node boundary.
+
+ Returns True if the run was found and flagged, False otherwise.
+ """
+ ctx = _activeRunContexts.get(runId)
+ if ctx is not None:
+ ctx["_stopped"] = True
+ logger.info("requestRunStop: flagged runId=%s for stop", runId)
+ return True
+ logger.warning("requestRunStop: runId=%s not found in active runs", runId)
+ return False
+
+
+def getActiveRunIds() -> list:
+ """Return list of currently executing run IDs."""
+ return list(_activeRunContexts.keys())
+
+
+def _getNodeDef(nodeType: str) -> Optional[dict]:
+ """Lookup static node definition by type id (cached)."""
+ if not _NODE_DEF_BY_ID:
+ for nd in STATIC_NODE_TYPES:
+ _NODE_DEF_BY_ID[nd["id"]] = nd
+ return _NODE_DEF_BY_ID.get(nodeType)
+
+
+def _outputSchemaForNode(nodeType: str) -> Optional[str]:
+ """Return the output port schema name for a node type (port 0), or None."""
+ nd = _getNodeDef(nodeType)
+ if not nd:
+ return None
+ ports = nd.get("outputPorts")
+ if isinstance(ports, dict):
+ p0 = ports.get(0) or ports.get("0")
+ if isinstance(p0, dict):
+ return p0.get("schema")
+ return None
+
+
+def _isMergeNode(nodeType: str) -> bool:
+ return nodeType == "flow.merge"
+
+
+def _allMergePredecessorsReady(
+ nodeId: str,
+ connectionMap: Dict[str, List],
+ nodeOutputs: Dict[str, Any],
+) -> bool:
+ """For flow.merge: check that every connected predecessor has produced output or was skipped."""
+ for src, _, _ in connectionMap.get(nodeId, []):
+ if src not in nodeOutputs:
+ return False
+ return True
+
+
+def _normalizeResult(result: Any, nodeType: str) -> Any:
+ """Apply _normalizeToSchema if the node has a declared output schema."""
+ schema = _outputSchemaForNode(nodeType)
+ if schema and schema != "Transit" and isinstance(result, dict):
+ try:
+ return _normalizeToSchema(result, schema)
+ except Exception as e:
+ logger.warning(f"_normalizeResult failed for nodeType={nodeType}, schema={schema}: {e}")
+ return result
+
def _getNodeTypeIds(services: Any = None) -> Set[str]:
"""Collect all known node type IDs from static definitions."""
@@ -40,10 +118,8 @@ def _is_node_on_active_path(
) -> bool:
"""
Return True if this node receives input only from active branches.
- - flow.ifElse: only one output (0=yes, 1=no) is active; uses "branch".
- - flow.switch: only one output (0, 1, 2, ...) is active; uses "match".
- Nodes connected to inactive outputs must be skipped.
- Also skip when a predecessor was skipped (not in nodeOutputs).
+ Transit envelopes: routing metadata is in out["_meta"] (branch/match).
+ Legacy format: branch/match directly on out.
"""
for src, source_output, _ in connectionMap.get(nodeId, []):
out = nodeOutputs.get(src)
@@ -51,14 +127,18 @@ def _is_node_on_active_path(
return False
if not isinstance(out, dict):
continue
- branch = out.get("branch")
- match = out.get("match")
+
+ # Transit envelope: metadata in _meta
+ meta = out.get("_meta", {}) if out.get("_transit") else out
+ branch = meta.get("branch")
+ match = meta.get("match")
+
active_output = None
if branch is not None:
active_output = branch
elif match is not None:
if match < 0:
- return False # switch: no case matched, skip all downstream
+ return False
active_output = match
if active_output is not None and source_output != active_output:
return False
@@ -75,13 +155,132 @@ def _getExecutor(
return TriggerExecutor()
if nodeType.startswith("flow."):
return FlowExecutor()
- if nodeType.startswith("ai.") or nodeType.startswith("email.") or nodeType.startswith("sharepoint.") or nodeType.startswith("clickup.") or nodeType.startswith("file."):
+ if nodeType.startswith("data."):
+ return DataExecutor()
+ if (nodeType.startswith("ai.") or nodeType.startswith("email.")
+ or nodeType.startswith("sharepoint.") or nodeType.startswith("clickup.")
+ or nodeType.startswith("file.") or nodeType.startswith("trustee.")):
return ActionNodeExecutor(services)
if nodeType.startswith("input.") and automation2_interface:
return InputExecutor(automation2_interface)
return None
+_stepMeta: Dict[str, Dict[str, str]] = {}
+
+
+def _serializableOutputs(nodeOutputs: Dict[str, Any]) -> Dict[str, Any]:
+ """Return a shallow copy of nodeOutputs without the circular _context reference."""
+ return {k: v for k, v in nodeOutputs.items() if k != "_context"}
+
+
+def _emitStepEvent(runId: str, stepData: Dict[str, Any]) -> None:
+ """Emit a step-log SSE event to any listening client for this run."""
+ try:
+ from modules.serviceCenter.core.serviceStreaming.eventManager import get_event_manager
+ em = get_event_manager()
+ queueId = f"run-trace-{runId}"
+ if not em.has_queue(queueId):
+ return
+ import asyncio
+ loop = asyncio.get_event_loop()
+ if loop.is_running():
+ asyncio.ensure_future(em.emit_event(queueId, "step", stepData, event_category="tracing"))
+ except Exception as e:
+ logger.warning(f"_emitStepEvent failed for runId={runId}: {e}")
+
+
+def _createStepLog(iface, runId: str, nodeId: str, nodeType: str, status: str = "running", inputSnapshot: Dict = None) -> Optional[str]:
+ """Create an AutoStepLog entry. Returns the step log ID or None if interface unavailable."""
+ if not iface or not runId:
+ return None
+ try:
+ from modules.features.graphicalEditor.datamodelFeatureGraphicalEditor import AutoStepLog
+ stepId = str(uuid.uuid4())
+ startedAt = time.time()
+ iface.db.recordCreate(AutoStepLog, {
+ "id": stepId,
+ "runId": runId,
+ "nodeId": nodeId,
+ "nodeType": nodeType,
+ "status": status,
+ "inputSnapshot": inputSnapshot or {},
+ "startedAt": startedAt,
+ })
+ _stepMeta[stepId] = {"runId": runId, "nodeId": nodeId, "nodeType": nodeType}
+ _emitStepEvent(runId, {
+ "id": stepId, "runId": runId, "nodeId": nodeId, "nodeType": nodeType,
+ "status": status, "startedAt": startedAt,
+ })
+ return stepId
+ except Exception as e:
+ logger.debug("Could not create AutoStepLog: %s", e)
+ return None
+
+
+def _updateStepLog(iface, stepId: str, status: str, output: Dict = None, error: str = None,
+ durationMs: int = None, tokensUsed: int = 0, retryCount: int = 0) -> None:
+ """Update an AutoStepLog entry with results."""
+ if not iface or not stepId:
+ return
+ try:
+ from modules.features.graphicalEditor.datamodelFeatureGraphicalEditor import AutoStepLog
+ completedAt = time.time()
+ updates: Dict[str, Any] = {
+ "status": status,
+ "completedAt": completedAt,
+ }
+ if output is not None:
+ updates["output"] = output
+ if error is not None:
+ updates["error"] = error
+ if durationMs is not None:
+ updates["durationMs"] = durationMs
+ if tokensUsed:
+ updates["tokensUsed"] = tokensUsed
+ if retryCount:
+ updates["retryCount"] = retryCount
+ iface.db.recordModify(AutoStepLog, stepId, updates)
+ meta = _stepMeta.pop(stepId, None)
+ if meta:
+ _emitStepEvent(meta["runId"], {
+ "id": stepId, "runId": meta["runId"], "nodeId": meta["nodeId"],
+ "nodeType": meta["nodeType"], "status": status,
+ "completedAt": completedAt, "durationMs": durationMs,
+ "error": error, "tokensUsed": tokensUsed, "retryCount": retryCount,
+ })
+ except Exception as e:
+ logger.debug("Could not update AutoStepLog %s: %s", stepId, e)
+
+
+async def _executeWithRetry(executor, node, context, maxRetries: int = 0, retryDelaySeconds: float = 1.0):
+ """Execute a node with optional retry policy from node parameters."""
+ params = node.get("parameters") or {}
+ retries = params.get("retryMaxAttempts", maxRetries)
+ delay = params.get("retryDelaySeconds", retryDelaySeconds)
+ attempt = 0
+ lastError = None
+ while attempt <= retries:
+ try:
+ result = await executor.execute(node, context)
+ return result, attempt
+ except (PauseForHumanTaskError, PauseForEmailWaitError):
+ raise
+ except Exception as e:
+ lastError = e
+ attempt += 1
+ if attempt <= retries:
+ logger.warning(
+ "Node %s failed (attempt %d/%d), retrying in %.1fs: %s",
+ node.get("id"), attempt, retries + 1, delay, e,
+ )
+ await asyncio.sleep(delay)
+ delay = min(delay * 2, 60)
+ else:
+ raise lastError
+ raise lastError
+
+
async def executeGraph(
graph: Dict[str, Any],
services: Any,
@@ -94,6 +293,7 @@ async def executeGraph(
startAfterNodeId: Optional[str] = None,
runId: Optional[str] = None,
run_envelope: Optional[Dict[str, Any]] = None,
+ label: Optional[str] = None,
) -> Dict[str, Any]:
"""
Execute automation2 graph. Returns { success, nodeOutputs, error?, stopped? }.
@@ -135,6 +335,19 @@ async def executeGraph(
nodeOutputs: Dict[str, Any] = dict(initialNodeOutputs or {})
is_resume = startAfterNodeId is not None
+
+ if is_resume and initialNodeOutputs and startAfterNodeId:
+ resumedNode = next((n for n in nodes if n.get("id") == startAfterNodeId), None)
+ if resumedNode:
+ resumedType = resumedNode.get("type", "")
+ resumedOutput = initialNodeOutputs.get(startAfterNodeId)
+ if isinstance(resumedOutput, dict):
+ schema = _outputSchemaForNode(resumedType)
+ if schema and schema != "Transit":
+ try:
+ initialNodeOutputs[startAfterNodeId] = _normalizeToSchema(resumedOutput, schema)
+ except Exception as valErr:
+ logger.warning("executeGraph resume: schema validation failed for %s: %s", startAfterNodeId, valErr)
if not runId and automation2_interface and workflowId and not is_resume:
run_context = {
"connectionMap": connectionMap,
@@ -147,13 +360,30 @@ async def executeGraph(
run_context["mandateId"] = mandateId
if instanceId:
run_context["instanceId"] = instanceId
+
+ run_label = label
+ if not run_label and automation2_interface and workflowId:
+ try:
+ wfObj = automation2_interface.getWorkflow(workflowId)
+ if wfObj:
+ wfDict = wfObj if isinstance(wfObj, dict) else (
+ wfObj.model_dump() if hasattr(wfObj, "model_dump") else {}
+ )
+ run_label = wfDict.get("label")
+ except Exception:
+ pass
+ if not run_label:
+ ts = datetime.now(timezone.utc).strftime("%d.%m.%Y %H:%M")
+ run_label = f"Manuell ({ts})"
+
run = automation2_interface.createRun(
workflowId=workflowId,
nodeOutputs=nodeOutputs,
context=run_context,
+ label=run_label,
)
runId = run.get("id") if run else None
- logger.info("executeGraph created run %s", runId)
+ logger.info("executeGraph created run %s label=%s", runId, run_label)
env_for_run = normalize_run_envelope(run_envelope, user_id=userId)
@@ -170,9 +400,15 @@ async def executeGraph(
"_orderedNodes": ordered,
"runEnvelope": env_for_run,
}
+ # _context key in nodeOutputs for system variable resolution
+ nodeOutputs["_context"] = context
+
+ if runId:
+ _activeRunContexts[runId] = context
skip_until_passed = bool(startAfterNodeId)
processed_in_loop: Set[str] = set()
+ _aggregateAccumulators: Dict[str, list] = {}
# Check for loop resume: run was paused inside a loop, we're resuming for next iteration
run = automation2_interface.getRun(runId) if (runId and automation2_interface) else None
@@ -202,25 +438,53 @@ async def executeGraph(
if not executor:
nodeOutputs[bnid] = None
continue
+ _rStepStart = time.time()
+ _rInputSnap = {"_loopItem": items[next_index], "_loopIndex": next_index}
+ for _rSrc, _, _ in connectionMap.get(bnid, []):
+ if _rSrc in nodeOutputs:
+ _rInputSnap[_rSrc] = nodeOutputs[_rSrc]
+ _rStepId = _createStepLog(automation2_interface, runId, bnid, body_node.get("type", ""), "running", _rInputSnap)
try:
- result = await executor.execute(body_node, context)
+ result, _rRetry = await _executeWithRetry(executor, body_node, context)
+ if body_node.get("type") == "data.aggregate":
+ if bnid not in _aggregateAccumulators:
+ _aggregateAccumulators[bnid] = []
+ accItems = result.get("items", [result]) if isinstance(result, dict) else [result]
+ _aggregateAccumulators[bnid].extend(accItems)
nodeOutputs[bnid] = result
- logger.info("executeGraph loop resume body node %s done (iter %d)", bnid, next_index)
+ _rDur = int((time.time() - _rStepStart) * 1000)
+ _updateStepLog(automation2_interface, _rStepId, "completed",
+ output=result if isinstance(result, dict) else {"value": result},
+ durationMs=_rDur, retryCount=_rRetry)
+ logger.info("executeGraph loop resume body node %s done (iter %d, retries=%d)", bnid, next_index, _rRetry)
except PauseForHumanTaskError as e:
+ _updateStepLog(automation2_interface, _rStepId, "completed",
+ durationMs=int((time.time() - _rStepStart) * 1000))
if automation2_interface:
run_ctx = dict(run.get("context") or {})
run_ctx["_loopState"] = {"loopNodeId": loop_node_id, "currentIndex": next_index, "items": items}
- automation2_interface.updateRun(e.runId, status="paused", nodeOutputs=dict(nodeOutputs), currentNodeId=e.nodeId, context=run_ctx)
- return {"success": False, "paused": True, "taskId": e.taskId, "runId": e.runId, "nodeId": e.nodeId, "nodeOutputs": dict(nodeOutputs)}
+ automation2_interface.updateRun(e.runId, status="paused", nodeOutputs=_serializableOutputs(nodeOutputs), currentNodeId=e.nodeId, context=run_ctx)
+ return {"success": False, "paused": True, "taskId": e.taskId, "runId": e.runId, "nodeId": e.nodeId, "nodeOutputs": _serializableOutputs(nodeOutputs)}
+ except PauseForEmailWaitError as e:
+ _updateStepLog(automation2_interface, _rStepId, "completed",
+ durationMs=int((time.time() - _rStepStart) * 1000))
+ raise
except Exception as ex:
+ _updateStepLog(automation2_interface, _rStepId, "failed",
+ error=str(ex), durationMs=int((time.time() - _rStepStart) * 1000))
logger.exception("executeGraph loop body node %s FAILED: %s", bnid, ex)
nodeOutputs[bnid] = {"error": str(ex), "success": False}
if runId and automation2_interface:
- automation2_interface.updateRun(runId, status="failed", nodeOutputs=nodeOutputs)
- return {"success": False, "error": str(ex), "nodeOutputs": nodeOutputs, "failedNode": bnid}
+ automation2_interface.updateRun(runId, status="failed", nodeOutputs=_serializableOutputs(nodeOutputs))
+ if runId:
+ _activeRunContexts.pop(runId, None)
+ return {"success": False, "error": str(ex), "nodeOutputs": _serializableOutputs(nodeOutputs), "failedNode": bnid, "runId": runId}
next_index += 1
if loop_node_id:
nodeOutputs[loop_node_id] = {"items": items, "count": len(items)}
+ for aggId, accItems in _aggregateAccumulators.items():
+ nodeOutputs[aggId] = {"items": accItems, "count": len(accItems), "_success": True}
+ _aggregateAccumulators.clear()
processed_in_loop = set(body_ids) | {loop_node_id}
for i, node in enumerate(ordered):
@@ -237,6 +501,13 @@ async def executeGraph(
nodeType = node.get("type", "")
if not _is_node_on_active_path(nodeId, connectionMap, nodeOutputs):
logger.info("executeGraph step %d/%d: nodeId=%s SKIP (inactive branch)", i + 1, len(ordered), nodeId)
+ _skipInputSnap = {"_skipReason": "inactive_branch"}
+ for _sSrc, _, _ in connectionMap.get(nodeId, []):
+ if _sSrc in nodeOutputs:
+ _skipInputSnap[_sSrc] = nodeOutputs[_sSrc]
+ _skipStepId = _createStepLog(automation2_interface, runId, nodeId, nodeType, status="skipped", inputSnapshot=_skipInputSnap)
+ if _skipStepId:
+ _updateStepLog(automation2_interface, _skipStepId, "skipped")
continue
executor = _getExecutor(nodeType, services, automation2_interface)
logger.info(
@@ -251,8 +522,15 @@ async def executeGraph(
nodeOutputs[nodeId] = None
logger.debug("executeGraph node %s: no executor, output=None", nodeId)
continue
+ _stepStartMs = time.time()
+ _stepId = None
try:
if nodeType == "flow.loop":
+ _loopInputSnap = {}
+ for _lSrc, _, _ in connectionMap.get(nodeId, []):
+ if _lSrc in nodeOutputs:
+ _loopInputSnap[_lSrc] = nodeOutputs[_lSrc]
+ _stepId = _createStepLog(automation2_interface, runId, nodeId, nodeType, "running", _loopInputSnap)
result = await executor.execute(node, context)
items = result.get("items") or []
body_ids = getLoopBodyNodeIds(nodeId, connectionMap)
@@ -272,35 +550,98 @@ async def executeGraph(
if not bexec:
nodeOutputs[bnid] = None
continue
+ _bStepStart = time.time()
+ _bInputSnap = {"_loopItem": item, "_loopIndex": idx}
+ for _bSrc, _, _ in connectionMap.get(bnid, []):
+ if _bSrc in nodeOutputs:
+ _bInputSnap[_bSrc] = nodeOutputs[_bSrc]
+ _bStepId = _createStepLog(automation2_interface, runId, bnid, body_node.get("type", ""), "running", _bInputSnap)
try:
- bres = await bexec.execute(body_node, context)
+ bres, _bRetry = await _executeWithRetry(bexec, body_node, context)
+ # data.aggregate: accumulate instead of overwrite
+ if body_node.get("type") == "data.aggregate":
+ if bnid not in _aggregateAccumulators:
+ _aggregateAccumulators[bnid] = []
+ accItems = bres.get("items", [bres]) if isinstance(bres, dict) else [bres]
+ _aggregateAccumulators[bnid].extend(accItems)
nodeOutputs[bnid] = bres
- logger.info("executeGraph loop body node %s done (iter %d)", bnid, idx)
+ _bDur = int((time.time() - _bStepStart) * 1000)
+ _updateStepLog(automation2_interface, _bStepId, "completed",
+ output=bres if isinstance(bres, dict) else {"value": bres},
+ durationMs=_bDur, retryCount=_bRetry)
+ logger.info("executeGraph loop body node %s done (iter %d, retries=%d)", bnid, idx, _bRetry)
except PauseForHumanTaskError as e:
+ _updateStepLog(automation2_interface, _bStepId, "completed",
+ durationMs=int((time.time() - _bStepStart) * 1000))
if runId and automation2_interface:
run = automation2_interface.getRun(runId) or {}
run_ctx = dict(run.get("context") or {})
run_ctx["_loopState"] = {"loopNodeId": nodeId, "currentIndex": idx, "items": items}
- automation2_interface.updateRun(e.runId, status="paused", nodeOutputs=dict(nodeOutputs), currentNodeId=e.nodeId, context=run_ctx)
- return {"success": False, "paused": True, "taskId": e.taskId, "runId": e.runId, "nodeId": e.nodeId, "nodeOutputs": dict(nodeOutputs)}
+ automation2_interface.updateRun(e.runId, status="paused", nodeOutputs=_serializableOutputs(nodeOutputs), currentNodeId=e.nodeId, context=run_ctx)
+ return {"success": False, "paused": True, "taskId": e.taskId, "runId": e.runId, "nodeId": e.nodeId, "nodeOutputs": _serializableOutputs(nodeOutputs)}
+ except PauseForEmailWaitError as e:
+ _updateStepLog(automation2_interface, _bStepId, "completed",
+ durationMs=int((time.time() - _bStepStart) * 1000))
+ raise
except Exception as ex:
+ _updateStepLog(automation2_interface, _bStepId, "failed",
+ error=str(ex), durationMs=int((time.time() - _bStepStart) * 1000))
logger.exception("executeGraph loop body node %s FAILED: %s", bnid, ex)
nodeOutputs[bnid] = {"error": str(ex), "success": False}
if runId and automation2_interface:
- automation2_interface.updateRun(runId, status="failed", nodeOutputs=nodeOutputs)
- return {"success": False, "error": str(ex), "nodeOutputs": nodeOutputs, "failedNode": bnid}
+ automation2_interface.updateRun(runId, status="failed", nodeOutputs=_serializableOutputs(nodeOutputs))
+ if runId:
+ _activeRunContexts.pop(runId, None)
+ return {"success": False, "error": str(ex), "nodeOutputs": _serializableOutputs(nodeOutputs), "failedNode": bnid, "runId": runId}
nodeOutputs[nodeId] = {"items": items, "count": len(items)}
+ # Finalize aggregate accumulators after loop
+ for aggId, accItems in _aggregateAccumulators.items():
+ nodeOutputs[aggId] = {"items": accItems, "count": len(accItems), "_success": True}
+ _aggregateAccumulators.clear()
+ _updateStepLog(automation2_interface, _stepId, "completed",
+ output={"iterationCount": len(items), "items": len(items)},
+ durationMs=int((time.time() - _stepStartMs) * 1000))
logger.info("executeGraph flow.loop done: %d iterations", len(items))
- else:
- result = await executor.execute(node, context)
+ elif _isMergeNode(nodeType):
+ if not _allMergePredecessorsReady(nodeId, connectionMap, nodeOutputs):
+ logger.info("executeGraph node %s (flow.merge): waiting — not all predecessors ready, deferring", nodeId)
+ nodeOutputs[nodeId] = None
+ continue
+ _stepStartMs = time.time()
+ _inputSnap = {}
+ for src, _, _ in connectionMap.get(nodeId, []):
+ if src in nodeOutputs:
+ _inputSnap[src] = nodeOutputs[src]
+ _stepId = _createStepLog(automation2_interface, runId, nodeId, nodeType, "running", _inputSnap)
+ result, retryCount = await _executeWithRetry(executor, node, context)
+ result = _normalizeResult(result, nodeType)
nodeOutputs[nodeId] = result
+ else:
+ _stepStartMs = time.time()
+ _inputSnap = {}
+ for src, _, _ in connectionMap.get(nodeId, []):
+ if src in nodeOutputs:
+ _inputSnap[src] = nodeOutputs[src]
+ _stepId = _createStepLog(automation2_interface, runId, nodeId, nodeType, "running", _inputSnap)
+ result, retryCount = await _executeWithRetry(executor, node, context)
+ result = _normalizeResult(result, nodeType)
+ nodeOutputs[nodeId] = result
+ _durMs = int((time.time() - _stepStartMs) * 1000)
+ _tokens = result.get("tokensUsed", 0) if isinstance(result, dict) else 0
+ _updateStepLog(automation2_interface, _stepId, "completed",
+ output=result if isinstance(result, dict) else {"value": result},
+ durationMs=_durMs, tokensUsed=_tokens, retryCount=retryCount)
logger.info(
- "executeGraph node %s done: result_type=%s result_keys=%s",
+ "executeGraph node %s done: result_type=%s result_keys=%s retries=%d duration=%dms",
nodeId,
type(result).__name__,
list(result.keys()) if isinstance(result, dict) else "n/a",
+ retryCount,
+ _durMs,
)
except PauseForHumanTaskError as e:
+ _updateStepLog(automation2_interface, _stepId, "completed",
+ durationMs=int((time.time() - _stepStartMs) * 1000))
logger.info("executeGraph paused for human task %s", e.taskId)
return {
"success": False,
@@ -308,14 +649,15 @@ async def executeGraph(
"taskId": e.taskId,
"runId": e.runId,
"nodeId": e.nodeId,
- "nodeOutputs": dict(nodeOutputs),
+ "nodeOutputs": _serializableOutputs(nodeOutputs),
}
except PauseForEmailWaitError as e:
+ _updateStepLog(automation2_interface, _stepId, "completed",
+ durationMs=int((time.time() - _stepStartMs) * 1000))
logger.info("executeGraph paused for email wait (run %s, node %s)", e.runId, e.nodeId)
- # Start email poller on-demand (only runs while workflows wait for email)
try:
from modules.interfaces.interfaceDbApp import getRootInterface
- from modules.features.automation2.emailPoller import ensureRunning
+ from modules.features.graphicalEditor.emailPoller import ensureRunning
root = getRootInterface()
event_user = root.getUserByUsername("event") if root else None
if event_user:
@@ -338,7 +680,7 @@ async def executeGraph(
automation2_interface.updateRun(
e.runId,
status="paused",
- nodeOutputs=dict(nodeOutputs),
+ nodeOutputs=_serializableOutputs(nodeOutputs),
currentNodeId=e.nodeId,
context=run_ctx,
)
@@ -348,29 +690,59 @@ async def executeGraph(
"waitReason": "email",
"runId": e.runId,
"nodeId": e.nodeId,
- "nodeOutputs": dict(nodeOutputs),
+ "nodeOutputs": _serializableOutputs(nodeOutputs),
}
except Exception as e:
logger.exception("executeGraph node %s (%s) FAILED: %s", nodeId, nodeType, e)
nodeOutputs[nodeId] = {"error": str(e), "success": False}
+ _durMs = int((time.time() - _stepStartMs) * 1000)
+ _updateStepLog(automation2_interface, _stepId, "failed", error=str(e), durationMs=_durMs)
if runId and automation2_interface:
- automation2_interface.updateRun(runId, status="failed", nodeOutputs=nodeOutputs)
+ automation2_interface.updateRun(runId, status="failed", nodeOutputs=_serializableOutputs(nodeOutputs))
+ if runId:
+ _emitStepEvent(runId, {"type": "run_failed", "runId": runId, "status": "failed", "error": str(e), "failedNode": nodeId})
+ try:
+ _wfObj = automation2_interface.getWorkflow(workflowId) if automation2_interface and workflowId else None
+ _wfDict = _wfObj if isinstance(_wfObj, dict) else (
+ _wfObj.model_dump() if hasattr(_wfObj, "model_dump") else {}
+ ) if _wfObj else {}
+ _shouldNotify = _wfDict.get("notifyOnFailure", True) if _wfDict else True
+ if _shouldNotify:
+ from modules.workflows.scheduler.mainScheduler import _notifyRunFailed
+ _notifyRunFailed(
+ workflowId or "", runId or "", str(e),
+ mandateId=mandateId,
+ workflowLabel=_wfDict.get("label"),
+ )
+ except Exception as notifyErr:
+ logger.warning(f"executeGraph: failure notification failed for run={runId}: {notifyErr}")
+ if runId:
+ _activeRunContexts.pop(runId, None)
return {
"success": False,
"error": str(e),
- "nodeOutputs": nodeOutputs,
+ "nodeOutputs": _serializableOutputs(nodeOutputs),
"failedNode": nodeId,
+ "runId": runId,
}
+ _safeOutputs = _serializableOutputs(nodeOutputs)
+ _wasStopped = context.get("_stopped", False)
+ _finalStatus = "stopped" if _wasStopped else "completed"
+
if runId and automation2_interface:
- automation2_interface.updateRun(runId, status="completed", nodeOutputs=nodeOutputs)
+ automation2_interface.updateRun(runId, status=_finalStatus, nodeOutputs=_safeOutputs)
+ if runId:
+ _emitStepEvent(runId, {"type": "run_complete", "runId": runId, "status": _finalStatus})
+ _activeRunContexts.pop(runId, None)
logger.info(
"executeGraph complete: success=True nodeOutputs_keys=%s stopped=%s",
list(nodeOutputs.keys()),
- context.get("_stopped", False),
+ _wasStopped,
)
return {
"success": True,
- "nodeOutputs": nodeOutputs,
- "stopped": context.get("_stopped", False),
+ "nodeOutputs": _safeOutputs,
+ "stopped": _wasStopped,
+ "runId": runId,
}
diff --git a/modules/workflows/automation2/executors/__init__.py b/modules/workflows/automation2/executors/__init__.py
index 2b6768df..4d2180c3 100644
--- a/modules/workflows/automation2/executors/__init__.py
+++ b/modules/workflows/automation2/executors/__init__.py
@@ -5,12 +5,14 @@ from .triggerExecutor import TriggerExecutor
from .flowExecutor import FlowExecutor
from .actionNodeExecutor import ActionNodeExecutor
from .inputExecutor import InputExecutor, PauseForHumanTaskError, PauseForEmailWaitError
+from .dataExecutor import DataExecutor
__all__ = [
"TriggerExecutor",
"FlowExecutor",
"ActionNodeExecutor",
"InputExecutor",
+ "DataExecutor",
"PauseForHumanTaskError",
"PauseForEmailWaitError",
]
diff --git a/modules/workflows/automation2/executors/actionNodeExecutor.py b/modules/workflows/automation2/executors/actionNodeExecutor.py
index ab19964d..73116a2e 100644
--- a/modules/workflows/automation2/executors/actionNodeExecutor.py
+++ b/modules/workflows/automation2/executors/actionNodeExecutor.py
@@ -1,26 +1,30 @@
# Copyright (c) 2025 Patrick Motsch
-# Action node executor - maps ai.*, email.*, sharepoint.*, clickup.* to method actions via ActionExecutor.
+# Action node executor - maps ai.*, email.*, sharepoint.*, clickup.*, file.*, trustee.* to method actions.
#
-# Unified handover format for all nodes:
-# - Node output: { success, error?, documents, documentList, data } – documents and documentList are identical
-# - Input merge: downstream receives documents via _getDocumentsFromUpstream(inp) – reads documents or documentList
-# - Incoming email handover: (context, documentList, reply_to, subject) via _formatEmailOutputAsContext / _unpackIncomingEmail
+# Typed Port System: no heuristic merging. Uses INPUT_EXTRACTORS for wire-handover,
+# DataRef for explicit parameter mapping, and _normalizeToSchema for output normalization.
import json
import logging
import re
from typing import Dict, Any, List, Optional
+from modules.features.graphicalEditor.portTypes import (
+ INPUT_EXTRACTORS,
+ _normalizeToSchema,
+ _normalizeError,
+ _unwrapTransit,
+)
+
logger = logging.getLogger(__name__)
-# UserConnection.id (UUID) when connectionId could not be mapped to connection:authority:username
_USER_CONNECTION_ID_RE = re.compile(
r"^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$",
re.IGNORECASE,
)
-def _is_user_connection_id(val: Any) -> bool:
+def _isUserConnectionId(val: Any) -> bool:
if val is None or isinstance(val, (dict, list)):
return False
s = str(val).strip()
@@ -28,8 +32,8 @@ def _is_user_connection_id(val: Any) -> bool:
def _getNodeDefinition(nodeType: str) -> Optional[Dict[str, Any]]:
- """Get node definition by type id for _method, _action, _paramMap."""
- from modules.features.automation2.nodeDefinitions import STATIC_NODE_TYPES
+ """Get node definition by type id."""
+ from modules.features.graphicalEditor.nodeDefinitions import STATIC_NODE_TYPES
for node in STATIC_NODE_TYPES:
if node.get("id") == nodeType:
return node
@@ -40,14 +44,11 @@ def _resolveConnectionIdToReference(chatService, connectionId: str, services=Non
"""
Resolve connectionId (UserConnection.id) to connectionReference format.
connectionReference format: connection:{authority}:{externalUsername}
- Falls back to interfaceDbApp.getUserConnectionById when chatService resolution fails.
"""
if not connectionId:
return None
- # Already in reference format
if isinstance(connectionId, str) and connectionId.startswith("connection:"):
return connectionId
- # Try chatService first
if chatService:
try:
connections = chatService.getUserConnections()
@@ -61,7 +62,6 @@ def _resolveConnectionIdToReference(chatService, connectionId: str, services=Non
return f"connection:{authority}:{username}"
except Exception as e:
logger.debug("_resolveConnectionIdToReference chatService: %s", e)
- # Fallback: interfaceDbApp.getUserConnectionById (automation2 may not have chat.getUserConnections)
app = getattr(services, "interfaceDbApp", None) if services else None
if app and hasattr(app, "getUserConnectionById"):
try:
@@ -79,344 +79,8 @@ def _resolveConnectionIdToReference(chatService, connectionId: str, services=Non
return None
-def _extractEmailContentFromUpstream(inp: Any) -> Optional[Dict[str, Any]]:
- """
- Extract {subject, body, to} from upstream node output (e.g. AI node returning JSON).
- Expects JSON like {"subject": "...", "body": "...", "to": "..."} in documentData.
- Uses unified handover: documents/documentList.
- """
- if not inp:
- return None
- import json
- docs = _getDocumentsFromUpstream(inp)
- if not docs:
- return None
- doc = docs[0] if isinstance(docs, list) else docs
- raw = getattr(doc, "documentData", None) if hasattr(doc, "documentData") else (doc.get("documentData") if isinstance(doc, dict) else None)
- if not raw:
- return None
- try:
- data = json.loads(raw) if isinstance(raw, str) else raw
- if isinstance(data, dict) and data.get("subject") and data.get("body"):
- return {
- "subject": str(data.get("subject", "")),
- "body": str(data.get("body", "")),
- "to": data.get("to"),
- }
- except (json.JSONDecodeError, TypeError):
- pass
- return None
-
-
-def _extractContextFromUpstream(inp: Any) -> Optional[str]:
- """
- Extract plain text context from upstream node output (e.g. AI node returning txt).
- Use when _extractEmailContentFromUpstream returns None – the generated document content
- (email body, summary, etc.) should be passed as context to email.draftEmail.
- Uses unified handover: documents/documentList.
- """
- if not inp:
- return None
- docs = _getDocumentsFromUpstream(inp)
- if not docs:
- return None
- doc = docs[0] if docs else None
- if not doc:
- return None
- raw = getattr(doc, "documentData", None) if hasattr(doc, "documentData") else (doc.get("documentData") or doc.get("content") if isinstance(doc, dict) else None)
- if not raw:
- return None
- if isinstance(raw, bytes):
- return raw.decode("utf-8", errors="replace").strip()
- s = str(raw).strip()
- return s if s else None
-
-
-def _payloadToContext(payload: Any) -> Optional[str]:
- """Convert payload (e.g. from form) to readable text for document context."""
- if payload is None:
- return None
- if isinstance(payload, str) and payload.strip():
- return payload.strip()
- if isinstance(payload, dict):
- try:
- import json
- return json.dumps(payload, ensure_ascii=False, indent=2)
- except (TypeError, ValueError):
- lines = [f"{k}: {v}" for k, v in payload.items()]
- return "\n".join(lines) if lines else None
- return str(payload).strip() if str(payload).strip() else None
-
-
-def _getContextFromUpstream(out: Any) -> Optional[str]:
- """
- Get context from upstream node output. Prefers explicit 'context' field;
- falls back to documents/documentList (first doc's documentData), then payload.
- Handles: AI (context), form (payload or top-level field dict), upload (document refs).
- """
- if not out or not isinstance(out, dict):
- return None
- ctx = out.get("context")
- if isinstance(ctx, str) and ctx.strip():
- return ctx.strip()
- doc_ctx = _extractContextFromUpstream(out)
- if doc_ctx:
- return doc_ctx
- payload = out.get("payload")
- if payload is not None:
- return _payloadToContext(payload)
- if "documents" not in out and "documentList" not in out and "success" not in out:
- return _payloadToContext(out)
- return None
-
-
-def _extractContextFromResult(result: Any) -> Optional[str]:
- """
- Extract plain text context from ActionResult (ActionExecutor result).
- Used to populate 'context' in unified output for AI nodes.
- """
- if not result or not hasattr(result, "documents"):
- return None
- docs = result.documents or []
- if not docs:
- return None
- doc = docs[0]
- raw = getattr(doc, "documentData", None) if hasattr(doc, "documentData") else (doc.get("documentData") if isinstance(doc, dict) else None)
- if not raw:
- return None
- if isinstance(raw, bytes):
- return raw.decode("utf-8", errors="replace").strip()
- return str(raw).strip() if str(raw).strip() else None
-
-
-def _gatherAttachmentDocumentsFromUpstream(
- nodeId: str,
- inputSources: Dict[str, Dict[int, tuple]],
- nodeOutputs: Dict[str, Any],
- orderedNodes: List[Dict],
- visited: Optional[set] = None,
-) -> List[Any]:
- """
- Walk upstream from nodeId through AI nodes to collect file documents (e.g. from sharepoint.downloadFile).
- Used when email.draftEmail has AI upstream – attachments come from file nodes, not AI output.
- """
- visited = visited or set()
- if nodeId in visited:
- return []
- visited.add(nodeId)
- docs = []
- src = inputSources.get(nodeId, {}).get(0)
- if not src:
- return []
- srcId, _ = src
- srcNode = next((n for n in (orderedNodes or []) if n.get("id") == srcId), None)
- srcType = (srcNode or {}).get("type", "")
- out = nodeOutputs.get(srcId)
-
- if srcType in ("sharepoint.downloadFile", "sharepoint.readFile"):
- if isinstance(out, dict):
- for d in _getDocumentsFromUpstream(out):
- if isinstance(d, dict) and (d.get("documentData") or (d.get("validationMetadata") or {}).get("fileId")):
- docs.append(d)
- elif hasattr(d, "documentData") or (getattr(d, "validationMetadata", None) or {}).get("fileId"):
- docs.append(d.model_dump() if hasattr(d, "model_dump") else d)
- elif srcType.startswith("ai."):
- docs.extend(
- _gatherAttachmentDocumentsFromUpstream(srcId, inputSources, nodeOutputs, orderedNodes, visited)
- )
- return docs
-
-
-def _getDocumentsFromUpstream(out: Any) -> list:
- """Unified: extract documents list from any node output.
- Supports: documents, documentList, data.documents.
- Also: input.upload result format { file, files, fileIds } - converts to doc refs with validationMetadata.fileId.
- """
- if not out or not isinstance(out, dict):
- return []
- docs = out.get("documents") or out.get("documentList")
- if not docs and isinstance(out.get("data"), dict):
- docs = out.get("data", {}).get("documents") or out.get("data", {}).get("documentList")
- if not docs:
- # input.upload task result: { file: {id, fileName}, files: [...], fileIds: [...] }
- def _file_to_doc(f: Any) -> Optional[Dict[str, Any]]:
- if isinstance(f, dict):
- fid = f.get("id")
- fname = f.get("fileName") or f.get("filename") or "file"
- if fid:
- return {
- "documentName": fname,
- "fileName": fname,
- "validationMetadata": {"fileId": str(fid)},
- }
- elif isinstance(f, str):
- return {"documentName": "file", "fileName": "file", "validationMetadata": {"fileId": f}}
- return None
-
- file_obj = out.get("file")
- files_arr = out.get("files") or []
- file_ids = out.get("fileIds") or []
- if file_obj:
- d = _file_to_doc(file_obj)
- if d:
- docs = [d]
- if not docs and files_arr:
- docs = [d for f in files_arr for d in [_file_to_doc(f)] if d]
- if not docs and file_ids:
- docs = [_file_to_doc(fid) for fid in file_ids if _file_to_doc(fid)]
- if not docs:
- return []
- return docs if isinstance(docs, (list, tuple)) else [docs]
-
-
-def _unpackIncomingEmail(incoming: Optional[tuple]) -> Optional[tuple]:
- """
- Unified handover: (context, documentList, reply_to, subject).
- Returns (ctx, doc_list, reply_to, subject) or None.
- """
- if not incoming or not isinstance(incoming, (list, tuple)):
- return None
- ctx = incoming[0] if len(incoming) > 0 else None
- doc_list = incoming[1] if len(incoming) > 1 else []
- reply_to = incoming[2] if len(incoming) > 2 else None
- subject = incoming[3] if len(incoming) > 3 else ""
- return (ctx, doc_list or [], reply_to, subject)
-
-
-def _getIncomingEmailFromUpstream(
- nodeId: str,
- inputSources: Dict[str, Dict[int, tuple]],
- nodeOutputs: Dict[str, Any],
- orderedNodes: List[Dict],
-) -> Optional[tuple]:
- """
- Walk upstream from draftEmail to find email.checkEmail/searchEmail and return (context, documentList).
- context = formatted incoming email(s) for composeAndDraftEmail.
- documentList = documents from the email node for attachment/context.
- """
- src = inputSources.get(nodeId, {}).get(0)
- if not src:
- return None
- srcId, _ = src
- srcNode = next((n for n in (orderedNodes or []) if n.get("id") == srcId), None)
- srcType = (srcNode or {}).get("type", "")
-
- # Direct connection to email node
- if srcType in ("email.checkEmail", "email.searchEmail"):
- out = nodeOutputs.get(srcId)
- return _formatEmailOutputAsContext(out)
-
- # Connected via AI node: walk one more step to email source
- if srcType.startswith("ai."):
- src2 = inputSources.get(srcId, {}).get(0)
- if not src2:
- return None
- emailNodeId, _ = src2
- emailNode = next((n for n in (orderedNodes or []) if n.get("id") == emailNodeId), None)
- if (emailNode or {}).get("type") in ("email.checkEmail", "email.searchEmail"):
- out = nodeOutputs.get(emailNodeId)
- return _formatEmailOutputAsContext(out)
- return None
-
-
-def _formatEmailOutputAsContext(out: Any) -> Optional[tuple]:
- """Format email node output as (context, documentList, reply_to, subject) for composeAndDraftEmail.
- reply_to = sender address of first email (recipient for the reply).
- subject = original subject (for Re: prefix).
- Returns unified handover: (text, files/docs, reply_to, subject).
- """
- if not out:
- return None
- docs = _getDocumentsFromUpstream(out)
- if not docs:
- return None
- doc = docs[0] if isinstance(docs, list) else docs
- raw = getattr(doc, "documentData", None) if hasattr(doc, "documentData") else (doc.get("documentData") if isinstance(doc, dict) else None)
- if not raw:
- return None
- import json
- try:
- data = json.loads(raw) if isinstance(raw, str) else raw
- except (json.JSONDecodeError, TypeError):
- return None
- if not isinstance(data, dict):
- return None
- # readEmails: data.emails.emails | searchEmails: data.searchResults.results
- emails_data = data.get("emails") or {}
- emails_list = emails_data.get("emails", []) if isinstance(emails_data, dict) else []
- if not emails_list:
- search_results = data.get("searchResults") or {}
- emails_list = search_results.get("results", []) if isinstance(search_results, dict) else []
- if not emails_list:
- return None
- reply_to = None
- first_subject = ""
- parts = ["Reply to the following email(s):", ""]
- for i, em in enumerate(emails_list[:5]): # max 5
- if not isinstance(em, dict):
- continue
- fr = em.get("from", em.get("sender", {}))
- addr = fr.get("emailAddress", {}) if isinstance(fr, dict) else {}
- from_str = addr.get("address", "") or addr.get("name", "")
- if from_str and not reply_to:
- reply_to = addr.get("address", "") or from_str
- subj = em.get("subject", "")
- if subj and not first_subject:
- first_subject = subj
- body = em.get("bodyPreview", "") or (em.get("body") or {}).get("content", "") if isinstance(em.get("body"), dict) else ""
- if body and len(str(body)) > 1500:
- body = str(body)[:1500] + "..."
- parts.append(f"From: {from_str}")
- parts.append(f"Subject: {subj}")
- parts.append(f"Content:\n{body}")
- parts.append("")
- if reply_to:
- parts.insert(2, f"Recipient (reply to this address): {reply_to}")
- parts.insert(3, "")
- context = "\n".join(parts).strip()
- return (context, docs, reply_to, first_subject)
-
-
-def _buildSearchQuery(
- query: str = None,
- fromAddress: str = None,
- toAddress: str = None,
- subjectContains: str = None,
- bodyContains: str = None,
- hasAttachment: bool = None,
- filter: str = None,
-) -> str:
- """
- Build Microsoft Graph $search query from discrete params.
- Uses KQL: from:, to:, subject:, body:, hasattachments: (supported by Graph API).
- """
- if filter and str(filter).strip():
- return str(filter).strip()
- parts = []
- if query and str(query).strip():
- parts.append(str(query).strip())
- if fromAddress and str(fromAddress).strip():
- safe = str(fromAddress).strip().replace('"', '')
- parts.append(f'from:{safe}')
- if toAddress and str(toAddress).strip():
- safe = str(toAddress).strip().replace('"', '')
- parts.append(f'to:{safe}')
- if subjectContains and str(subjectContains).strip():
- safe = str(subjectContains).strip().replace('"', '')
- parts.append(f'subject:{safe}')
- if bodyContains and str(bodyContains).strip():
- safe = str(bodyContains).strip().replace('"', '')
- parts.append(f'body:{safe}')
- if hasAttachment is True:
- parts.append("hasattachments:true")
- return " ".join(parts) if parts else "*"
-
-
def _buildEmailFilter(fromAddress: str = None, subjectContains: str = None, hasAttachment: bool = None) -> str:
- """
- Build Microsoft Graph API $filter string from discrete email filter params.
- Used for email.checkEmail (and trigger.newEmail).
- """
+ """Build Microsoft Graph API $filter string."""
parts = []
if fromAddress and str(fromAddress).strip():
safe = str(fromAddress).strip().replace("'", "''")
@@ -429,102 +93,106 @@ def _buildEmailFilter(fromAddress: str = None, subjectContains: str = None, hasA
return " and ".join(parts) if parts else ""
-def _buildActionParams(
- node: Dict[str, Any],
- nodeDef: Dict[str, Any],
- resolvedParams: Dict[str, Any],
- chatService,
- services=None,
-) -> Dict[str, Any]:
- """
- Build params for ActionExecutor from node parameters using _paramMap.
- Resolves connectionId -> connectionReference.
- Handles _contextFrom for composite params (e.g. email.draftEmail subject+body -> context).
- """
- params = dict(resolvedParams)
- paramMap = nodeDef.get("_paramMap") or {}
- contextFrom = nodeDef.get("_contextFrom") or []
+def _buildSearchQuery(
+ query: str = None, fromAddress: str = None, toAddress: str = None,
+ subjectContains: str = None, bodyContains: str = None,
+ hasAttachment: bool = None, filterStr: str = None,
+) -> str:
+ """Build Microsoft Graph $search query from discrete params."""
+ if filterStr and str(filterStr).strip():
+ return str(filterStr).strip()
+ parts = []
+ if query and str(query).strip():
+ parts.append(str(query).strip())
+ if fromAddress and str(fromAddress).strip():
+ parts.append(f'from:{str(fromAddress).strip().replace(chr(34), "")}')
+ if toAddress and str(toAddress).strip():
+ parts.append(f'to:{str(toAddress).strip().replace(chr(34), "")}')
+ if subjectContains and str(subjectContains).strip():
+ parts.append(f'subject:{str(subjectContains).strip().replace(chr(34), "")}')
+ if bodyContains and str(bodyContains).strip():
+ parts.append(f'body:{str(bodyContains).strip().replace(chr(34), "")}')
+ if hasAttachment is True:
+ parts.append("hasattachments:true")
+ return " ".join(parts) if parts else "*"
- # email.checkEmail: build filter from discrete params (fromAddress, subjectContains, hasAttachment)
- nodeType = node.get("type", "")
- if nodeType == "email.checkEmail":
- built = _buildEmailFilter(
- fromAddress=params.get("fromAddress"),
- subjectContains=params.get("subjectContains"),
- hasAttachment=params.get("hasAttachment"),
- )
- raw_filter = (params.get("filter") or "").strip()
- params["filter"] = built if built else (raw_filter if raw_filter else None)
- params.pop("fromAddress", None)
- params.pop("subjectContains", None)
- params.pop("hasAttachment", None)
- # email.searchEmail: build query from discrete params (fromAddress, toAddress, subjectContains, bodyContains, hasAttachment)
- if nodeType == "email.searchEmail":
- built = _buildSearchQuery(
- query=params.get("query"),
- fromAddress=params.get("fromAddress"),
- toAddress=params.get("toAddress"),
- subjectContains=params.get("subjectContains"),
- bodyContains=params.get("bodyContains"),
- hasAttachment=params.get("hasAttachment"),
- filter=params.get("filter"),
- )
- params["query"] = built
- params.pop("fromAddress", None)
- params.pop("toAddress", None)
- params.pop("subjectContains", None)
- params.pop("bodyContains", None)
- params.pop("hasAttachment", None)
- params.pop("filter", None)
+def _resolveConnectionParam(params: Dict, chatService, services) -> None:
+ """Resolve connectionReference if it looks like a UUID (UserConnection.id)."""
+ connRef = params.get("connectionReference")
+ if connRef and _isUserConnectionId(connRef):
+ resolved = _resolveConnectionIdToReference(chatService, connRef, services)
+ if resolved:
+ params["connectionReference"] = resolved
- # Resolve connectionId to connectionReference
- if "connectionId" in params:
- connId = params.get("connectionId")
- if connId:
- ref = _resolveConnectionIdToReference(chatService, connId, services)
- if ref:
- params["connectionReference"] = ref
- elif _is_user_connection_id(connId):
- # Automation2 worker often has no chat user connection list; pass UUID through —
- # method helpers (e.g. ClickupConnectionHelper) resolve via interfaceDbApp.getUserConnectionById.
- params["connectionReference"] = str(connId).strip()
- else:
- logger.warning(f"Could not resolve connectionId {connId} to connectionReference")
- params.pop("connectionId", None)
- # Build context from multiple params (e.g. subject + body for draft email)
- if contextFrom:
- parts = []
- for key in contextFrom:
- val = params.get(key)
- if val:
- if key == "subject":
- parts.append(f"Subject: {val}")
- elif key == "body":
- parts.append(f"Body:\n{val}")
- else:
- parts.append(str(val))
- if parts:
- params["context"] = "\n\n".join(parts)
- for k in contextFrom:
- params.pop(k, None)
+def _applyEmailCheckFilter(params: Dict) -> None:
+ """Build filter from discrete email params for email.checkEmail."""
+ built = _buildEmailFilter(
+ fromAddress=params.get("fromAddress"),
+ subjectContains=params.get("subjectContains"),
+ hasAttachment=params.get("hasAttachment"),
+ )
+ rawFilter = (params.get("filter") or "").strip()
+ params["filter"] = built if built else (rawFilter if rawFilter else None)
+ for k in ("fromAddress", "subjectContains", "hasAttachment"):
+ params.pop(k, None)
- # Apply paramMap: node param name -> action param name
- result = {}
- mappedNodeKeys = {nodeKey for nodeKey, actionKey in paramMap.items() if actionKey and nodeKey in params}
- for nodeKey, actionKey in paramMap.items():
- if nodeKey in params and actionKey:
- result[actionKey] = params[nodeKey]
- # Pass through params not used as source for mapping
- for k, v in params.items():
- if k not in mappedNodeKeys and k not in result:
- result[k] = v
- return result
+
+def _applyEmailSearchQuery(params: Dict) -> None:
+ """Build query from discrete email params for email.searchEmail."""
+ built = _buildSearchQuery(
+ query=params.get("query"),
+ fromAddress=params.get("fromAddress"),
+ toAddress=params.get("toAddress"),
+ subjectContains=params.get("subjectContains"),
+ bodyContains=params.get("bodyContains"),
+ hasAttachment=params.get("hasAttachment"),
+ filterStr=params.get("filter"),
+ )
+ params["query"] = built
+ for k in ("fromAddress", "toAddress", "subjectContains", "bodyContains", "hasAttachment", "filter"):
+ params.pop(k, None)
+
+
+def _wireHandover(nodeDef: Dict, inputSources: Dict, nodeOutputs: Dict, params: Dict) -> None:
+ """Apply wire-handover: extract fields from upstream using INPUT_EXTRACTORS."""
+ if 0 not in inputSources:
+ return
+ srcId, _ = inputSources[0]
+ upstream = nodeOutputs.get(srcId)
+ if not upstream or not isinstance(upstream, dict):
+ return
+
+ data = _unwrapTransit(upstream)
+ if not isinstance(data, dict):
+ return
+
+ inputPorts = nodeDef.get("inputPorts", {})
+ port0 = inputPorts.get(0, {})
+ accepts = port0.get("accepts", [])
+
+ for schemaName in accepts:
+ if schemaName == "Transit":
+ continue
+ extractor = INPUT_EXTRACTORS.get(schemaName)
+ if extractor:
+ extracted = extractor(data)
+ if extracted:
+ for k, v in extracted.items():
+ params.setdefault(k, v)
+ return
+
+
+def _getOutputSchemaName(nodeDef: Dict) -> str:
+ """Get the output schema name from the node definition."""
+ outputPorts = nodeDef.get("outputPorts", {})
+ port0 = outputPorts.get(0, {})
+ return port0.get("schema", "ActionResult")
class ActionNodeExecutor:
- """Execute ai.*, email.*, sharepoint.*, clickup.* nodes by mapping to method actions."""
+ """Execute action nodes by mapping to method actions via ActionExecutor."""
def __init__(self, services: Any):
self.services = services
@@ -534,7 +202,7 @@ class ActionNodeExecutor:
node: Dict[str, Any],
context: Dict[str, Any],
) -> Any:
- from modules.features.automation2.nodeRegistry import getNodeTypeToMethodAction
+ from modules.features.graphicalEditor.nodeRegistry import getNodeTypeToMethodAction
from modules.workflows.automation2.graphUtils import resolveParameterReferences
from modules.workflows.processing.core.actionExecutor import ActionExecutor
@@ -549,313 +217,147 @@ class ActionNodeExecutor:
return None
methodName, actionName = methodAction
- logger.info("ActionNodeExecutor node %s method=%s action=%s", nodeId, methodName, actionName)
+ nodeDef = _getNodeDefinition(nodeType) or {}
+ outputSchema = _getOutputSchemaName(nodeDef)
- nodeDef = _getNodeDefinition(nodeType)
+ # 1. Resolve parameters (DataRef, SystemVar, Static)
params = dict(node.get("parameters") or {})
resolvedParams = resolveParameterReferences(params, context.get("nodeOutputs", {}))
- if nodeType == "clickup.updateTask":
- from modules.workflows.automation2.clickupTaskUpdateMerge import merge_clickup_task_update_entries
+ # 2. Wire-handover via extractors (fills missing params from upstream)
+ inputSources = context.get("inputSources", {}).get(nodeId, {})
+ _wireHandover(nodeDef, inputSources, context.get("nodeOutputs", {}), resolvedParams)
+
+ # 3. Apply defaults from parameter definitions
+ for pDef in nodeDef.get("parameters", []):
+ pName = pDef.get("name")
+ if pName and pName not in resolvedParams and "default" in pDef:
+ resolvedParams[pName] = pDef["default"]
+
+ # 4. Resolve connectionReference
+ chatService = getattr(self.services, "chat", None)
+ _resolveConnectionParam(resolvedParams, chatService, self.services)
+
+ # 5. Node-type-specific param transformations
+ if nodeType == "email.checkEmail":
+ _applyEmailCheckFilter(resolvedParams)
+ elif nodeType == "email.searchEmail":
+ _applyEmailSearchQuery(resolvedParams)
+ elif nodeType == "clickup.updateTask":
+ from modules.workflows.automation2.clickupTaskUpdateMerge import merge_clickup_task_update_entries
merge_clickup_task_update_entries(resolvedParams)
- # Merge input from connected nodes (unified handover: documents/documentList, context)
- inputSources = context.get("inputSources", {}).get(nodeId, {})
- if 0 in inputSources:
- srcId, _ = inputSources[0]
- inp = context.get("nodeOutputs", {}).get(srcId)
- docs = _getDocumentsFromUpstream(inp) if isinstance(inp, dict) else []
- if docs:
- resolvedParams.setdefault("documentList", docs)
- elif inp is not None:
- resolvedParams.setdefault("input", inp)
- # file.create: build context from contentSources (concatenated) or fallback to upstream
- if nodeType == "file.create":
- sources = resolvedParams.get("contentSources")
- if not isinstance(sources, list):
- sources = [resolvedParams.get("contentSource")] if resolvedParams.get("contentSource") else []
- parts = []
- for s in sources:
- if s is None or s == "":
- continue
- if isinstance(s, str):
- txt = s.strip()
- elif isinstance(s, dict):
- txt = _payloadToContext(s) if s else ""
- else:
- txt = str(s)
- if txt:
- parts.append(txt)
- upstream_context = _getContextFromUpstream(inp)
- if parts:
- parts_joined = "\n\n".join(parts)
- # When upstream is AI and user only selected prompt, use full context (prompt + response)
- if (
- isinstance(inp, dict)
- and upstream_context
- and len(upstream_context) > len(parts_joined)
- ):
- prompt_only = (inp.get("prompt") or "").strip()
- if prompt_only and parts_joined.strip() == prompt_only:
- resolvedParams["context"] = upstream_context
- else:
- resolvedParams["context"] = parts_joined
- else:
- resolvedParams["context"] = parts_joined
- else:
- if upstream_context:
- resolvedParams["context"] = upstream_context
-
- # ai.prompt with email upstream: inject actual email content into prompt so AI has context
- # (getChatDocumentsFromDocumentList fails in automation2 – workflow has no messages)
- if nodeType.startswith("ai."):
- orderedNodes = context.get("_orderedNodes") or []
- if 0 in inputSources:
- srcId, _ = inputSources[0]
- srcNode = next((n for n in orderedNodes if n.get("id") == srcId), None)
- srcType = (srcNode or {}).get("type", "")
- if srcType in ("email.checkEmail", "email.searchEmail"):
- incoming = _unpackIncomingEmail(_getIncomingEmailFromUpstream(
- nodeId,
- context.get("inputSources", {}),
- context.get("nodeOutputs", {}),
- orderedNodes,
- ))
- if incoming:
- ctx, _doc_list, _reply_to, _ = incoming
- if ctx and ctx.strip():
- # Set "prompt" so _paramMap (prompt→aiPrompt) passes it through to ai.process
- base_prompt = (
- (resolvedParams.get("prompt") or resolvedParams.get("aiPrompt") or "")
- ).strip()
- resolvedParams["prompt"] = (
- f"Eingehende E-Mail:\n{ctx}\n\nAufgabe: {base_prompt}"
- if base_prompt
- else f"Eingehende E-Mail:\n{ctx}"
- )
- logger.debug("ai.prompt: injected email context from upstream %s", srcType)
-
- chatService = getattr(self.services, "chat", None)
- actionParams = _buildActionParams(node, nodeDef or {}, resolvedParams, chatService, self.services)
-
- # ai.prompt: use simpleMode by default – direct AI call, no document pipeline (chapters/sections)
- # For short prompts like "formuliere eine passende email" this avoids ~13 AI calls and verbose output
- if nodeType == "ai.prompt" and "simpleMode" not in actionParams:
- actionParams["simpleMode"] = True
-
- # email.checkEmail: pause and wait for new email (background poller will resume)
+ # 6. email.checkEmail pause for email wait
if nodeType == "email.checkEmail":
runId = context.get("_runId")
workflowId = context.get("workflowId")
- connRef = actionParams.get("connectionReference")
+ connRef = resolvedParams.get("connectionReference")
if runId and workflowId and connRef:
from modules.workflows.automation2.executors import PauseForEmailWaitError
waitConfig = {
"connectionReference": connRef,
- "folder": actionParams.get("folder", "Inbox"),
- "limit": min(int(actionParams.get("limit") or 10), 50),
- "filter": actionParams.get("filter"),
+ "folder": resolvedParams.get("folder", "Inbox"),
+ "limit": min(int(resolvedParams.get("limit") or 10), 50),
+ "filter": resolvedParams.get("filter"),
}
raise PauseForEmailWaitError(runId=runId, nodeId=nodeId, waitConfig=waitConfig)
- # Fallback: no pause (calls readEmails directly) – needs runId, workflowId, connectionReference
- if not runId or not workflowId:
- logger.warning(
- "email.checkEmail not pausing (runId=%s workflowId=%s) – run must be saved/executed as workflow",
- runId,
- workflowId,
- )
- elif not connRef:
- logger.warning(
- "email.checkEmail not pausing – connectionReference missing (check connectionId/config)",
- )
- # email.draftEmail: use AI output as emailContent if available; else pass incoming email as context
+ # 7. AI nodes: simpleMode by default
+ if nodeType == "ai.prompt" and "simpleMode" not in resolvedParams:
+ resolvedParams["simpleMode"] = True
+
+ # 8. Build context for email.draftEmail from subject + body
if nodeType == "email.draftEmail":
- inputSources = context.get("inputSources", {})
- nodeOutputs = context.get("nodeOutputs", {})
- orderedNodes = context.get("_orderedNodes") or []
- if 0 in inputSources.get(nodeId, {}):
- srcId, _ = inputSources[nodeId][0]
- srcNode = next((n for n in orderedNodes if n.get("id") == srcId), None)
- srcType = (srcNode or {}).get("type", "")
- if srcType.startswith("ai."):
- inp = nodeOutputs.get(srcId)
- email_content = _extractEmailContentFromUpstream(inp)
- # Reply flow: get incoming email metadata (replyTo, subject, original docs) when email->AI->draft
- incoming = _unpackIncomingEmail(_getIncomingEmailFromUpstream(nodeId, inputSources, nodeOutputs, orderedNodes))
- reply_to = None
- reply_subject = None
- reply_docs = []
- if incoming:
- inc_ctx, doc_list, reply_to, first_subject = incoming
- reply_docs = doc_list
- reply_subject = ("Re: " + first_subject) if first_subject else None
- if email_content:
- # Merge reply metadata when available
- merged = dict(email_content)
- if reply_to and not merged.get("to"):
- merged["to"] = reply_to if isinstance(reply_to, list) else [reply_to]
- if reply_subject and not merged.get("subject"):
- merged["subject"] = reply_subject
- actionParams["emailContent"] = merged
- actionParams["context"] = merged.get("body", "") or "(from connected AI node)"
- if reply_docs:
- actionParams["replySourceDocuments"] = reply_docs
- # Attachments: gather from file nodes upstream of AI (e.g. downloadFile -> AI -> email)
- attachment_docs = _gatherAttachmentDocumentsFromUpstream(
- nodeId, inputSources, nodeOutputs, orderedNodes
- )
- if attachment_docs:
- existing = actionParams.get("documentList") or []
- # Prefer file docs from upstream; append any existing that look like binary attachments
- def _is_binary_attachment(d):
- if isinstance(d, dict) and d.get("documentData"):
- try:
- import json
- json.loads(d["documentData"])
- return False # JSON = email content, not attachment
- except (TypeError, ValueError):
- return True
- return bool(isinstance(d, dict) and (d.get("validationMetadata") or {}).get("fileId"))
- extra = [x for x in (existing if isinstance(existing, list) else []) if _is_binary_attachment(x)]
- actionParams["documentList"] = attachment_docs + extra
- if not email_content:
- # AI returns plain text or context: use as email body directly (no extra AI call)
- ctx = _getContextFromUpstream(inp)
- if ctx:
- # Reply flow: get incoming email metadata (replyTo, subject, original docs)
- incoming = _unpackIncomingEmail(_getIncomingEmailFromUpstream(nodeId, inputSources, nodeOutputs, orderedNodes))
- reply_to = None
- reply_subject = None
- reply_docs = []
- if incoming:
- inc_ctx, doc_list, reply_to, first_subject = incoming
- reply_docs = doc_list
- reply_subject = ("Re: " + first_subject) if first_subject else None
- actionParams["emailContent"] = {
- "subject": reply_subject or actionParams.get("subject", "Draft"),
- "body": ctx,
- "to": [reply_to] if reply_to else (actionParams.get("to") or []),
- }
- actionParams["context"] = ctx
- if reply_to and not actionParams.get("to"):
- actionParams["to"] = [reply_to]
- # Reply flow: attach original email(s) for proper reply
- if reply_docs:
- actionParams["replySourceDocuments"] = reply_docs
- else:
- # Fallback: incoming email from upstream (AI returned nothing usable)
- incoming = _unpackIncomingEmail(_getIncomingEmailFromUpstream(nodeId, inputSources, nodeOutputs, orderedNodes))
- if incoming:
- inc_ctx, doc_list, reply_to, first_subject = incoming
- actionParams["context"] = inc_ctx
- if doc_list and not actionParams.get("documentList"):
- actionParams["documentList"] = doc_list
- if reply_to and not actionParams.get("to"):
- actionParams["to"] = [reply_to]
- if first_subject and not actionParams.get("subject"):
- actionParams["subject"] = "Re: " + first_subject
- actionParams["replySourceDocuments"] = doc_list
- else:
- doc_count = len(_getDocumentsFromUpstream(inp))
- logger.warning(
- "email.draftEmail: AI upstream returned %d doc(s) but context extraction failed (no subject/body, no plain text). "
- "Ensure AI node outputs document with documentData.",
- doc_count,
- )
- actionParams["context"] = "(no context extracted from upstream – check AI node output)"
- elif srcType in ("sharepoint.downloadFile", "sharepoint.readFile"):
- # File itself is the context: pass as attachment, use filename as minimal context (no content extraction)
- if not actionParams.get("context"):
- inp = nodeOutputs.get(srcId)
- docs = _getDocumentsFromUpstream(inp)
- doc = docs[0] if docs else None
- name = None
- if isinstance(doc, dict):
- name = doc.get("documentName") or doc.get("fileName")
- elif doc and hasattr(doc, "documentName"):
- name = getattr(doc, "documentName", None) or getattr(doc, "fileName", None)
- ctx = name if name else "Attachment"
- actionParams["context"] = ctx
- actionParams["emailContent"] = {
- "subject": actionParams.get("subject", "Draft"),
- "body": ctx,
- "to": actionParams.get("to"),
- }
- # documentList already merged from upstream (file as attachment)
- else:
- # Direct connection to email.checkEmail/searchEmail: use incoming email as context
- if not actionParams.get("context"):
- incoming = _unpackIncomingEmail(_getIncomingEmailFromUpstream(nodeId, inputSources, nodeOutputs, orderedNodes))
- if incoming:
- inc_ctx, doc_list, reply_to, first_subject = incoming
- actionParams["context"] = inc_ctx
- if doc_list and not actionParams.get("documentList"):
- actionParams["documentList"] = doc_list
- if reply_to and not actionParams.get("to"):
- actionParams["to"] = [reply_to]
- if first_subject and not actionParams.get("subject"):
- actionParams["subject"] = "Re: " + first_subject
- actionParams["replySourceDocuments"] = doc_list
+ subject = resolvedParams.get("subject", "")
+ body = resolvedParams.get("body", "")
+ if subject or body:
+ contextParts = []
+ if subject:
+ contextParts.append(f"Subject: {subject}")
+ if body:
+ contextParts.append(f"Body:\n{body}")
+ resolvedParams["context"] = "\n\n".join(contextParts)
+ resolvedParams.pop("subject", None)
+ resolvedParams.pop("body", None)
- # Generic context handover: when upstream provides documents, pass first doc as content for actions that expect it
- docList = actionParams.get("documentList") or resolvedParams.get("documentList")
- if docList and "content" not in actionParams:
- first = docList[0] if isinstance(docList, list) and docList else docList
- # Actions like sharepoint.uploadFile / clickup.uploadAttachment consume content from context
- actionParams["content"] = first
+ # 9. file.create: build context from upstream
+ if nodeType == "file.create" and "context" not in resolvedParams:
+ if 0 in inputSources:
+ srcId, _ = inputSources[0]
+ upstream = context.get("nodeOutputs", {}).get(srcId)
+ if upstream and isinstance(upstream, dict):
+ data = _unwrapTransit(upstream)
+ ctx = ""
+ if isinstance(data, dict):
+ ctx = data.get("context") or data.get("response") or data.get("text") or ""
+ if ctx:
+ resolvedParams["context"] = ctx
- executor = ActionExecutor(self.services)
- logger.info("ActionNodeExecutor node %s calling executeAction(%s, %s)", nodeId, methodName, actionName)
- result = await executor.executeAction(methodName, actionName, actionParams)
+ # 10. Pass upstream documents as documentList if available
+ # Use truthiness check: empty values ([], "", None) from static graph params
+ # must not block automatic upstream population via wire connections.
+ if not resolvedParams.get("documentList") and 0 in inputSources:
+ srcId, _ = inputSources[0]
+ upstream = context.get("nodeOutputs", {}).get(srcId)
+ if upstream and isinstance(upstream, dict):
+ data = _unwrapTransit(upstream)
+ if isinstance(data, dict):
+ docs = data.get("documents") or data.get("documentList")
+ if docs:
+ resolvedParams["documentList"] = docs
- # Extract context from result for unified output (AI text for downstream file nodes)
- extracted_context = _extractContextFromResult(result) if result else None
+ # 11. Execute action
+ logger.info("ActionNodeExecutor node %s calling %s.%s", nodeId, methodName, actionName)
+ try:
+ executor = ActionExecutor(self.services)
+ result = await executor.executeAction(methodName, actionName, resolvedParams)
+ except Exception as e:
+ logger.exception("ActionNodeExecutor node %s FAILED: %s", nodeId, e)
+ return _normalizeError(e, outputSchema)
- # AI nodes: include prompt in output; context = prompt + AI response (für file.create etc.)
- prompt_text = (resolvedParams.get("prompt") or resolvedParams.get("aiPrompt") or "")
- if not isinstance(prompt_text, str):
- prompt_text = str(prompt_text) if prompt_text else ""
- prompt_text = (prompt_text or "").strip()
- if nodeType.startswith("ai.") and prompt_text:
- full_context = (
- f"{prompt_text}\n\n{extracted_context}" if extracted_context else prompt_text
- )
- else:
- full_context = extracted_context or ""
- out_prompt = prompt_text if nodeType.startswith("ai.") else ""
+ # 12. Build normalized output
+ docsList = [d.model_dump() if hasattr(d, "model_dump") else d for d in (result.documents or [])]
- docs_list = [d.model_dump() if hasattr(d, "model_dump") else d for d in (result.documents or [])]
+ extractedContext = ""
+ if result.documents:
+ doc = result.documents[0]
+ raw = getattr(doc, "documentData", None) if hasattr(doc, "documentData") else (doc.get("documentData") if isinstance(doc, dict) else None)
+ if raw:
+ extractedContext = raw.decode("utf-8", errors="replace").strip() if isinstance(raw, bytes) else str(raw).strip()
- # result = AI response text (for contentSources refs: prompt + context + result = full output, optionally duplicated)
- out_result = extracted_context if nodeType.startswith("ai.") else None
+ promptText = str(resolvedParams.get("aiPrompt") or resolvedParams.get("prompt") or "").strip()
out = {
"success": result.success,
"error": result.error,
- "documents": docs_list,
- "documentList": docs_list,
- "prompt": out_prompt,
- "context": full_context,
- "result": out_result,
+ "documents": docsList,
+ "documentList": docsList,
"data": result.model_dump() if hasattr(result, "model_dump") else {"success": result.success, "error": result.error},
}
- if result.success and docs_list and nodeType.startswith("clickup."):
+
+ if nodeType.startswith("ai."):
+ out["prompt"] = promptText
+ out["response"] = extractedContext
+ out["context"] = f"{promptText}\n\n{extractedContext}" if promptText and extractedContext else (extractedContext or promptText)
+ # Structured output
+ if extractedContext:
+ try:
+ parsed = json.loads(extractedContext)
+ if isinstance(parsed, dict):
+ out["responseData"] = parsed
+ except (json.JSONDecodeError, TypeError):
+ pass
+
+ if nodeType.startswith("clickup.") and result.success and docsList:
try:
- d0 = docs_list[0] if isinstance(docs_list[0], dict) else {}
+ d0 = docsList[0] if isinstance(docsList[0], dict) else {}
raw = d0.get("documentData")
if isinstance(raw, str) and raw.strip():
parsed = json.loads(raw)
if isinstance(parsed, dict) and parsed.get("id") is not None:
out["taskId"] = str(parsed["id"])
- out["clickupTask"] = parsed
+ out["task"] = parsed
except (json.JSONDecodeError, TypeError, ValueError):
pass
- logger.info(
- "ActionNodeExecutor node %s result: success=%s error=%s doc_count=%d",
- nodeId,
- result.success,
- result.error,
- len(out.get("documents", [])),
- )
- return out
+
+ return _normalizeToSchema(out, outputSchema)
diff --git a/modules/workflows/automation2/executors/dataExecutor.py b/modules/workflows/automation2/executors/dataExecutor.py
new file mode 100644
index 00000000..8da5cd75
--- /dev/null
+++ b/modules/workflows/automation2/executors/dataExecutor.py
@@ -0,0 +1,215 @@
+# Copyright (c) 2025 Patrick Motsch
+# Data manipulation node executor: data.aggregate, data.transform, data.filter.
+
+import logging
+from typing import Any, Dict
+
+from modules.features.graphicalEditor.portTypes import _unwrapTransit, _wrapTransit
+
+logger = logging.getLogger(__name__)
+
+
+class DataExecutor:
+ """Execute data.aggregate, data.transform, data.filter nodes."""
+
+ async def execute(
+ self,
+ node: Dict[str, Any],
+ context: Dict[str, Any],
+ ) -> Any:
+ nodeType = node.get("type", "")
+ nodeId = node.get("id", "")
+ nodeOutputs = context.get("nodeOutputs", {})
+ inputSources = context.get("inputSources", {}).get(nodeId, {})
+
+ logger.info("DataExecutor node %s type=%s", nodeId, nodeType)
+
+ if nodeType == "data.aggregate":
+ return await self._aggregate(node, nodeOutputs, nodeId, inputSources, context)
+ if nodeType == "data.transform":
+ return await self._transform(node, nodeOutputs, nodeId, inputSources)
+ if nodeType == "data.filter":
+ return await self._filter(node, nodeOutputs, nodeId, inputSources)
+
+ logger.debug("DataExecutor node %s unhandled type %s", nodeId, nodeType)
+ return None
+
+ async def _aggregate(
+ self,
+ node: Dict,
+ nodeOutputs: Dict,
+ nodeId: str,
+ inputSources: Dict,
+ context: Dict,
+ ) -> Any:
+ """
+ In loop context: accumulation is handled by the engine (_aggregateAccumulators).
+ Outside loop: collect the single input.
+ """
+ inp = self._getInput(inputSources, nodeOutputs)
+ mode = (node.get("parameters") or {}).get("mode", "collect")
+
+ if inp is None:
+ return {"items": [], "count": 0, "_success": True}
+
+ data = _unwrapTransit(inp) if isinstance(inp, dict) and inp.get("_transit") else inp
+
+ if mode == "collect":
+ items = [data] if data is not None else []
+ elif mode == "concat":
+ items = data if isinstance(data, list) else [data] if data is not None else []
+ elif mode == "sum":
+ val = data if isinstance(data, (int, float)) else 0
+ items = [val]
+ elif mode == "count":
+ items = [1] if data is not None else []
+ else:
+ items = [data] if data is not None else []
+
+ return {"items": items, "count": len(items), "_success": True}
+
+ async def _transform(
+ self,
+ node: Dict,
+ nodeOutputs: Dict,
+ nodeId: str,
+ inputSources: Dict,
+ ) -> Any:
+ """Apply mappings to restructure data."""
+ from modules.workflows.automation2.graphUtils import resolveParameterReferences
+
+ inp = self._getInput(inputSources, nodeOutputs)
+ data = _unwrapTransit(inp) if isinstance(inp, dict) and inp.get("_transit") else inp
+ mappings = (node.get("parameters") or {}).get("mappings", [])
+
+ result = {}
+ for mapping in mappings:
+ if not isinstance(mapping, dict):
+ continue
+ outputField = mapping.get("outputField")
+ if not outputField:
+ continue
+ source = mapping.get("source")
+ if source and isinstance(source, dict) and source.get("type") == "ref":
+ resolved = resolveParameterReferences(source, nodeOutputs)
+ result[outputField] = resolved
+ elif source and isinstance(source, dict) and source.get("type") == "value":
+ result[outputField] = source.get("value")
+ elif isinstance(data, dict) and mapping.get("sourceField"):
+ result[outputField] = data.get(mapping["sourceField"])
+ else:
+ result[outputField] = source
+
+ result["_success"] = True
+ return result
+
+ async def _filter(
+ self,
+ node: Dict,
+ nodeOutputs: Dict,
+ nodeId: str,
+ inputSources: Dict,
+ ) -> Any:
+ """Filter items by condition expression. Returns Transit envelope."""
+ inp = self._getInput(inputSources, nodeOutputs)
+ data = _unwrapTransit(inp) if isinstance(inp, dict) and inp.get("_transit") else inp
+ condition = (node.get("parameters") or {}).get("condition", "")
+
+ items = self._extractItems(data)
+ originalCount = len(items)
+
+ if not condition:
+ filtered = items
+ else:
+ filtered = [item for item in items if self._evalFilterCondition(item, condition)]
+
+ filteredData = data
+ if isinstance(data, dict):
+ filteredData = dict(data)
+ listKey = self._findListKey(data)
+ if listKey:
+ filteredData[listKey] = filtered
+ elif isinstance(data, list):
+ filteredData = filtered
+
+ return _wrapTransit(filteredData, {
+ "originalCount": originalCount,
+ "filteredCount": len(filtered),
+ })
+
+ def _getInput(self, inputSources: Dict, nodeOutputs: Dict) -> Any:
+ """Get data from the first connected input port."""
+ if 0 not in inputSources:
+ return None
+ srcId, _ = inputSources[0]
+ return nodeOutputs.get(srcId)
+
+ def _extractItems(self, data: Any) -> list:
+ """Extract the list of items from various data shapes."""
+ if isinstance(data, list):
+ return data
+ if isinstance(data, dict):
+ for key in ("items", "tasks", "emails", "files", "documents", "documentList"):
+ val = data.get(key)
+ if isinstance(val, list):
+ return val
+ return []
+
+ def _findListKey(self, data: Dict) -> str:
+ """Find the key that holds the main list in a dict."""
+ for key in ("items", "tasks", "emails", "files", "documents", "documentList"):
+ if isinstance(data.get(key), list):
+ return key
+ return ""
+
+ def _evalFilterCondition(self, item: Any, condition: Any) -> bool:
+ """
+ Evaluate a filter condition against a single item.
+ Supports structured conditions {field, operator, value} or simple string expressions.
+ """
+ if isinstance(condition, dict):
+ field = condition.get("field", "")
+ operator = condition.get("operator", "eq")
+ value = condition.get("value")
+ left = item.get(field) if isinstance(item, dict) else item
+ return self._compareValues(left, operator, value)
+
+ if isinstance(condition, str) and condition.strip():
+ try:
+ if isinstance(item, dict):
+ return bool(eval(condition, {"__builtins__": {}}, item))
+ return bool(item)
+ except Exception as e:
+ logger.warning(f"_evalFilterCondition eval failed for condition='{condition}': {e}")
+ return True
+
+ return True
+
+ def _compareValues(self, left: Any, operator: str, right: Any) -> bool:
+ """Compare two values with the given operator."""
+ if operator == "eq":
+ return left == right
+ if operator == "neq":
+ return left != right
+ if operator == "contains":
+ return right is not None and str(right) in str(left or "")
+ if operator == "startsWith":
+ return str(left or "").startswith(str(right or ""))
+ if operator == "isEmpty":
+ return left is None or left == "" or (isinstance(left, (list, dict)) and len(left) == 0)
+ if operator == "isNotEmpty":
+ return left is not None and left != "" and (not isinstance(left, (list, dict)) or len(left) > 0)
+ if operator in ("lt", "lte", "gt", "gte"):
+ try:
+ l = float(left) if left is not None else 0
+ r = float(right) if right is not None else 0
+ if operator == "lt":
+ return l < r
+ if operator == "lte":
+ return l <= r
+ if operator == "gt":
+ return l > r
+ return l >= r
+ except (TypeError, ValueError):
+ return False
+ return True
diff --git a/modules/workflows/automation2/executors/flowExecutor.py b/modules/workflows/automation2/executors/flowExecutor.py
index 0df17335..0d50aa4e 100644
--- a/modules/workflows/automation2/executors/flowExecutor.py
+++ b/modules/workflows/automation2/executors/flowExecutor.py
@@ -1,9 +1,11 @@
# Copyright (c) 2025 Patrick Motsch
-# Flow control node executor (ifElse, switch, loop).
+# Flow control node executor (ifElse, switch, loop, merge).
import logging
from typing import Any, Dict
+from modules.features.graphicalEditor.portTypes import _wrapTransit, _unwrapTransit
+
logger = logging.getLogger(__name__)
@@ -30,16 +32,20 @@ class FlowExecutor:
if nodeType == "flow.ifElse":
out = await self._ifElse(node, nodeOutputs, nodeId, inputSources)
- logger.info("FlowExecutor node %s ifElse -> %s", nodeId, out)
+ logger.info("FlowExecutor node %s ifElse -> branch=%s", nodeId, out.get("_meta", {}).get("branch"))
return out
if nodeType == "flow.switch":
out = await self._switch(node, nodeOutputs, nodeId, inputSources)
- logger.info("FlowExecutor node %s switch -> %s", nodeId, out)
+ logger.info("FlowExecutor node %s switch -> match=%s", nodeId, out.get("_meta", {}).get("match"))
return out
if nodeType == "flow.loop":
out = await self._loop(node, nodeOutputs, nodeId, inputSources)
logger.info("FlowExecutor node %s loop -> %s", nodeId, out)
return out
+ if nodeType == "flow.merge":
+ out = await self._merge(node, nodeOutputs, nodeId, inputSources, context)
+ logger.info("FlowExecutor node %s merge -> keys=%s", nodeId, list(out.keys()) if isinstance(out, dict) else None)
+ return out
logger.debug("FlowExecutor node %s unhandled type %s -> None", nodeId, nodeType)
return None
@@ -62,7 +68,10 @@ class FlowExecutor:
condParam = (node.get("parameters") or {}).get("condition")
inp = self._getInputData(nodeId, {nodeId: inputSources}, nodeOutputs)
ok = self._evalConditionParam(condParam, nodeOutputs)
- return {"branch": 0 if ok else 1, "conditionResult": ok, "input": inp}
+ return _wrapTransit(
+ _unwrapTransit(inp) if inp else inp,
+ {"branch": 0 if ok else 1, "conditionResult": ok},
+ )
def _evalConditionParam(self, condParam: Any, nodeOutputs: Dict) -> bool:
"""Evaluate condition: structured {type,ref,operator,value} or legacy string/ref."""
@@ -170,7 +179,8 @@ class FlowExecutor:
if a is None or b is None:
return False
return op(a, b)
- except Exception:
+ except Exception as e:
+ logger.warning(f"_compare_dates failed: left={left}, right={right}: {e}")
return False
def _file_exists(self, val: Any) -> bool:
@@ -192,7 +202,8 @@ class FlowExecutor:
if isinstance(resolved, str):
try:
return bool(eval(resolved))
- except Exception:
+ except Exception as e:
+ logger.warning(f"_evalCondition eval failed for expression: {e}")
return bool(resolved)
return bool(resolved)
@@ -201,10 +212,17 @@ class FlowExecutor:
from modules.workflows.automation2.graphUtils import resolveParameterReferences
value = resolveParameterReferences(valueExpr, nodeOutputs)
cases = (node.get("parameters") or {}).get("cases", [])
+ inp = self._getInputData(nodeId, {nodeId: inputSources}, nodeOutputs)
for i, c in enumerate(cases):
if self._evalSwitchCase(value, c):
- return {"match": i, "value": value}
- return {"match": -1, "value": value}
+ return _wrapTransit(
+ _unwrapTransit(inp) if inp else inp,
+ {"match": i, "value": value},
+ )
+ return _wrapTransit(
+ _unwrapTransit(inp) if inp else inp,
+ {"match": -1, "value": value},
+ )
def _evalSwitchCase(self, left: Any, case: Any) -> bool:
"""
@@ -265,8 +283,47 @@ class FlowExecutor:
if isinstance(items, list):
pass
elif isinstance(items, dict):
- # Convert form payload / object to list of {name, value} for "for each field"
items = [{"name": k, "value": v} for k, v in items.items()]
else:
items = [items] if items is not None else []
return {"items": items, "count": len(items)}
+
+ async def _merge(self, node: Dict, nodeOutputs: Dict, nodeId: str, inputSources: Dict, context: Dict) -> Any:
+ """Merge multiple branch inputs. mode: first | all | append."""
+ mode = (node.get("parameters") or {}).get("mode", "first")
+ inputs: Dict[int, Any] = {}
+ for portIdx, (srcId, srcOut) in inputSources.items():
+ out = nodeOutputs.get(srcId)
+ if out is not None:
+ inputs[portIdx] = _unwrapTransit(out)
+
+ first = None
+ merged: Dict = {}
+ for idx in sorted(inputs.keys()):
+ val = inputs[idx]
+ if first is None:
+ first = val
+ if isinstance(val, dict):
+ merged.update(val)
+
+ if mode == "first":
+ pass
+ elif mode == "all":
+ pass
+ elif mode == "append":
+ allItems = []
+ for val in inputs.values():
+ if isinstance(val, list):
+ allItems.extend(val)
+ elif isinstance(val, dict) and "items" in val:
+ allItems.extend(val["items"])
+ elif val is not None:
+ allItems.append(val)
+ merged["items"] = allItems
+
+ return {
+ "inputs": inputs,
+ "first": first,
+ "merged": merged,
+ "_success": True,
+ }
diff --git a/modules/workflows/automation2/graphUtils.py b/modules/workflows/automation2/graphUtils.py
index 0f79b882..1cd2dc3e 100644
--- a/modules/workflows/automation2/graphUtils.py
+++ b/modules/workflows/automation2/graphUtils.py
@@ -113,6 +113,11 @@ def validateGraph(graph: Dict[str, Any], nodeTypeIds: Set[str]) -> List[str]:
if nid not in nodeIds:
errors.append(f"Connection references non-existent node {nid}")
+ # Soft port compatibility check (warnings, not errors)
+ warnings = _checkPortCompatibility(nodes, connMap)
+ if warnings:
+ logger.info("validateGraph port warnings: %s", warnings)
+
if errors:
logger.debug("validateGraph errors: %s", errors)
else:
@@ -120,6 +125,55 @@ def validateGraph(graph: Dict[str, Any], nodeTypeIds: Set[str]) -> List[str]:
return errors
+def _checkPortCompatibility(
+ nodes: List[Dict],
+ connMap: Dict[str, List[Tuple[str, int, int]]],
+) -> List[str]:
+ """
+ Soft check: warn if connected port types are incompatible.
+ Returns warnings (never blocks execution).
+ """
+ from modules.features.graphicalEditor.nodeDefinitions import STATIC_NODE_TYPES
+
+ nodeDefMap = {n["id"]: n for n in STATIC_NODE_TYPES}
+ nodeById = {n["id"]: n for n in nodes if n.get("id")}
+ warnings = []
+
+ for tgt, pairs in connMap.items():
+ tgtNode = nodeById.get(tgt)
+ if not tgtNode:
+ continue
+ tgtDef = nodeDefMap.get(tgtNode.get("type", ""))
+ if not tgtDef:
+ continue
+ tgtInputPorts = tgtDef.get("inputPorts", {})
+
+ for src, srcOut, tgtIn in pairs:
+ srcNode = nodeById.get(src)
+ if not srcNode:
+ continue
+ srcDef = nodeDefMap.get(srcNode.get("type", ""))
+ if not srcDef:
+ continue
+ srcOutputPorts = srcDef.get("outputPorts", {})
+ srcPort = srcOutputPorts.get(srcOut, {})
+ tgtPort = tgtInputPorts.get(tgtIn, {})
+
+ srcSchema = srcPort.get("schema", "")
+ accepts = tgtPort.get("accepts", [])
+
+ if not accepts or not srcSchema:
+ continue
+ if "Transit" in accepts:
+ continue
+ if srcSchema not in accepts:
+ warnings.append(
+ f"Port mismatch: {src}[out:{srcOut}] ({srcSchema}) -> {tgt}[in:{tgtIn}] (accepts: {accepts})"
+ )
+
+ return warnings
+
+
def topoSort(nodes: List[Dict], connectionMap: Dict[str, List[Tuple[str, int, int]]]) -> List[Dict]:
"""
Topological sort: start from trigger nodes, then BFS by connections.
@@ -198,9 +252,11 @@ def resolveParameterReferences(value: Any, nodeOutputs: Dict[str, Any]) -> Any:
path = value.get("path")
if node_id is not None and isinstance(path, (list, tuple)):
data = nodeOutputs.get(node_id)
+ # Unwrap transit envelopes to access the real data
+ if isinstance(data, dict) and data.get("_transit"):
+ data = data.get("data", data)
plist = list(path)
resolved = _get_by_path(data, plist)
- # input.form historically stored flat field dict; refs use payload.
if (
resolved is None
and isinstance(data, dict)
@@ -214,6 +270,10 @@ def resolveParameterReferences(value: Any, nodeOutputs: Dict[str, Any]) -> Any:
if value.get("type") == "value":
inner = value.get("value")
return resolveParameterReferences(inner, nodeOutputs)
+ if value.get("type") == "system":
+ variable = value.get("variable", "")
+ from modules.features.graphicalEditor.portTypes import _resolveSystemVariable
+ return _resolveSystemVariable(variable, nodeOutputs.get("_context", {}))
return {k: resolveParameterReferences(v, nodeOutputs) for k, v in value.items()}
if isinstance(value, str):
diff --git a/modules/workflows/automation2/subAutomation2Schedule.py b/modules/workflows/automation2/subAutomation2Schedule.py
deleted file mode 100644
index d0fb3cd8..00000000
--- a/modules/workflows/automation2/subAutomation2Schedule.py
+++ /dev/null
@@ -1,304 +0,0 @@
-# Copyright (c) 2025 Patrick Motsch
-"""
-Automation2 schedule scheduler.
-Starts/stops cron jobs for workflows with schedule entry points.
-"""
-
-import asyncio
-import logging
-from typing import Any, Dict
-
-from modules.shared.eventManagement import eventManager
-
-# Main loop reference for scheduling async work from job executor (may run in thread)
-_main_loop = None
-
-
-def set_main_loop(loop) -> None:
- global _main_loop
- _main_loop = loop
-from modules.features.automation2.interfaceFeatureAutomation2 import (
- getAutomation2Interface,
- getAllWorkflowsForScheduling,
-)
-from modules.features.automation2.mainAutomation2 import getAutomation2Services
-from modules.features.automation2.entryPoints import find_invocation
-from modules.workflows.automation2.scheduleCron import parse_cron_to_kwargs
-
-
-def _cron_to_interval_seconds(cron: str):
- """
- If cron represents a simple interval, return seconds. Otherwise None.
- E.g. "* * * * *" -> 60, "*/15 * * * *" -> 900, "*/30 * * * * *" -> 30.
- """
- if not cron or not isinstance(cron, str):
- return None
- parts = cron.strip().split()
- if len(parts) == 5:
- minute, hour, day, month, dow = parts
- second = "0"
- elif len(parts) == 6:
- second, minute, hour, day, month, dow = parts
- else:
- return None
- # Interval minutes: */N * * * *
- if minute.startswith("*/") and hour == "*" and day == "*" and month == "*" and dow == "*":
- n = int(minute[2:]) if minute[2:].isdigit() else 0
- if n > 0:
- return n * 60
- # Every minute: * * * * *
- if minute == "*" and hour == "*" and day == "*" and month == "*" and dow == "*" and second == "0":
- return 60
- # Interval hours: 0 */N * * *
- if minute == "0" and hour.startswith("*/") and day == "*" and month == "*" and dow == "*":
- n = int(hour[2:]) if hour[2:].isdigit() else 0
- if n > 0:
- return n * 3600
- # Interval seconds: */N * * * * * (6-field)
- if len(parts) == 6 and second.startswith("*/") and minute == "*" and hour == "*" and day == "*" and month == "*" and dow in ("*", "?"):
- n = int(second[2:]) if second[2:].isdigit() else 0
- if n > 0:
- return n
- return None
-from modules.workflows.automation2.executionEngine import executeGraph
-from modules.workflows.automation2.runEnvelope import default_run_envelope, normalize_run_envelope
-
-logger = logging.getLogger(__name__)
-
-JOB_ID_PREFIX = "automation2."
-
-
-def _remove_all_automation2_schedule_jobs() -> None:
- """Remove all registered Automation2 schedule jobs from the scheduler."""
- if not eventManager.scheduler:
- return
- for job in list(eventManager.scheduler.get_jobs()):
- jid = job.id if hasattr(job, "id") else str(job)
- if jid.startswith(JOB_ID_PREFIX):
- try:
- eventManager.remove(jid)
- except Exception as e:
- logger.debug("Could not remove job %s: %s", jid, e)
-
-
-def sync_automation2_schedule_events(event_user) -> Dict[str, Any]:
- """
- Sync scheduler with all active Automation2 workflows that have schedule entry points.
- Registers cron jobs for each; removes jobs for workflows no longer in the list.
- """
- if not event_user:
- logger.warning("Automation2 schedule: No event user, skipping sync")
- return {"synced": 0, "events": {}}
-
- _remove_all_automation2_schedule_jobs()
-
- items = getAllWorkflowsForScheduling()
- registered = {}
- logger.info(
- "Automation2 schedule: found %d workflow(s) with trigger.schedule and cron",
- len(items),
- )
-
- for item in items:
- workflow_id = item.get("workflowId")
- mandate_id = item.get("mandateId")
- instance_id = item.get("featureInstanceId")
- entry_point_id = item.get("entryPointId")
- cron = item.get("cron")
- workflow = item.get("workflow")
-
- if not workflow_id or not instance_id or not cron:
- continue
-
- job_id = f"{JOB_ID_PREFIX}{workflow_id}"
- async_handler = _create_schedule_handler(
- workflow_id=workflow_id,
- mandate_id=mandate_id,
- instance_id=instance_id,
- entry_point_id=entry_point_id,
- workflow=workflow,
- event_user=event_user,
- )
-
- # Sync wrapper: schedule async handler on main loop (job may run in executor thread)
- def sync_wrapper():
- loop = _main_loop
- if loop and loop.is_running():
- loop.call_soon_threadsafe(
- lambda: asyncio.ensure_future(async_handler(), loop=loop)
- )
- else:
- # Fallback: run inline if no loop (shouldn't happen)
- try:
- asyncio.run(async_handler())
- except RuntimeError:
- logger.warning("Automation2 schedule: could not run handler, no event loop")
-
- # Use IntervalTrigger for "every N minutes" - more reliable than CronTrigger
- interval_seconds = _cron_to_interval_seconds(cron)
- if interval_seconds is not None:
- eventManager.registerInterval(
- jobId=job_id,
- func=sync_wrapper,
- seconds=interval_seconds,
- replaceExisting=True,
- )
- else:
- try:
- cron_kwargs = parse_cron_to_kwargs(cron)
- eventManager.registerCron(
- jobId=job_id,
- func=sync_wrapper,
- cronKwargs=cron_kwargs,
- replaceExisting=True,
- )
- except ValueError as e:
- logger.warning("Workflow %s: invalid cron %r: %s", workflow_id, cron, e)
- continue
- registered[workflow_id] = job_id
- mode = "interval" if interval_seconds is not None else "cron"
- logger.info(
- "Automation2 schedule: registered %s for workflow %s (%s=%s)",
- job_id,
- workflow_id,
- mode,
- interval_seconds if interval_seconds is not None else cron,
- )
-
- if not registered and items:
- logger.warning("Automation2 schedule: workflows found but none registered (check cron format)")
- elif not items:
- logger.info("Automation2 schedule: no workflows with trigger.schedule+cron (save workflow after selecting Zeitplan)")
- return {"synced": len(registered), "workflowsFound": len(items), "events": registered}
-
-
-def _create_schedule_handler(
- workflow_id: str,
- mandate_id: str,
- instance_id: str,
- entry_point_id: str,
- workflow: Dict[str, Any],
- event_user,
-):
- """Create async handler for scheduled workflow execution."""
-
- async def handler():
- logger.info("Automation2 schedule: CRON FIRED for workflow %s", workflow_id)
- try:
- if not event_user:
- logger.error("Automation2 schedule: event user not available")
- return
-
- a2 = getAutomation2Interface(event_user, mandate_id, instance_id)
- wf = a2.getWorkflow(workflow_id)
- if not wf or not wf.get("graph"):
- logger.warning("Automation2 schedule: workflow %s not found or no graph", workflow_id)
- return
- if not wf.get("active", True):
- logger.info("Automation2 schedule: workflow %s inactive, skipping", workflow_id)
- return
-
- inv = find_invocation(wf, entry_point_id)
- if inv and (inv.get("kind") != "schedule" or not inv.get("enabled", True)):
- logger.info("Automation2 schedule: entry point %s disabled for workflow %s", entry_point_id, workflow_id)
- return
- # If inv not found but graph has trigger.schedule, proceed (invocations may not be synced)
-
- services = getAutomation2Services(
- event_user,
- mandateId=mandate_id,
- featureInstanceId=instance_id,
- )
- from modules.workflows.processing.shared.methodDiscovery import discoverMethods
- discoverMethods(services)
-
- title = (inv or {}).get("title") or {}
- label = ""
- if isinstance(title, dict):
- label = title.get("en") or title.get("de") or ""
- elif isinstance(title, str):
- label = title
-
- run_env = default_run_envelope(
- "schedule",
- entry_point_id=entry_point_id,
- entry_point_label=label or None,
- )
- run_env = normalize_run_envelope(run_env, user_id=str(event_user.id) if event_user else None)
-
- # userId=None so tasks are created unassigned and visible to all instance users
- result = await executeGraph(
- graph=wf["graph"],
- services=services,
- workflowId=workflow_id,
- instanceId=instance_id,
- userId=None,
- mandateId=mandate_id,
- automation2_interface=a2,
- run_envelope=run_env,
- )
- logger.info(
- "Automation2 schedule: executed workflow %s success=%s paused=%s",
- workflow_id,
- result.get("success"),
- result.get("paused"),
- )
- except Exception as e:
- logger.exception("Automation2 schedule: failed to execute workflow %s: %s", workflow_id, e)
-
- return handler
-
-
-def start(event_user) -> bool:
- """
- Start Automation2 schedule scheduler and sync scheduled workflows.
- Registers callback so schedule is re-synced when workflows are created/updated/deleted.
- """
- if not event_user:
- logger.warning("Automation2 schedule: No event user provided, skipping")
- return True
-
- try:
- eventManager.start()
- sync_automation2_schedule_events(event_user)
- logger.info("Automation2 schedule: sync complete")
-
- # Delayed sync (5s) in case DB was not ready at startup
- def do_delayed_sync():
- import threading
- def _run():
- import time
- time.sleep(5)
- try:
- sync_automation2_schedule_events(event_user)
- logger.info("Automation2 schedule: delayed sync done")
- except Exception as e:
- logger.warning("Automation2 schedule: delayed sync failed: %s", e)
- t = threading.Thread(target=_run, daemon=True)
- t.start()
- do_delayed_sync()
-
- def on_workflow_changed(_context=None):
- try:
- sync_automation2_schedule_events(event_user)
- logger.debug("Automation2 schedule: re-synced after workflow change")
- except Exception as e:
- logger.warning("Automation2 schedule: re-sync failed: %s", e)
-
- from modules.shared.callbackRegistry import callbackRegistry
- callbackRegistry.register("automation2.workflow.changed", on_workflow_changed)
- except Exception as e:
- logger.error("Automation2 schedule: Failed to start: %s", e)
- return False
-
- return True
-
-
-def stop(event_user) -> bool:
- """Stop Automation2 schedule scheduler (remove all schedule jobs)."""
- try:
- _remove_all_automation2_schedule_jobs()
- logger.info("Automation2 schedule: all jobs removed")
- except Exception as e:
- logger.warning("Automation2 schedule: error during stop: %s", e)
- return True
diff --git a/modules/workflows/methods/methodBase.py b/modules/workflows/methods/methodBase.py
index 1a81c3eb..6a9f2956 100644
--- a/modules/workflows/methods/methodBase.py
+++ b/modules/workflows/methods/methodBase.py
@@ -243,8 +243,10 @@ class MethodBase:
# Handle List[str], List[int], etc.
if expectedType.startswith('List['):
- if not isinstance(value, list):
- raise ValueError(f"Expected list for type '{expectedType}', got {type(value).__name__}")
+ if isinstance(value, str):
+ value = [v.strip() for v in value.split(',') if v.strip()] if ',' in value else [value]
+ elif not isinstance(value, list):
+ value = [value]
# Extract inner type
innerType = expectedType[5:-1].strip() # Remove "List[" and "]"
if innerType in typeMap:
diff --git a/modules/workflows/methods/methodFile/actions/create.py b/modules/workflows/methods/methodFile/actions/create.py
index 73816da0..b9aafcd3 100644
--- a/modules/workflows/methods/methodFile/actions/create.py
+++ b/modules/workflows/methods/methodFile/actions/create.py
@@ -7,6 +7,7 @@ from typing import Dict, Any
from modules.datamodels.datamodelChat import ActionResult, ActionDocument
from modules.serviceCenter.services.serviceGeneration.subDocumentUtility import markdownToDocumentJson
+from modules.shared.i18nRegistry import normalizePrimaryLanguageTag
logger = logging.getLogger(__name__)
@@ -84,7 +85,10 @@ async def create(self, parameters: Dict[str, Any]) -> ActionResult:
outputFormat = (parameters.get("outputFormat") or "docx").strip().lower().lstrip(".")
title = (parameters.get("title") or "Document").strip()
templateName = parameters.get("templateName")
- language = (parameters.get("language") or "de").strip()[:2]
+ language = normalizePrimaryLanguageTag(
+ str(parameters.get("language") or "de"),
+ "de",
+ )
try:
structured_content = markdownToDocumentJson(context, title, language)
diff --git a/modules/workflows/methods/methodTrustee/actions/extractFromFiles.py b/modules/workflows/methods/methodTrustee/actions/extractFromFiles.py
index 07e9a046..0502c6f6 100644
--- a/modules/workflows/methods/methodTrustee/actions/extractFromFiles.py
+++ b/modules/workflows/methods/methodTrustee/actions/extractFromFiles.py
@@ -15,7 +15,7 @@ import io
from datetime import datetime, timezone
from typing import Dict, Any, List, Optional, Tuple
-from modules.datamodels.datamodelChat import ActionResult, ActionDocument, ChatDocument
+from modules.datamodels.datamodelChat import ActionResult, ActionDocument, ChatDocument, ChatMessage
from modules.datamodels.datamodelDocref import DocumentReferenceList, DocumentItemReference
from modules.datamodels.datamodelAi import AiCallOptions, AiCallRequest, OperationTypeEnum
@@ -500,10 +500,13 @@ async def extractFromFiles(self, parameters: Dict[str, Any]) -> ActionResult:
if not filesToProcess:
return ActionResult.isSuccess(documents=[])
- # Attach all files as ChatDocuments to the workflow so AI can resolve them
- chatDocDumps = []
+ # Attach all files as ChatDocuments so AI can resolve them via DocumentReferenceList.
+ # When running inside the graph engine there is no real ChatWorkflow (workflow.id is None),
+ # so we create in-memory ChatDocument objects and inject them directly into the placeholder
+ # workflow's messages list instead of going through storeMessageWithDocuments.
+ chatDocs = []
for f in filesToProcess:
- chatDoc = ChatDocument(
+ chatDocs.append(ChatDocument(
id=str(uuid.uuid4()),
mandateId=self.services.mandateId or "",
featureInstanceId=featureInstanceId or "",
@@ -512,27 +515,46 @@ async def extractFromFiles(self, parameters: Dict[str, Any]) -> ActionResult:
fileName=f["fileName"],
fileSize=0,
mimeType=f["mimeType"],
+ ))
+
+ workflow = self.services.workflow
+ _wfId = getattr(workflow, "id", None) or ""
+ hasRealWorkflow = workflow is not None and bool(_wfId) and not str(_wfId).startswith("transient-")
+
+ if hasRealWorkflow:
+ chatDocDumps = [d.model_dump() for d in chatDocs]
+ messageData = {
+ "id": f"msg_extract_{uuid.uuid4().hex[:12]}",
+ "documentsLabel": "extract_files",
+ "role": "user",
+ "status": "step",
+ "message": f"Extract from {len(filesToProcess)} file(s)",
+ }
+ createdMessage = self.services.chat.storeMessageWithDocuments(
+ workflow, messageData, chatDocDumps,
)
- chatDocDumps.append(chatDoc.model_dump())
- messageData = {
- "id": f"msg_extract_{uuid.uuid4().hex[:12]}",
- "documentsLabel": "extract_files",
- "role": "user",
- "status": "step",
- "message": f"Extract from {len(filesToProcess)} file(s)",
- }
- createdMessage = self.services.chat.storeMessageWithDocuments(
- self.services.workflow,
- messageData,
- chatDocDumps,
- )
- if not createdMessage or not createdMessage.documents:
- return ActionResult.isFailure(error="Failed to attach documents to workflow")
- # Map fileId -> ChatDocument id for AI reference
- fileIdToChatDocId = {}
- for i, f in enumerate(filesToProcess):
- if i < len(createdMessage.documents):
- fileIdToChatDocId[f["fileId"]] = createdMessage.documents[i].id
+ if not createdMessage or not createdMessage.documents:
+ return ActionResult.isFailure(error="Failed to attach documents to workflow")
+ fileIdToChatDocId = {}
+ for i, f in enumerate(filesToProcess):
+ if i < len(createdMessage.documents):
+ fileIdToChatDocId[f["fileId"]] = createdMessage.documents[i].id
+ else:
+ # Graph-engine path: inject documents into the placeholder workflow so
+ # getChatDocumentsFromDocumentList can find them via workflow.messages.
+ msgId = f"msg_extract_{uuid.uuid4().hex[:12]}"
+ placeholderMsg = ChatMessage(
+ id=msgId,
+ workflowId=getattr(workflow, "id", None) or "transient",
+ documentsLabel="extract_files",
+ role="user",
+ status="step",
+ message=f"Extract from {len(filesToProcess)} file(s)",
+ documents=chatDocs,
+ )
+ if workflow is not None and hasattr(workflow, "messages"):
+ workflow.messages.append(placeholderMsg)
+ fileIdToChatDocId = {f["fileId"]: chatDocs[i].id for i, f in enumerate(filesToProcess)}
expenseList, bankList = await _getAccountLists(self, featureInstanceId)
@@ -552,16 +574,14 @@ async def extractFromFiles(self, parameters: Dict[str, Any]) -> ActionResult:
raw = resultDoc.documentData
data = json.loads(raw) if isinstance(raw, str) else raw
hasError = "error" in data or not data.get("extractedData")
- destSub = "error" if hasError else "processed"
+ if hasError:
+ logger.info(f"Extraction failed for {moveInfo.get('fileName', '?')} — leaving file in place")
+ return
folderPath = (moveInfo.get("folderPath") or "").strip().rstrip("/")
- destFolder = f"{folderPath}/{destSub}".strip("/") if folderPath else destSub
+ destFolder = f"{folderPath}/processed".strip("/") if folderPath else "processed"
sourceFolder = folderPath or ""
fileName = moveInfo.get("fileName") or "file"
- destFile = (
- f"{datetime.now(timezone.utc).strftime('%Y%m%d_%H%M%S')}_{fileName}"
- if not hasError
- else fileName
- )
+ destFile = f"{datetime.now(timezone.utc).strftime('%Y%m%d_%H%M%S')}_{fileName}"
await sharepoint.copyFileAsync(
moveInfo["siteId"], sourceFolder, fileName, destFolder, destFile
)
diff --git a/modules/workflows/methods/methodTrustee/actions/processDocuments.py b/modules/workflows/methods/methodTrustee/actions/processDocuments.py
index 3f95836d..c4bf9df1 100644
--- a/modules/workflows/methods/methodTrustee/actions/processDocuments.py
+++ b/modules/workflows/methods/methodTrustee/actions/processDocuments.py
@@ -204,6 +204,56 @@ def _recordToPosition(record: Dict[str, Any], documentId: Optional[str], feature
}
+def _resolveDocumentList(documentListParam, services) -> List[tuple]:
+ """Resolve documentList from either Graph-Editor output (list of dicts) or Chat references.
+
+ Returns list of (data_dict, fileId, fileName, mimeType) tuples.
+ """
+ results = []
+
+ if isinstance(documentListParam, list) and documentListParam:
+ first = documentListParam[0]
+ if isinstance(first, dict) and ("documentData" in first or "documentName" in first):
+ for doc in documentListParam:
+ rawData = doc.get("documentData")
+ if not rawData:
+ continue
+ try:
+ data = json.loads(rawData) if isinstance(rawData, str) else rawData
+ except (json.JSONDecodeError, TypeError):
+ continue
+ fileId = (doc.get("validationMetadata") or {}).get("fileId") or doc.get("fileId", "")
+ fileName = doc.get("documentName") or doc.get("fileName") or "document"
+ mimeType = doc.get("mimeType") or doc.get("documentMimeType") or "application/json"
+ results.append((data, fileId, fileName, mimeType))
+ if results:
+ return results
+
+ chatService = getattr(services, "chat", None)
+ if not chatService:
+ return results
+
+ try:
+ docList = DocumentReferenceList.from_string_list(
+ documentListParam if isinstance(documentListParam, list) else [documentListParam]
+ )
+ chatDocuments = chatService.getChatDocumentsFromDocumentList(docList)
+ for chatDoc in (chatDocuments or []):
+ rawBytes = chatService.getFileData(chatDoc.fileId)
+ if not rawBytes:
+ continue
+ content = rawBytes.decode("utf-8") if isinstance(rawBytes, bytes) else rawBytes
+ try:
+ data = json.loads(content) if isinstance(content, str) else content
+ except (json.JSONDecodeError, TypeError):
+ continue
+ results.append((data, chatDoc.fileId, chatDoc.fileName or "document", chatDoc.mimeType or "application/json"))
+ except Exception as e:
+ logger.debug("_resolveDocumentList chat fallback failed: %s", e)
+
+ return results
+
+
async def processDocuments(self, parameters: Dict[str, Any]) -> ActionResult:
"""
Resolve documentList to ChatDocuments, load extraction JSON per document,
@@ -218,11 +268,8 @@ async def processDocuments(self, parameters: Dict[str, Any]) -> ActionResult:
return ActionResult.isFailure(error="featureInstanceId is required")
try:
- docList = DocumentReferenceList.from_string_list(
- documentListParam if isinstance(documentListParam, list) else [documentListParam]
- )
- chatDocuments = self.services.chat.getChatDocumentsFromDocumentList(docList)
- if not chatDocuments:
+ extractionDocs = _resolveDocumentList(documentListParam, self.services)
+ if not extractionDocs:
return ActionResult.isFailure(error="No documents found for documentList")
from modules.features.trustee.interfaceFeatureTrustee import getInterface as getTrusteeInterface
@@ -237,17 +284,11 @@ async def processDocuments(self, parameters: Dict[str, Any]) -> ActionResult:
allDocumentIds = []
autoMatchedPositionIds = []
- for chatDoc in chatDocuments:
- rawBytes = self.services.chat.getFileData(chatDoc.fileId)
- if not rawBytes:
- logger.warning(f"Could not load file {chatDoc.fileId}, skipping")
- continue
- content = rawBytes.decode("utf-8") if isinstance(rawBytes, bytes) else rawBytes
- data = json.loads(content) if isinstance(content, str) else content
+ for data, fileId, fileName, mimeType in extractionDocs:
documentType = data.get("documentType")
extractedData = data.get("extractedData")
- fileId = data.get("fileId") or chatDoc.fileId
- fileName = data.get("fileName") or chatDoc.fileName or "document"
+ fileId = data.get("fileId") or fileId
+ fileName = data.get("fileName") or fileName or "document"
records = extractedData if isinstance(extractedData, list) else [extractedData] if extractedData else []
if not records:
@@ -256,7 +297,7 @@ async def processDocuments(self, parameters: Dict[str, Any]) -> ActionResult:
docPayload = {
"fileId": fileId,
"documentName": fileName,
- "documentMimeType": chatDoc.mimeType or "application/octet-stream",
+ "documentMimeType": mimeType or "application/octet-stream",
"sourceType": "workflow",
"documentType": documentType,
}
diff --git a/modules/workflows/methods/methodTrustee/actions/refreshAccountingData.py b/modules/workflows/methods/methodTrustee/actions/refreshAccountingData.py
new file mode 100644
index 00000000..80924c39
--- /dev/null
+++ b/modules/workflows/methods/methodTrustee/actions/refreshAccountingData.py
@@ -0,0 +1,139 @@
+# Copyright (c) 2025 Patrick Motsch
+# All rights reserved.
+"""
+Refresh accounting data from external system (e.g. Abacus) into local TrusteeData* tables.
+Checks lastSyncAt to avoid redundant imports unless forceRefresh is set.
+"""
+
+import json
+import logging
+import time
+from typing import Dict, Any
+
+from modules.datamodels.datamodelChat import ActionResult, ActionDocument
+
+logger = logging.getLogger(__name__)
+
+_SYNC_THRESHOLD_SECONDS = 3600
+
+
+async def refreshAccountingData(self, parameters: Dict[str, Any]) -> ActionResult:
+ """Import/refresh accounting data from the configured external system.
+
+ If data was synced within the last hour and forceRefresh is not set,
+ returns cached counts without triggering an external sync.
+ """
+ featureInstanceId = parameters.get("featureInstanceId") or (
+ self.services.featureInstanceId if hasattr(self.services, "featureInstanceId") else None
+ )
+ forceRefresh = parameters.get("forceRefresh", False)
+ if isinstance(forceRefresh, str):
+ forceRefresh = forceRefresh.lower() in ("true", "1", "yes")
+ dateFrom = parameters.get("dateFrom") or None
+ dateTo = parameters.get("dateTo") or None
+
+ if not featureInstanceId:
+ return ActionResult.isFailure(error="featureInstanceId is required")
+
+ try:
+ from modules.features.trustee.interfaceFeatureTrustee import getInterface as getTrusteeInterface
+ from modules.features.trustee.datamodelFeatureTrustee import (
+ TrusteeAccountingConfig,
+ TrusteeDataAccount,
+ TrusteeDataJournalEntry,
+ TrusteeDataJournalLine,
+ TrusteeDataContact,
+ TrusteeDataAccountBalance,
+ )
+
+ trusteeInterface = getTrusteeInterface(
+ self.services.user,
+ mandateId=self.services.mandateId,
+ featureInstanceId=featureInstanceId,
+ )
+
+ cfgRecords = trusteeInterface.db.getRecordset(
+ TrusteeAccountingConfig,
+ recordFilter={"featureInstanceId": featureInstanceId, "isActive": True},
+ )
+ if not cfgRecords:
+ return ActionResult.isFailure(error="No active accounting configuration found for this Trustee instance")
+
+ cfgRecord = cfgRecords[0]
+ lastSyncAt = cfgRecord.get("lastSyncAt") or 0
+ lastSyncStatus = cfgRecord.get("lastSyncStatus") or ""
+
+ isFresh = (
+ lastSyncAt
+ and (time.time() - lastSyncAt) < _SYNC_THRESHOLD_SECONDS
+ and lastSyncStatus in ("success", "partial")
+ )
+
+ if isFresh and not forceRefresh:
+ counts = _getCachedCounts(trusteeInterface, featureInstanceId)
+ counts["synced"] = False
+ counts["lastSyncAt"] = lastSyncAt
+ counts["lastSyncStatus"] = lastSyncStatus
+ counts["message"] = f"Data is fresh (synced {int(time.time() - lastSyncAt)}s ago). Use forceRefresh=true to re-import."
+ return ActionResult.isSuccess(documents=[
+ ActionDocument(
+ documentName="refresh_result",
+ documentData=json.dumps(counts, ensure_ascii=False),
+ mimeType="application/json",
+ )
+ ])
+
+ from modules.features.trustee.accounting.accountingDataSync import AccountingDataSync
+
+ sync = AccountingDataSync(trusteeInterface)
+ summary = await sync.importData(
+ featureInstanceId=featureInstanceId,
+ mandateId=self.services.mandateId,
+ dateFrom=dateFrom,
+ dateTo=dateTo,
+ )
+ summary["synced"] = True
+ summary.pop("startedAt", None)
+ summary.pop("finishedAt", None)
+
+ try:
+ from modules.serviceCenter.services.serviceAgent.coreTools._featureSubAgentTools import clearFeatureQueryCache
+ clearFeatureQueryCache(featureInstanceId)
+ logger.info("Cleared feature query cache for instance %s after accounting import", featureInstanceId)
+ except Exception as cacheErr:
+ logger.warning("Could not clear feature query cache: %s", cacheErr)
+
+ return ActionResult.isSuccess(documents=[
+ ActionDocument(
+ documentName="refresh_result",
+ documentData=json.dumps(summary, ensure_ascii=False),
+ mimeType="application/json",
+ )
+ ])
+ except Exception as e:
+ logger.exception("refreshAccountingData failed")
+ return ActionResult.isFailure(error=str(e))
+
+
+def _getCachedCounts(trusteeInterface, featureInstanceId: str) -> Dict[str, Any]:
+ """Count existing records per TrusteeData* table without triggering an external sync."""
+ from modules.features.trustee.datamodelFeatureTrustee import (
+ TrusteeDataAccount,
+ TrusteeDataJournalEntry,
+ TrusteeDataJournalLine,
+ TrusteeDataContact,
+ TrusteeDataAccountBalance,
+ )
+ counts = {}
+ for label, model in [
+ ("accounts", TrusteeDataAccount),
+ ("journalEntries", TrusteeDataJournalEntry),
+ ("journalLines", TrusteeDataJournalLine),
+ ("contacts", TrusteeDataContact),
+ ("accountBalances", TrusteeDataAccountBalance),
+ ]:
+ records = trusteeInterface.db.getRecordset(
+ model, recordFilter={"featureInstanceId": featureInstanceId}
+ )
+ counts[label] = len(records) if records else 0
+ return counts
diff --git a/modules/workflows/methods/methodTrustee/actions/syncToAccounting.py b/modules/workflows/methods/methodTrustee/actions/syncToAccounting.py
index 4633f32e..555a8623 100644
--- a/modules/workflows/methods/methodTrustee/actions/syncToAccounting.py
+++ b/modules/workflows/methods/methodTrustee/actions/syncToAccounting.py
@@ -16,6 +16,43 @@ from modules.datamodels.datamodelDocref import DocumentReferenceList
logger = logging.getLogger(__name__)
+def _resolveFirstDocument(documentListParam, services) -> Dict[str, Any] | None:
+ """Resolve the first document from either Graph-Editor output (list of dicts) or Chat references.
+
+ Returns the parsed JSON dict or None.
+ """
+ if isinstance(documentListParam, list) and documentListParam:
+ first = documentListParam[0]
+ if isinstance(first, dict) and ("documentData" in first or "documentName" in first):
+ rawData = first.get("documentData")
+ if rawData:
+ try:
+ return json.loads(rawData) if isinstance(rawData, str) else rawData
+ except (json.JSONDecodeError, TypeError):
+ pass
+
+ chatService = getattr(services, "chat", None)
+ if not chatService:
+ return None
+
+ try:
+ docList = DocumentReferenceList.from_string_list(
+ documentListParam if isinstance(documentListParam, list) else [documentListParam]
+ )
+ chatDocuments = chatService.getChatDocumentsFromDocumentList(docList)
+ if not chatDocuments:
+ return None
+ doc = chatDocuments[0]
+ rawBytes = chatService.getFileData(doc.fileId)
+ if not rawBytes:
+ return None
+ content = rawBytes.decode("utf-8") if isinstance(rawBytes, bytes) else rawBytes
+ return json.loads(content) if isinstance(content, str) else content
+ except Exception as e:
+ logger.debug("_resolveFirstDocument chat fallback failed: %s", e)
+ return None
+
+
async def syncToAccounting(self, parameters: Dict[str, Any]) -> ActionResult:
"""
Push trustee positions to the configured accounting system.
@@ -30,21 +67,10 @@ async def syncToAccounting(self, parameters: Dict[str, Any]) -> ActionResult:
return ActionResult.isFailure(error="documentList is required (reference to processDocuments result)")
try:
- docList = DocumentReferenceList.from_string_list(
- documentListParam if isinstance(documentListParam, list) else [documentListParam]
- )
- chatDocuments = self.services.chat.getChatDocumentsFromDocumentList(docList)
- if not chatDocuments:
+ data = _resolveFirstDocument(documentListParam, self.services)
+ if data is None:
return ActionResult.isFailure(error="No document found for documentList; ensure processDocuments ran before this action")
- # Expect one document (JSON with positionIds, documentIds)
- doc = chatDocuments[0]
- rawBytes = self.services.chat.getFileData(doc.fileId)
- if not rawBytes:
- return ActionResult.isFailure(error=f"Could not load document content for fileId={doc.fileId}")
-
- content = rawBytes.decode("utf-8") if isinstance(rawBytes, bytes) else rawBytes
- data = json.loads(content) if isinstance(content, str) else content
positionIds = data.get("positionIds") or []
if not positionIds:
return ActionResult.isSuccess(documents=[
diff --git a/modules/workflows/methods/methodTrustee/methodTrustee.py b/modules/workflows/methods/methodTrustee/methodTrustee.py
index fefeaa52..5be232f8 100644
--- a/modules/workflows/methods/methodTrustee/methodTrustee.py
+++ b/modules/workflows/methods/methodTrustee/methodTrustee.py
@@ -12,6 +12,7 @@ from modules.shared.frontendTypes import FrontendType
from .actions.extractFromFiles import extractFromFiles
from .actions.processDocuments import processDocuments
from .actions.syncToAccounting import syncToAccounting
+from .actions.refreshAccountingData import refreshAccountingData
logger = logging.getLogger(__name__)
@@ -112,9 +113,46 @@ class MethodTrustee(MethodBase):
},
execute=syncToAccounting.__get__(self, self.__class__),
),
+ "refreshAccountingData": WorkflowActionDefinition(
+ actionId="trustee.refreshAccountingData",
+ description="Import/refresh accounting data from external system (e.g. Abacus) into local tables. Checks cache freshness; use forceRefresh to re-import.",
+ dynamicMode=True,
+ parameters={
+ "featureInstanceId": WorkflowActionParameter(
+ name="featureInstanceId",
+ type="str",
+ frontendType=FrontendType.TEXT,
+ required=True,
+ description="Trustee feature instance ID",
+ ),
+ "forceRefresh": WorkflowActionParameter(
+ name="forceRefresh",
+ type="bool",
+ frontendType=FrontendType.CHECKBOX,
+ required=False,
+ description="Force re-import even if data is fresh (default: false)",
+ ),
+ "dateFrom": WorkflowActionParameter(
+ name="dateFrom",
+ type="str",
+ frontendType=FrontendType.TEXT,
+ required=False,
+ description="Start date filter for journal entries (YYYY-MM-DD)",
+ ),
+ "dateTo": WorkflowActionParameter(
+ name="dateTo",
+ type="str",
+ frontendType=FrontendType.TEXT,
+ required=False,
+ description="End date filter for journal entries (YYYY-MM-DD)",
+ ),
+ },
+ execute=refreshAccountingData.__get__(self, self.__class__),
+ ),
}
self._validateActions()
self.extractFromFiles = extractFromFiles.__get__(self, self.__class__)
self.processDocuments = processDocuments.__get__(self, self.__class__)
self.syncToAccounting = syncToAccounting.__get__(self, self.__class__)
+ self.refreshAccountingData = refreshAccountingData.__get__(self, self.__class__)
diff --git a/modules/workflows/processing/modes/modeAutomation.py b/modules/workflows/processing/modes/modeAutomation.py
index 1d0121b9..f48d509e 100644
--- a/modules/workflows/processing/modes/modeAutomation.py
+++ b/modules/workflows/processing/modes/modeAutomation.py
@@ -8,7 +8,7 @@ import logging
import uuid
from typing import List, Dict, Any, Optional
from modules.datamodels.datamodelChat import (
- TaskStep, TaskContext, TaskResult, ActionItem, TaskStatus,
+ TaskStep, TaskContext, ChatTaskResult, ActionItem, TaskStatus,
TaskPlan, ActionResult
)
from modules.datamodels.datamodelChat import ChatWorkflow
@@ -169,7 +169,7 @@ class AutomationMode(BaseMode):
return []
async def executeTask(self, taskStep: TaskStep, workflow: ChatWorkflow, context: TaskContext,
- taskIndex: int = None, totalTasks: int = None) -> TaskResult:
+ taskIndex: int = None, totalTasks: int = None) -> ChatTaskResult:
"""
Execute task using Automation mode - executes predefined actions directly.
No AI planning or review phases - actions are executed sequentially as defined.
@@ -198,7 +198,7 @@ class AutomationMode(BaseMode):
if not actions:
logger.error(f"No actions found for task {taskIndex}, aborting")
- return TaskResult(
+ return ChatTaskResult(
taskId=taskStep.id,
status=TaskStatus.FAILED,
success=False,
@@ -266,7 +266,7 @@ class AutomationMode(BaseMode):
# Persist this action's result so next action can reference it via documentList
if getattr(self, "processor", None) and result.documents:
try:
- from modules.datamodels.datamodelWorkflow import TaskResult as WorkflowTaskResult
+ from modules.datamodels.datamodelWorkflow import WorkflowTaskResult
resultLabel = action.execResultLabel or f"action_{actionNumber}_result"
actionResultWithLabel = ActionResult(
success=result.success,
@@ -306,7 +306,7 @@ class AutomationMode(BaseMode):
taskStep, workflow, taskIndex, totalTasks, None
)
- return TaskResult(
+ return ChatTaskResult(
taskId=taskStep.id,
status=TaskStatus.COMPLETED,
success=True,
@@ -323,7 +323,7 @@ class AutomationMode(BaseMode):
taskStep, workflow, taskIndex, errorSummary
)
- return TaskResult(
+ return ChatTaskResult(
taskId=taskStep.id,
status=TaskStatus.FAILED,
success=False,
@@ -335,7 +335,7 @@ class AutomationMode(BaseMode):
logger.error(f"Error executing task {taskIndex}: {str(e)}")
await self.messageCreator.createErrorMessage(taskStep, workflow, taskIndex, str(e))
- return TaskResult(
+ return ChatTaskResult(
taskId=taskStep.id,
status=TaskStatus.FAILED,
success=False,
diff --git a/modules/workflows/processing/modes/modeBase.py b/modules/workflows/processing/modes/modeBase.py
index fe9a5da6..a8a3e048 100644
--- a/modules/workflows/processing/modes/modeBase.py
+++ b/modules/workflows/processing/modes/modeBase.py
@@ -7,7 +7,7 @@ from abc import ABC, abstractmethod
import uuid
import logging
from typing import List, Dict, Any, Optional
-from modules.datamodels.datamodelChat import TaskStep, TaskContext, TaskResult, ActionItem, TaskStatus
+from modules.datamodels.datamodelChat import TaskStep, TaskContext, ChatTaskResult, ActionItem, TaskStatus
from modules.datamodels.datamodelChat import ChatWorkflow
from modules.workflows.processing.core.taskPlanner import TaskPlanner
from modules.workflows.processing.core.actionExecutor import ActionExecutor
@@ -29,7 +29,7 @@ class BaseMode(ABC):
@abstractmethod
- async def executeTask(self, taskStep: TaskStep, workflow: ChatWorkflow, context: TaskContext) -> TaskResult:
+ async def executeTask(self, taskStep: TaskStep, workflow: ChatWorkflow, context: TaskContext) -> ChatTaskResult:
"""Execute a task step - must be implemented by concrete modes"""
pass
diff --git a/modules/workflows/processing/modes/modeDynamic.py b/modules/workflows/processing/modes/modeDynamic.py
index c563a74a..67a32a64 100644
--- a/modules/workflows/processing/modes/modeDynamic.py
+++ b/modules/workflows/processing/modes/modeDynamic.py
@@ -10,7 +10,7 @@ import time
from datetime import datetime, timezone
from typing import List, Dict, Any
from modules.datamodels.datamodelChat import (
- TaskStep, TaskContext, TaskResult, ActionItem, TaskStatus,
+ TaskStep, TaskContext, ChatTaskResult, ActionItem, TaskStatus,
ActionResult, Observation, ObservationPreview, ReviewResult, ReviewContext
)
from modules.datamodels.datamodelChat import ChatWorkflow
@@ -48,7 +48,7 @@ class DynamicMode(BaseMode):
# Dynamic mode generates actions one at a time in the execution loop
return []
- async def executeTask(self, taskStep: TaskStep, workflow: ChatWorkflow, context: TaskContext) -> TaskResult:
+ async def executeTask(self, taskStep: TaskStep, workflow: ChatWorkflow, context: TaskContext) -> ChatTaskResult:
"""Execute task using Dynamic mode - iterative plan-act-observe-refine loop"""
# Get task index from workflow state
@@ -335,7 +335,7 @@ class DynamicMode(BaseMode):
# Create task completion message (totalTasks not needed - removed from signature)
await self.messageCreator.createTaskCompletionMessage(taskStep, workflow, taskIndex, None, completionReviewResult)
- return TaskResult(
+ return ChatTaskResult(
taskId=taskStep.id,
status=status,
success=success,
@@ -917,7 +917,7 @@ class DynamicMode(BaseMode):
'success': observation.success,
'resultLabel': observation.resultLabel,
'documentsCount': observation.documentsCount,
- 'previews': [p.model_dump(exclude_none=True) if hasattr(p, 'model_dump') else p.dict() for p in observation.previews] if observation.previews else [],
+ 'previews': [p.model_dump(exclude_none=True) for p in observation.previews] if observation.previews else [],
'notes': observation.notes,
'contentAnalysis': observation.contentAnalysis if observation.contentAnalysis else {}
}
diff --git a/modules/workflows/processing/shared/placeholderFactory.py b/modules/workflows/processing/shared/placeholderFactory.py
index 3d1a9d83..430204bd 100644
--- a/modules/workflows/processing/shared/placeholderFactory.py
+++ b/modules/workflows/processing/shared/placeholderFactory.py
@@ -48,8 +48,6 @@ def _observationToDict(obs) -> dict:
return obs.copy()
if hasattr(obs, 'model_dump'):
return obs.model_dump(exclude_none=True)
- if hasattr(obs, 'dict'):
- return obs.dict()
return {"raw": str(obs)}
diff --git a/modules/workflows/processing/workflowProcessor.py b/modules/workflows/processing/workflowProcessor.py
index 3f83379b..99d8fd63 100644
--- a/modules/workflows/processing/workflowProcessor.py
+++ b/modules/workflows/processing/workflowProcessor.py
@@ -18,7 +18,7 @@ from modules.shared.jsonUtils import extractJsonString, repairBrokenJson, parseJ
from modules.datamodels.datamodelWorkflow import UnderstandingResult
if TYPE_CHECKING:
- from modules.datamodels.datamodelWorkflow import TaskResult
+ from modules.datamodels.datamodelWorkflow import WorkflowTaskResult
logger = logging.getLogger(__name__)
@@ -109,7 +109,7 @@ class WorkflowProcessor:
self.services.chat.progressLogFinish(operationId, False)
raise
- async def executeTask(self, taskStep: TaskStep, workflow: ChatWorkflow, context: TaskContext) -> datamodelChat.TaskResult:
+ async def executeTask(self, taskStep: TaskStep, workflow: ChatWorkflow, context: TaskContext) -> datamodelChat.ChatTaskResult:
"""Execute a task step using the appropriate mode"""
import time
diff --git a/modules/workflows/scheduler/__init__.py b/modules/workflows/scheduler/__init__.py
new file mode 100644
index 00000000..e2b0f5de
--- /dev/null
+++ b/modules/workflows/scheduler/__init__.py
@@ -0,0 +1,2 @@
+# Copyright (c) 2025 Patrick Motsch
+# Workflow Scheduler - consolidated scheduler with v1 incremental sync patterns
diff --git a/modules/workflows/scheduler/mainScheduler.py b/modules/workflows/scheduler/mainScheduler.py
new file mode 100644
index 00000000..651a968d
--- /dev/null
+++ b/modules/workflows/scheduler/mainScheduler.py
@@ -0,0 +1,465 @@
+# Copyright (c) 2025 Patrick Motsch
+# All rights reserved.
+"""
+Consolidated Workflow Scheduler.
+Replaces subAutomation2Schedule with v1-style incremental sync patterns:
+- job id recorded on AutoWorkflow.eventId for sync bookkeeping
+- replaceExisting=True for idempotent re-registration
+- active check before execution
+- failure notifications (event emit, in-app notification, email subscription)
+"""
+
+import asyncio
+import logging
+from typing import Any, Dict, Optional
+
+from modules.shared.eventManagement import eventManager
+from modules.shared.i18nRegistry import resolveText
+
+logger = logging.getLogger(__name__)
+
+_main_loop = None
+
+JOB_ID_PREFIX = "graphicalEditor."
+_CALLBACK_NAME = "graphicalEditor.workflow.changed"
+
+
+def _setMainLoop(loop) -> None:
+ global _main_loop
+ _main_loop = loop
+
+
+class WorkflowScheduler:
+ """Consolidated scheduler with v1 incremental sync patterns."""
+
+ def __init__(self):
+ self._eventUser = None
+ self._registered: Dict[str, str] = {}
+
+ def start(self, eventUser) -> bool:
+ """Start scheduler: sync workflows, register callback for changes."""
+ if not eventUser:
+ logger.warning("WorkflowScheduler: No event user provided, skipping")
+ return False
+
+ self._eventUser = eventUser
+
+ try:
+ eventManager.start()
+ self._syncScheduledWorkflows()
+ logger.info("WorkflowScheduler: initial sync complete")
+
+ self._delayedSync()
+
+ from modules.shared.callbackRegistry import callbackRegistry
+ callbackRegistry.register(_CALLBACK_NAME, self._onWorkflowChanged)
+ logger.info("WorkflowScheduler: callback registered for %s", _CALLBACK_NAME)
+ except Exception as e:
+ logger.error("WorkflowScheduler: Failed to start: %s", e)
+ return False
+
+ return True
+
+ def stop(self) -> bool:
+ """Remove all scheduled workflow jobs."""
+ try:
+ self._removeAllJobs()
+ logger.info("WorkflowScheduler: all jobs removed")
+ except Exception as e:
+ logger.warning("WorkflowScheduler: error during stop: %s", e)
+ return True
+
+ def _syncScheduledWorkflows(self) -> Dict[str, Any]:
+ """
+        Idempotently (re-)register all scheduled workflow jobs (replaceExisting=True)
+        and remove stale ones; records the job id in AutoWorkflow.eventId (v1 pattern).
+ """
+ from modules.features.graphicalEditor.interfaceFeatureGraphicalEditor import getAllWorkflowsForScheduling
+ from modules.workflows.automation2.scheduleCron import parse_cron_to_kwargs
+
+ items = getAllWorkflowsForScheduling()
+ logger.info("WorkflowScheduler: found %d workflow(s) with trigger.schedule+cron", len(items))
+
+ newRegistered: Dict[str, str] = {}
+ activeWorkflowIds = set()
+
+ for item in items:
+ workflowId = item.get("workflowId")
+ if not workflowId:
+ continue
+
+ activeWorkflowIds.add(workflowId)
+ cron = item.get("cron")
+ mandateId = item.get("mandateId")
+ instanceId = item.get("featureInstanceId")
+
+ if not instanceId or not cron:
+ continue
+
+ jobId = f"{JOB_ID_PREFIX}{workflowId}"
+ entryPointId = item.get("entryPointId")
+ workflow = item.get("workflow") or {}
+
+ asyncHandler = self._createHandler(
+ workflowId=workflowId,
+ mandateId=mandateId,
+ instanceId=instanceId,
+ entryPointId=entryPointId,
+ workflow=workflow,
+ )
+
+ def _makeSyncWrapper(handler):
+ def syncWrapper():
+ loop = _main_loop
+ if loop and loop.is_running():
+ loop.call_soon_threadsafe(
+ lambda: asyncio.ensure_future(handler(), loop=loop)
+ )
+ else:
+ try:
+ asyncio.run(handler())
+ except RuntimeError:
+ logger.warning("WorkflowScheduler: could not run handler, no event loop")
+ return syncWrapper
+
+ syncWrapper = _makeSyncWrapper(asyncHandler)
+
+ intervalSeconds = _cronToIntervalSeconds(cron)
+ if intervalSeconds is not None:
+ eventManager.registerInterval(
+ jobId=jobId,
+ func=syncWrapper,
+ seconds=intervalSeconds,
+ replaceExisting=True,
+ )
+ else:
+ try:
+ cronKwargs = parse_cron_to_kwargs(cron)
+ eventManager.registerCron(
+ jobId=jobId,
+ func=syncWrapper,
+ cronKwargs=cronKwargs,
+ replaceExisting=True,
+ )
+ except ValueError as e:
+ logger.warning("Workflow %s: invalid cron %r: %s", workflowId, cron, e)
+ continue
+
+ newRegistered[workflowId] = jobId
+ mode = "interval" if intervalSeconds is not None else "cron"
+ logger.info(
+ "WorkflowScheduler: registered %s for workflow %s (%s=%s)",
+ jobId, workflowId, mode,
+ intervalSeconds if intervalSeconds is not None else cron,
+ )
+
+ self._updateEventId(workflow, workflowId, jobId)
+
+ staleIds = set(self._registered.keys()) - activeWorkflowIds
+ for wfId in staleIds:
+ oldJobId = self._registered[wfId]
+ try:
+ eventManager.remove(oldJobId)
+ logger.info("WorkflowScheduler: removed stale job %s", oldJobId)
+ except Exception:
+ pass
+
+ self._registered = newRegistered
+ return {"synced": len(newRegistered), "workflowsFound": len(items)}
+
+ def _updateEventId(self, workflow: Dict, workflowId: str, jobId: str) -> None:
+ """Update AutoWorkflow.eventId for incremental sync tracking (v1 pattern)."""
+ currentEventId = workflow.get("eventId")
+ if currentEventId != jobId:
+ try:
+ from modules.features.graphicalEditor.interfaceFeatureGraphicalEditor import getGraphicalEditorInterface
+ from modules.interfaces.interfaceDbApp import getRootInterface
+ root = getRootInterface()
+ eventUser = root.getUserByUsername("event") if root else self._eventUser
+ if not eventUser:
+ return
+ mandateId = workflow.get("mandateId", "")
+ instanceId = workflow.get("featureInstanceId", "")
+ iface = getGraphicalEditorInterface(eventUser, mandateId, instanceId)
+ iface.updateWorkflow(workflowId, {"eventId": jobId})
+ except Exception as e:
+ logger.debug("WorkflowScheduler: could not update eventId for %s: %s", workflowId, e)
+
+ def _createHandler(
+ self,
+ workflowId: str,
+ mandateId: str,
+ instanceId: str,
+ entryPointId: str,
+ workflow: Dict[str, Any],
+ ):
+ """Create async handler for scheduled workflow execution with active-check."""
+ eventUser = self._eventUser
+
+ async def handler():
+ logger.info("WorkflowScheduler: CRON FIRED for workflow %s", workflowId)
+ try:
+ if not eventUser:
+ logger.error("WorkflowScheduler: event user not available")
+ return
+
+ from modules.features.graphicalEditor.interfaceFeatureGraphicalEditor import getGraphicalEditorInterface
+ from modules.features.graphicalEditor.mainGraphicalEditor import getGraphicalEditorServices
+ from modules.workflows.automation2.executionEngine import executeGraph
+ from modules.workflows.processing.shared.methodDiscovery import discoverMethods
+ from modules.features.graphicalEditor.entryPoints import find_invocation
+ from modules.workflows.automation2.runEnvelope import default_run_envelope, normalize_run_envelope
+
+ iface = getGraphicalEditorInterface(eventUser, mandateId, instanceId)
+ wf = iface.getWorkflow(workflowId)
+ if not wf or not wf.get("graph"):
+ logger.warning("WorkflowScheduler: workflow %s not found or no graph", workflowId)
+ return
+ if not wf.get("active", True):
+ logger.info("WorkflowScheduler: workflow %s inactive, skipping", workflowId)
+ return
+
+ inv = find_invocation(wf, entryPointId)
+ if inv and (inv.get("kind") != "schedule" or not inv.get("enabled", True)):
+ logger.info("WorkflowScheduler: entry point %s disabled for workflow %s", entryPointId, workflowId)
+ return
+
+ services = getGraphicalEditorServices(
+ eventUser,
+ mandateId=mandateId,
+ featureInstanceId=instanceId,
+ )
+ discoverMethods(services)
+
+ title = (inv or {}).get("title") or {}
+ requestLang: Optional[str] = getattr(eventUser, "language", None)
+ label = resolveText(title, requestLang) if title else ""
+
+ runEnv = default_run_envelope(
+ "schedule",
+ entry_point_id=entryPointId,
+ entry_point_label=label or None,
+ )
+ runEnv = normalize_run_envelope(runEnv, user_id=str(eventUser.id) if eventUser else None)
+
+ _wfLabel = wf.get("label") if isinstance(wf, dict) else getattr(wf, "label", None)
+
+ result = await executeGraph(
+ graph=wf["graph"],
+ services=services,
+ workflowId=workflowId,
+ instanceId=instanceId,
+ userId=None,
+ mandateId=mandateId,
+ automation2_interface=iface,
+ run_envelope=runEnv,
+ label=_wfLabel,
+ )
+ logger.info(
+ "WorkflowScheduler: executed workflow %s success=%s paused=%s",
+ workflowId, result.get("success"), result.get("paused"),
+ )
+ except Exception as e:
+ logger.exception("WorkflowScheduler: failed to execute workflow %s: %s", workflowId, e)
+
+ return handler
+
+ def _delayedSync(self) -> None:
+ """Delayed sync (5s) in case DB was not ready at startup."""
+ import threading
+
+ eventUser = self._eventUser
+
+ def _run():
+ import time
+ time.sleep(5)
+ try:
+ self._syncScheduledWorkflows()
+ logger.info("WorkflowScheduler: delayed sync done")
+ except Exception as e:
+ logger.warning("WorkflowScheduler: delayed sync failed: %s", e)
+
+ t = threading.Thread(target=_run, daemon=True)
+ t.start()
+
+ def _onWorkflowChanged(self, _context=None) -> None:
+ """Callback when a workflow is created/updated/deleted."""
+ try:
+ self._syncScheduledWorkflows()
+ logger.debug("WorkflowScheduler: re-synced after workflow change")
+ except Exception as e:
+ logger.warning("WorkflowScheduler: re-sync failed: %s", e)
+
+ def _removeAllJobs(self) -> None:
+ """Remove all registered workflow schedule jobs."""
+ if not eventManager.scheduler:
+ return
+ for job in list(eventManager.scheduler.get_jobs()):
+ jid = job.id if hasattr(job, "id") else str(job)
+ if jid.startswith(JOB_ID_PREFIX):
+ try:
+ eventManager.remove(jid)
+ except Exception as e:
+ logger.debug("Could not remove job %s: %s", jid, e)
+
+
+def _cronToIntervalSeconds(cron: str):
+ """If cron represents a simple interval, return seconds. Otherwise None."""
+ if not cron or not isinstance(cron, str):
+ return None
+ parts = cron.strip().split()
+ if len(parts) == 5:
+ minute, hour, day, month, dow = parts
+ second = "0"
+ elif len(parts) == 6:
+ second, minute, hour, day, month, dow = parts
+ else:
+ return None
+ if minute.startswith("*/") and hour == "*" and day == "*" and month == "*" and dow == "*":
+ n = int(minute[2:]) if minute[2:].isdigit() else 0
+ if n > 0:
+ return n * 60
+ if minute == "*" and hour == "*" and day == "*" and month == "*" and dow == "*" and second == "0":
+ return 60
+ if minute == "0" and hour.startswith("*/") and day == "*" and month == "*" and dow == "*":
+ n = int(hour[2:]) if hour[2:].isdigit() else 0
+ if n > 0:
+ return n * 3600
+ if len(parts) == 6 and second.startswith("*/") and minute == "*" and hour == "*" and day == "*" and month == "*" and dow in ("*", "?"):
+ n = int(second[2:]) if second[2:].isdigit() else 0
+ if n > 0:
+ return n
+ return None
+
+
+def _notifyRunFailed(workflowId: str, runId: str, error: str, mandateId: str = None, workflowLabel: str = None) -> None:
+ """Notify on workflow run failure: emit event, create in-app notification, trigger email subscription."""
+ try:
+ eventManager.emit("graphicalEditor.run.failed", {
+ "workflowId": workflowId,
+ "runId": runId,
+ "error": error,
+ "mandateId": mandateId,
+ })
+ logger.info("Emitted run.failed event for run %s (workflow %s)", runId, workflowId)
+ except Exception as e:
+ logger.warning("Failed to emit run.failed event: %s", e)
+
+ _createRunFailedNotification(workflowId, runId, error, mandateId, workflowLabel)
+ _triggerRunFailedSubscription(workflowId, runId, error, mandateId, workflowLabel)
+
+
+def _createRunFailedNotification(
+ workflowId: str, runId: str, error: str, mandateId: str = None, workflowLabel: str = None
+) -> None:
+ """Create in-app notification for the workflow creator."""
+ try:
+ from modules.interfaces.interfaceDbApp import getRootInterface
+ from modules.datamodels.datamodelNotification import UserNotification, NotificationType, NotificationStatus
+
+ rootInterface = getRootInterface()
+ if not rootInterface:
+ return
+
+ from modules.features.graphicalEditor.interfaceFeatureGraphicalEditor import getGraphicalEditorInterface
+ eventUser = rootInterface.getUserByUsername("event")
+ if not eventUser:
+ return
+
+ iface = getGraphicalEditorInterface(eventUser, mandateId or "", "")
+ wf = iface.getWorkflow(workflowId)
+ if not wf:
+ return
+
+ creatorId = wf.get("sysCreatedBy") if isinstance(wf, dict) else getattr(wf, "sysCreatedBy", None)
+ if not creatorId:
+ return
+
+ label = workflowLabel or (wf.get("label") if isinstance(wf, dict) else getattr(wf, "label", ""))
+ notification = UserNotification(
+ userId=creatorId,
+ type=NotificationType.SYSTEM,
+ status=NotificationStatus.UNREAD,
+ title="Workflow fehlgeschlagen",
+ message=f"Workflow '{label or workflowId}' ist fehlgeschlagen: {error[:200]}",
+ referenceType="AutoRun",
+ referenceId=runId,
+ icon="alert-triangle",
+ )
+ rootInterface.db.recordCreate(
+ model_class=UserNotification,
+ record=notification.model_dump(),
+ )
+ logger.info("Created in-app notification for user %s (run %s)", creatorId, runId)
+ except Exception as e:
+ logger.warning("Failed to create in-app run.failed notification: %s", e)
+
+
+def _triggerRunFailedSubscription(
+ workflowId: str, runId: str, error: str, mandateId: str = None, workflowLabel: str = None
+) -> None:
+ """Trigger the messaging subscription for run failures (email notifications)."""
+ try:
+ from modules.serviceCenter import getService
+ from modules.serviceCenter.context import ServiceCenterContext
+ from modules.interfaces.interfaceDbApp import getRootInterface
+ from modules.datamodels.datamodelMessaging import MessagingEventParameters
+
+ rootInterface = getRootInterface()
+ if not rootInterface:
+ return
+ eventUser = rootInterface.getUserByUsername("event")
+ if not eventUser:
+ return
+
+ ctx = ServiceCenterContext(
+ user=eventUser,
+ mandate_id=mandateId or "",
+ feature_instance_id="",
+ feature_code="graphicalEditor",
+ )
+ messagingService = getService("messaging", ctx)
+
+ subscriptionId = "GraphicalEditorRunFailed"
+ eventParams = MessagingEventParameters(triggerData={
+ "workflowId": workflowId,
+ "workflowLabel": workflowLabel or workflowId,
+ "runId": runId,
+ "error": error,
+ "mandateId": mandateId or "",
+ })
+ result = messagingService.executeSubscription(subscriptionId, eventParams)
+ logger.info(
+ "Triggered run.failed subscription: sent=%d success=%s",
+ result.messagesSent, result.success,
+ )
+ except FileNotFoundError:
+ logger.debug("Subscription function GraphicalEditorRunFailed not found (not yet registered)")
+ except ValueError as e:
+ logger.debug("Subscription GraphicalEditorRunFailed: %s", e)
+ except Exception as e:
+ logger.warning("Failed to trigger run.failed subscription: %s", e)
+
+
+# Module-level singleton
+_scheduler = WorkflowScheduler()
+
+
+def start(eventUser) -> bool:
+ """Start the consolidated workflow scheduler."""
+ return _scheduler.start(eventUser)
+
+
+def stop() -> bool:
+ """Stop the consolidated workflow scheduler."""
+ return _scheduler.stop()
+
+
+def syncNow() -> dict:
+ """Trigger an immediate incremental sync. Used by /schedule-sync endpoint."""
+ return _scheduler._syncScheduledWorkflows()
+
+
+def setMainLoop(loop) -> None:
+ """Set the main event loop for thread-bridge."""
+ _setMainLoop(loop)
diff --git a/modules/workflows/workflowManager.py b/modules/workflows/workflowManager.py
index 3fa6a373..379283b8 100644
--- a/modules/workflows/workflowManager.py
+++ b/modules/workflows/workflowManager.py
@@ -942,7 +942,7 @@ The following is the user's original input message. Analyze intent, normalize th
# Persist task result for cross-task/round document references
# Convert ChatTaskResult to WorkflowTaskResult for persistence
- from modules.datamodels.datamodelWorkflow import TaskResult as WorkflowTaskResult
+ from modules.datamodels.datamodelWorkflow import WorkflowTaskResult
# Get final ActionResult from task execution (last action result)
finalActionResult = None
@@ -952,7 +952,6 @@ The following is the user's original input message. Analyze intent, normalize th
# Use last action result from context
finalActionResult = taskContext.previousActionResults[-1]
- # Create WorkflowTaskResult for persistence
if finalActionResult:
workflowTaskResult = WorkflowTaskResult(
taskId=taskStep.id,
diff --git a/scripts/build_ui_language_seed_json.py b/scripts/build_ui_language_seed_json.py
new file mode 100644
index 00000000..e610ea11
--- /dev/null
+++ b/scripts/build_ui_language_seed_json.py
@@ -0,0 +1,100 @@
+"""Build ui_language_seed.json from frontend_nyla locale TS files (one-off / CI)."""
+
+from __future__ import annotations
+
+import json
+import re
+from pathlib import Path
+
+_REPO = Path(__file__).resolve().parents[2]
+_SRC = _REPO / "frontend_nyla" / "src" / "locales"
+_OUT = _REPO / "gateway" / "modules" / "migration" / "seedData" / "ui_language_seed.json"
+
+
+def _unescape_ts_single_quoted(raw: str) -> str:
+ out: list[str] = []
+ i = 0
+ while i < len(raw):
+ c = raw[i]
+ if c == "\\" and i + 1 < len(raw):
+ n = raw[i + 1]
+ if n == "n":
+ out.append("\n")
+ i += 2
+ continue
+ if n == "r":
+ out.append("\r")
+ i += 2
+ continue
+ if n == "t":
+ out.append("\t")
+ i += 2
+ continue
+ out.append(n)
+ i += 2
+ continue
+ out.append(c)
+ i += 1
+ return "".join(out)
+
+
+def _parse_locale(path: Path) -> dict[str, str]:
+ text = path.read_text(encoding="utf-8")
+ mapping: dict[str, str] = {}
+ line_re = re.compile(
+ r"^\s*'((?:\\.|[^'])*)':\s*'((?:\\.|[^'])*)'\s*,?\s*(//.*)?$"
+ )
+ for line in text.splitlines():
+ m = line_re.match(line.strip())
+ if not m:
+ continue
+ key = _unescape_ts_single_quoted(m.group(1))
+ val = _unescape_ts_single_quoted(m.group(2))
+ mapping[key] = val
+ return mapping
+
+
+def main() -> None:
+ deMap = _parse_locale(_SRC / "de.ts")
+ enMap = _parse_locale(_SRC / "en.ts")
+ frMap = _parse_locale(_SRC / "fr.ts")
+
+ dePlain = {v: v for v in deMap.values()}
+ enPlain: dict[str, str] = {}
+ frPlain: dict[str, str] = {}
+ for dotKey, germanText in deMap.items():
+ if dotKey in enMap:
+ enPlain[germanText] = enMap[dotKey]
+ if dotKey in frMap:
+ frPlain[germanText] = frMap[dotKey]
+
+ payload = [
+ {
+ "id": "de",
+ "label": "Deutsch",
+ "keys": dePlain,
+ "status": "complete",
+ "isDefault": True,
+ },
+ {
+ "id": "en",
+ "label": "English",
+ "keys": enPlain,
+ "status": "complete",
+ "isDefault": False,
+ },
+ {
+ "id": "fr",
+ "label": "Français",
+ "keys": frPlain,
+ "status": "complete",
+ "isDefault": False,
+ },
+ ]
+ _OUT.parent.mkdir(parents=True, exist_ok=True)
+ _OUT.write_text(json.dumps(payload, ensure_ascii=False, indent=2), encoding="utf-8")
+ print("Wrote", _OUT, "keys de/en/fr", len(dePlain), len(enPlain), len(frPlain))
+
+
+if __name__ == "__main__":
+ main()
diff --git a/scripts/function_imports_analysis.txt b/scripts/function_imports_analysis.txt
deleted file mode 100644
index a3909ed4..00000000
--- a/scripts/function_imports_analysis.txt
+++ /dev/null
@@ -1,444 +0,0 @@
-================================================================================
-FUNCTION IMPORTS ANALYSIS
-================================================================================
-
-Total function imports (internal modules): 229
- - CIRCULAR (must stay): 4
- - REDUNDANT (can remove): 0
- - MOVABLE (can move): 225
-
-
-================================================================================
-MOVABLE TO HEADER (grouped by source module)
-These imports could potentially be moved to the module header.
-================================================================================
-
-gateway.app
------------
- [lifespan] modules.shared.auditLogger
-
-gateway.modules.auth.authentication
------------------------------------
- [requireSysAdmin] modules.shared.auditLogger
-
-gateway.modules.auth.tokenManager
----------------------------------
- [getFreshToken] modules.interfaces.interfaceDbApp
- [getFreshToken] modules.security.rootAccess
-
-gateway.modules.auth.tokenRefreshService
-----------------------------------------
- [_refresh_google_token] modules.auth.tokenManager
- [_refresh_microsoft_token] modules.auth.tokenManager
- [proactive_refresh] modules.interfaces.interfaceDbApp
- [refresh_expired_tokens] modules.interfaces.interfaceDbApp
- [proactive_refresh] modules.security.rootAccess
- [refresh_expired_tokens] modules.security.rootAccess
-
-gateway.modules.datamodels.datamodelChat
-----------------------------------------
- [updateFromSelection] modules.datamodels.datamodelWorkflow
-
-gateway.modules.features.aichat.mainAiChat
-------------------------------------------
- [onStart] modules.aicore.aicoreModelRegistry
-
-gateway.modules.features.automation.routeFeatureAutomation
-----------------------------------------------------------
- [execute_automation] modules.services
-
-gateway.modules.features.chatbot.datamodelFeatureChatbot
---------------------------------------------------------
- [updateFromSelection] modules.datamodels.datamodelWorkflow
-
-gateway.modules.features.chatbot.interfaceFeatureChatbot
---------------------------------------------------------
- [createLog] modules.features.chatbot.eventManager
- [createMessage] modules.features.chatbot.eventManager
- [_enrichAutomationsWithUserAndMandate] modules.interfaces.interfaceDbApp
- [storeDebugMessageAndDocuments] modules.interfaces.interfaceDbManagement
- [setUserContext] modules.security.rootAccess
- [_notifyAutomationChanged] modules.shared.callbackRegistry
- [storeDebugMessageAndDocuments] modules.shared.debugLogger
- [deleteAutomationDefinition] modules.shared.eventManagement
-
-gateway.modules.features.chatbot.mainChatbot
---------------------------------------------
- [_convert_file_ids_to_document_references] modules.interfaces.interfaceRbac
-
-gateway.modules.features.neutralizer.mainNeutralizePlayground
--------------------------------------------------------------
- [processSharepointFiles] modules.services.serviceSharepoint.mainServiceSharepoint
-
-gateway.modules.features.realestate.interfaceFeatureRealEstate
---------------------------------------------------------------
- [setUserContext] modules.security.rootAccess
-
-gateway.modules.features.realestate.mainRealEstate
---------------------------------------------------
- [executeIntentBasedOperation] modules.features.realestate.datamodelFeatureRealEstate
-
-gateway.modules.features.trustee.interfaceFeatureTrustee
---------------------------------------------------------
- [setUserContext] modules.security.rootAccess
-
-gateway.modules.interfaces.interfaceBootstrap
----------------------------------------------
- [_applyDatabaseOptimizations] modules.shared.dbMultiTenantOptimizations
-
-gateway.modules.interfaces.interfaceDbApp
------------------------------------------
- [getRootInterface] modules.security.rootAccess
-
-gateway.modules.interfaces.interfaceDbChat
-------------------------------------------
- [_enrichAutomationsWithUserAndMandate] modules.interfaces.interfaceDbApp
- [storeDebugMessageAndDocuments] modules.interfaces.interfaceDbManagement
- [setUserContext] modules.security.rootAccess
- [_notifyAutomationChanged] modules.shared.callbackRegistry
- [storeDebugMessageAndDocuments] modules.shared.debugLogger
-
-gateway.modules.interfaces.interfaceDbManagement
-------------------------------------------------
- [_initializeStandardPrompts] modules.interfaces.interfaceDbApp
- [_initializeStandardPrompts] modules.security.rootAccess
- [setUserContext] modules.security.rootAccess
-
-gateway.modules.interfaces.interfaceFeatures
---------------------------------------------
- [syncRolesFromTemplate] modules.datamodels.datamodelMembership
-
-gateway.modules.interfaces.interfaceRbac
-----------------------------------------
- [getRecordsetWithRBAC] modules.connectors.connectorDbPostgre
-
-gateway.modules.interfaces.interfaceTicketObjects
--------------------------------------------------
- [createTicketInterfaceByType] modules.connectors.connectorTicketsClickup
- [createTicketInterfaceByType] modules.connectors.connectorTicketsJira
-
-gateway.modules.routes.routeAdminAutomationEvents
--------------------------------------------------
- [sync_all_automation_events] modules.interfaces.interfaceDbApp
- [sync_all_automation_events] modules.services
- [get_all_automation_events] modules.shared.eventManagement
- [remove_event] modules.shared.eventManagement
- [sync_all_automation_events] modules.workflows.automation
-
-gateway.modules.routes.routeAdminFeatures
------------------------------------------
- [_getInstancePermissions] modules.datamodels.datamodelMembership
- [_getUserRoleInInstance] modules.datamodels.datamodelMembership
- [addUserToFeatureInstance] modules.datamodels.datamodelMembership
- [listFeatureInstanceUsers] modules.datamodels.datamodelMembership
- [removeUserFromFeatureInstance] modules.datamodels.datamodelMembership
- [updateFeatureInstanceUserRoles] modules.datamodels.datamodelMembership
- [_getInstancePermissions] modules.datamodels.datamodelRbac
- [_getUserRoleInInstance] modules.datamodels.datamodelRbac
- [_hasMandateAdminRole] modules.datamodels.datamodelRbac
- [getFeatureInstanceAvailableRoles] modules.datamodels.datamodelRbac
- [listFeatureInstanceUsers] modules.datamodels.datamodelRbac
-
-gateway.modules.routes.routeDataUsers
--------------------------------------
- [delete_user] modules.datamodels.datamodelMembership
- [get_user] modules.datamodels.datamodelMembership
- [reset_user_password] modules.datamodels.datamodelMembership
- [sendPasswordLink] modules.datamodels.datamodelMembership
- [update_user] modules.datamodels.datamodelMembership
- [sendPasswordLink] modules.services
- [change_password] modules.shared.auditLogger
- [reset_user_password] modules.shared.auditLogger
- [sendPasswordLink] modules.shared.auditLogger
- [sendPasswordLink] modules.shared.configuration
-
-gateway.modules.routes.routeDataWorkflows
------------------------------------------
- [get_action_schema] modules.services
- [get_all_actions] modules.services
- [get_method_actions] modules.services
- [get_action_schema] modules.workflows.processing.shared.methodDiscovery
- [get_all_actions] modules.workflows.processing.shared.methodDiscovery
- [get_method_actions] modules.workflows.processing.shared.methodDiscovery
-
-gateway.modules.routes.routeGdpr
---------------------------------
- [exportUserData] modules.datamodels.datamodelFeatures
- [deleteAccount] modules.datamodels.datamodelInvitation
- [exportUserData] modules.datamodels.datamodelInvitation
- [deleteAccount] modules.datamodels.datamodelMembership
- [exportPortableData] modules.datamodels.datamodelMembership
- [exportUserData] modules.datamodels.datamodelMembership
- [deleteAccount] modules.datamodels.datamodelSecurity
-
-gateway.modules.routes.routeInvitations
----------------------------------------
- [createInvitation] modules.datamodels.datamodelFeatures
- [_hasMandateAdminRole] modules.datamodels.datamodelRbac
- [_isInstanceRole] modules.datamodels.datamodelRbac
- [createInvitation] modules.datamodels.datamodelRbac
- [registerAndAcceptInvitation] modules.security.passwordUtils
- [createInvitation] modules.shared.configuration
- [listInvitations] modules.shared.configuration
-
-gateway.modules.routes.routeMessaging
--------------------------------------
- [_hasTriggerPermission] modules.interfaces.interfaceDbApp
- [triggerSubscription] modules.services
-
-gateway.modules.routes.routeSecurityAdmin
------------------------------------------
- [revoke_tokens_by_mandate] modules.datamodels.datamodelMembership
-
-gateway.modules.routes.routeSecurityGoogle
-------------------------------------------
- [auth_callback] modules.datamodels.datamodelSecurity
- [logout] modules.shared.auditLogger
-
-gateway.modules.routes.routeSecurityLocal
------------------------------------------
- [_sendAuthEmail] modules.datamodels.datamodelMessaging
- [_sendAuthEmail] modules.interfaces.interfaceMessaging
- [login] modules.shared.auditLogger
- [logout] modules.shared.auditLogger
- [passwordReset] modules.shared.auditLogger
-
-gateway.modules.routes.routeSecurityMsft
-----------------------------------------
- [logout] modules.shared.auditLogger
-
-gateway.modules.security.rootAccess
------------------------------------
- [_ensureBootstrap] modules.interfaces.interfaceBootstrap
-
-gateway.modules.services.__init__
----------------------------------
- [__init__] modules.interfaces.interfaceDbApp
- [__init__] modules.interfaces.interfaceDbChat
- [__init__] modules.interfaces.interfaceDbManagement
-
-gateway.modules.services.serviceAi.mainAiChat
----------------------------------------------
- [onStart] modules.aicore.aicoreModelRegistry
-
-gateway.modules.services.serviceAi.mainServiceAi
-------------------------------------------------
- [renderResult] modules.services.serviceGeneration.mainServiceGeneration
- [_handleCodeGeneration] modules.services.serviceGeneration.paths.codePath
- [_handleDocumentGeneration] modules.services.serviceGeneration.paths.documentPath
- [_handleImageGeneration] modules.services.serviceGeneration.paths.imagePath
-
-gateway.modules.services.serviceAi.subContentExtraction
--------------------------------------------------------
- [extractTextFromImage] modules.datamodels.datamodelAi
- [processTextContentWithAi] modules.datamodels.datamodelAi
-
-gateway.modules.services.serviceAi.subJsonResponseHandling
-----------------------------------------------------------
- [mergeFragmentIntoSection] modules.shared.debugLogger
-
-gateway.modules.services.serviceAi.subStructureFilling
-------------------------------------------------------
- [_getAcceptedSectionTypesForFormat] modules.datamodels.datamodelJson
- [_getAcceptedSectionTypesForFormat] modules.services.serviceGeneration.renderers.registry
- [buildSectionPromptWithContinuation] modules.shared.jsonContinuation
- [_extractAndMergeMultipleJsonBlocks] modules.shared.jsonUtils
- [_processAiResponseForSection] modules.shared.jsonUtils
- [_processSingleSection] modules.shared.jsonUtils
-
-gateway.modules.services.serviceAi.subStructureGeneration
----------------------------------------------------------
- [generateStructure] modules.services.serviceGeneration.renderers.registry
- [generateStructure] modules.shared
- [generateStructure] modules.shared.jsonContinuation
-
-gateway.modules.services.serviceChat.mainServiceChat
-----------------------------------------------------
- [getChatDocumentsFromDocumentList] modules.datamodels.datamodelDocref
-
-gateway.modules.services.serviceExtraction.mainServiceExtraction
-----------------------------------------------------------------
- [extractContent] modules.interfaces.interfaceDbManagement
- [extractContent] modules.shared.debugLogger
-
-gateway.modules.services.serviceExtraction.subPromptBuilderExtraction
----------------------------------------------------------------------
- [buildExtractionPrompt] modules.shared.debugLogger
-
-gateway.modules.services.serviceGeneration.mainServiceGeneration
-----------------------------------------------------------------
- [getAdaptiveExtractionPrompt] modules.services.serviceExtraction.subPromptBuilderExtraction
- [renderReport] modules.services.serviceGeneration.renderers.registry
- [generateDocumentWithTwoPhases] modules.services.serviceGeneration.subContentGenerator
- [generateDocumentWithTwoPhases] modules.services.serviceGeneration.subStructureGenerator
-
-gateway.modules.services.serviceGeneration.paths.codePath
----------------------------------------------------------
- [generateCode] modules.datamodels.datamodelDocument
- [_getCodeRenderer] modules.services.serviceGeneration.renderers.registry
- [_generateCodeStructure] modules.shared.jsonContinuation
- [_generateSingleFileContent] modules.shared.jsonContinuation
-
-gateway.modules.services.serviceGeneration.renderers.rendererDocx
------------------------------------------------------------------
- [getAcceptedSectionTypes] modules.datamodels.datamodelJson
-
-gateway.modules.services.serviceGeneration.renderers.rendererHtml
------------------------------------------------------------------
- [getAcceptedSectionTypes] modules.datamodels.datamodelJson
-
-gateway.modules.services.serviceGeneration.renderers.rendererImage
-------------------------------------------------------------------
- [_compressPromptWithAi] modules.datamodels.datamodelAi
- [_generateAiImage] modules.datamodels.datamodelAi
-
-gateway.modules.services.serviceGeneration.renderers.rendererJson
------------------------------------------------------------------
- [getAcceptedSectionTypes] modules.datamodels.datamodelJson
-
-gateway.modules.services.serviceGeneration.renderers.rendererMarkdown
----------------------------------------------------------------------
- [getAcceptedSectionTypes] modules.datamodels.datamodelJson
-
-gateway.modules.services.serviceGeneration.renderers.rendererPdf
-----------------------------------------------------------------
- [_getAiStylesWithPdfColors] modules.datamodels.datamodelAi
- [getAcceptedSectionTypes] modules.datamodels.datamodelJson
-
-gateway.modules.services.serviceGeneration.renderers.rendererPptx
------------------------------------------------------------------
- [getAcceptedSectionTypes] modules.datamodels.datamodelJson
-
-gateway.modules.services.serviceGeneration.renderers.rendererText
------------------------------------------------------------------
- [getAcceptedSectionTypes] modules.datamodels.datamodelJson
-
-gateway.modules.services.serviceGeneration.renderers.rendererXlsx
------------------------------------------------------------------
- [_getAiStylesWithExcelColors] modules.datamodels.datamodelAi
- [getAcceptedSectionTypes] modules.datamodels.datamodelJson
-
-gateway.modules.services.serviceGeneration.subContentGenerator
---------------------------------------------------------------
- [_generateImageSection] modules.datamodels.datamodelAi
- [_generateSimpleSection] modules.datamodels.datamodelAi
- [_generateSimpleSection] modules.shared.jsonUtils
-
-gateway.modules.services.serviceGeneration.subStructureGenerator
-----------------------------------------------------------------
- [generateStructure] modules.datamodels.datamodelAi
-
-gateway.modules.services.serviceUtils.mainServiceUtils
-------------------------------------------------------
- [storeDebugMessageAndDocuments] modules.interfaces.interfaceDbChat
- [debugLogToFile] modules.shared.debugLogger
- [writeDebugArtifact] modules.shared.debugLogger
- [writeDebugFile] modules.shared.debugLogger
-
-gateway.modules.shared.auditLogger
-----------------------------------
- [_ensureInitialized] modules.datamodels.datamodelAudit
- [cleanupOldEntries] modules.datamodels.datamodelAudit
- [getAuditLogs] modules.datamodels.datamodelAudit
- [logEvent] modules.datamodels.datamodelAudit
- [registerAuditLogCleanupScheduler] modules.shared.eventManagement
-
-gateway.modules.shared.debugLogger
-----------------------------------
- [debugLogToFile] modules.shared.timeUtils
-
-gateway.modules.shared.jsonUtils
---------------------------------
- [buildContinuationContext] modules.shared.jsonContinuation
-
-gateway.modules.workflows.automation.subAutomationSchedule
-----------------------------------------------------------
- [start] modules.shared.callbackRegistry
- [start] modules.workflows.automation
-
-gateway.modules.workflows.methods.methodAi.actions.generateCode
----------------------------------------------------------------
- [generateCode] modules.datamodels.datamodelDocref
-
-gateway.modules.workflows.methods.methodAi.actions.generateDocument
--------------------------------------------------------------------
- [generateDocument] modules.datamodels.datamodelDocref
-
-gateway.modules.workflows.methods.methodAi.actions.process
-----------------------------------------------------------
- [process] modules.datamodels.datamodelDocref
- [process] modules.datamodels.datamodelWorkflow
-
-gateway.modules.workflows.methods.methodChatbot.actions.queryDatabase
----------------------------------------------------------------------
- [queryDatabase] modules.datamodels.datamodelDocref
-
-gateway.modules.workflows.methods.methodOutlook.actions.composeAndDraftEmailWithContext
----------------------------------------------------------------------------------------
- [composeAndDraftEmailWithContext] modules.datamodels.datamodelDocref
-
-gateway.modules.workflows.methods.methodOutlook.actions.sendDraftEmail
-----------------------------------------------------------------------
- [sendDraftEmail] modules.datamodels.datamodelDocref
-
-gateway.modules.workflows.methods.methodSharepoint.actions.copyFile
--------------------------------------------------------------------
- [copyFile] modules.datamodels.datamodelDocref
-
-gateway.modules.workflows.methods.methodSharepoint.actions.downloadFileByPath
------------------------------------------------------------------------------
- [downloadFileByPath] modules.datamodels.datamodelDocref
-
-gateway.modules.workflows.methods.methodSharepoint.actions.uploadFile
----------------------------------------------------------------------
- [uploadFile] modules.datamodels.datamodelDocref
-
-gateway.modules.workflows.methods.methodSharepoint.helpers.documentParsing
---------------------------------------------------------------------------
- [parseDocumentListForFolder] modules.datamodels.datamodelDocref
- [parseDocumentListForFoundDocuments] modules.datamodels.datamodelDocref
-
-gateway.modules.workflows.processing.core.actionExecutor
---------------------------------------------------------
- [_createActionCompletionMessage] modules.workflows.processing.core.messageCreator
-
-gateway.modules.workflows.processing.modes.modeDynamic
-------------------------------------------------------
- [_actExecute] modules.datamodels.datamodelAi
- [_planSelect] modules.datamodels.datamodelAi
- [_refineDecide] modules.datamodels.datamodelAi
- [_actExecute] modules.datamodels.datamodelDocref
- [_planSelect] modules.datamodels.datamodelDocref
- [_actExecute] modules.datamodels.datamodelWorkflow
- [_planSelect] modules.datamodels.datamodelWorkflow
- [_actExecute] modules.shared.jsonUtils
- [_planSelect] modules.shared.jsonUtils
- [_refineDecide] modules.shared.jsonUtils
- [_actExecute] modules.workflows.processing.shared.methodDiscovery
-
-gateway.modules.workflows.processing.shared.placeholderFactory
---------------------------------------------------------------
- [extractReviewContent] modules.datamodels.datamodelChat
- [extractLatestRefinementFeedback] modules.interfaces.interfaceDbApp
- [extractLatestRefinementFeedback] modules.interfaces.interfaceDbChat
-
-gateway.modules.workflows.workflowManager
------------------------------------------
- [_executeTasks] modules.datamodels.datamodelWorkflow
- [workflowStart] modules.workflows.processing.shared.methodDiscovery
- [_checkIfHistoryAvailable] modules.workflows.processing.shared.placeholderFactory
-
-
-================================================================================
-CIRCULAR DEPENDENCY (must stay in function)
-================================================================================
-
-gateway.modules.shared.auditLogger
-----------------------------------
- [_ensureInitialized] modules.connectors.connectorDbPostgre
-
-gateway.modules.shared.configuration
-------------------------------------
- [decryptValue] modules.shared.auditLogger
- [encryptValue] modules.shared.auditLogger
- [get] modules.shared.auditLogger
\ No newline at end of file
diff --git a/scripts/i18n_rekey_plaintext_keys.py b/scripts/i18n_rekey_plaintext_keys.py
new file mode 100644
index 00000000..cf0e7362
--- /dev/null
+++ b/scripts/i18n_rekey_plaintext_keys.py
@@ -0,0 +1,136 @@
+"""
+Rekey frontend t('dot.notation') -> t('Deutscher Klartext') using locales/de.ts mapping.
+
+Usage (from repo root):
+ python gateway/scripts/i18n_rekey_plaintext_keys.py
+
+Excludes: src/locales/. The script edits matched source files in place.
+"""
+
+from __future__ import annotations
+
+import re
+import sys
+from pathlib import Path
+
+
+_REPO = Path(__file__).resolve().parents[2]
+_SRC = _REPO / "frontend_nyla" / "src"
+_DE_FILE = _SRC / "locales" / "de.ts"
+
+
+def _unescape_ts_single_quoted(raw: str) -> str:
+ out: list[str] = []
+ i = 0
+ while i < len(raw):
+ c = raw[i]
+ if c == "\\" and i + 1 < len(raw):
+ n = raw[i + 1]
+ if n == "n":
+ out.append("\n")
+ i += 2
+ continue
+ if n == "r":
+ out.append("\r")
+ i += 2
+ continue
+ if n == "t":
+ out.append("\t")
+ i += 2
+ continue
+ out.append(n)
+ i += 2
+ continue
+ out.append(c)
+ i += 1
+ return "".join(out)
+
+
+def _escape_for_ts_single_quoted(s: str) -> str:
+ return (
+ s.replace("\\", "\\\\")
+ .replace("'", "\\'")
+ .replace("\n", "\\n")
+ .replace("\r", "\\r")
+ .replace("\t", "\\t")
+ )
+
+
def _parse_de_ts(path: Path) -> dict[str, str]:
    """Extract 'dot.key': 'German text' pairs from the de.ts locale file.

    Lines that are not single-quoted key/value entries (imports, braces,
    nested structure, standalone comments) are ignored.
    """
    pair_pattern = re.compile(
        r"^\s*'((?:\\.|[^'])*)':\s*'((?:\\.|[^'])*)'\s*,?\s*(//.*)?$"
    )
    entries: dict[str, str] = {}
    for raw_line in path.read_text(encoding="utf-8").splitlines():
        found = pair_pattern.match(raw_line.strip())
        if not found:
            continue
        dot_key = _unescape_ts_single_quoted(found.group(1))
        entries[dot_key] = _unescape_ts_single_quoted(found.group(2))
    return entries
+
+
def _iter_source_files():
    """Yield every .tsx/.ts file under _SRC, skipping the locales/ tree."""
    for pattern in ("*.tsx", "*.ts"):
        for candidate in _SRC.rglob(pattern):
            relative = candidate.relative_to(_SRC).as_posix()
            if relative.startswith("locales/"):
                continue
            yield candidate
+
+
+def _rekey_content(content: str, mapping: dict[str, str]) -> tuple[str, int]:
+ changes = 0
+ keys = sorted(mapping.keys(), key=len, reverse=True)
+ for key in keys:
+ if f"'{key}'" not in content:
+ continue
+ german = mapping[key]
+ escaped = _escape_for_ts_single_quoted(german)
+ repl_single = f"t('{escaped}')"
+ key_re = re.escape(key)
+
+ # t('key', "..." )
+ content, c = re.subn(
+ rf"t\(\s*'{key_re}'\s*,\s*\"(?:\\.|[^\"])*\"\s*\)",
+ repl_single,
+ content,
+ )
+ changes += c
+
+ # t('key', '...' )
+ content, c = re.subn(
+ rf"t\(\s*'{key_re}'\s*,\s*'(?:\\.|[^'])*'\s*\)",
+ repl_single,
+ content,
+ )
+ changes += c
+
+ # t('key')
+ content, c = re.subn(rf"t\(\s*'{key_re}'\s*\)", repl_single, content)
+ changes += c
+ return content, changes
+
+
def main() -> int:
    """Rekey all frontend source files in place; return a process exit code."""
    if not _DE_FILE.is_file():
        print("Missing", _DE_FILE, file=sys.stderr)
        return 1
    mapping = _parse_de_ts(_DE_FILE)
    print("Loaded", len(mapping), "entries from de.ts")
    total = 0
    for source_path in _iter_source_files():
        original = source_path.read_text(encoding="utf-8")
        rewritten, count = _rekey_content(original, mapping)
        if count and rewritten != original:
            source_path.write_text(rewritten, encoding="utf-8", newline="\n")
            print(source_path.relative_to(_REPO), count, "replacements")
            total += count
    print("Done. Total replacements:", total)
    return 0


if __name__ == "__main__":
    raise SystemExit(main())
diff --git a/scripts/import_analysis.csv b/scripts/import_analysis.csv
deleted file mode 100644
index 5fbe59c0..00000000
--- a/scripts/import_analysis.csv
+++ /dev/null
@@ -1,2685 +0,0 @@
-module_name,imported_module_name,position,import_valid
-gateway.app,contextlib,header,Yes
-gateway.app,datetime,header,Yes
-gateway.app,fastapi,header,Yes
-gateway.app,fastapi.middleware.cors,header,Yes
-gateway.app,fastapi.openapi.utils,function customOpenapi,Yes
-gateway.app,fastapi.security,header,Yes
-gateway.app,logging,header,Yes
-gateway.app,logging.handlers,header,Yes
-gateway.app,modules.auth,header,Yes
-gateway.app,modules.auth,header,Yes
-gateway.app,modules.features.featureRegistry,header,Yes
-gateway.app,modules.features.featureRegistry,header,Yes
-gateway.app,modules.interfaces.interfaceDbApp,header,Yes
-gateway.app,modules.routes.routeAdmin,header,Yes
-gateway.app,modules.routes.routeAdminAutomationEvents,header,Yes
-gateway.app,modules.routes.routeAdminFeatures,header,Yes
-gateway.app,modules.routes.routeAdminRbacExport,header,Yes
-gateway.app,modules.routes.routeAdminRbacRules,header,Yes
-gateway.app,modules.routes.routeAttributes,header,Yes
-gateway.app,modules.routes.routeChat,header,Yes
-gateway.app,modules.routes.routeDataConnections,header,Yes
-gateway.app,modules.routes.routeDataFiles,header,Yes
-gateway.app,modules.routes.routeDataMandates,header,Yes
-gateway.app,modules.routes.routeDataPrompts,header,Yes
-gateway.app,modules.routes.routeDataUsers,header,Yes
-gateway.app,modules.routes.routeDataWorkflows,header,Yes
-gateway.app,modules.routes.routeGdpr,header,Yes
-gateway.app,modules.routes.routeInvitations,header,Yes
-gateway.app,modules.routes.routeMessaging,header,Yes
-gateway.app,modules.routes.routeSecurityAdmin,header,Yes
-gateway.app,modules.routes.routeSecurityGoogle,header,Yes
-gateway.app,modules.routes.routeSecurityLocal,header,Yes
-gateway.app,modules.routes.routeSecurityMsft,header,Yes
-gateway.app,modules.routes.routeSharepoint,header,Yes
-gateway.app,modules.routes.routeVoiceGoogle,header,Yes
-gateway.app,modules.shared.auditLogger,function lifespan,Yes
-gateway.app,modules.shared.configuration,header,Yes
-gateway.app,modules.shared.eventManagement,header,Yes
-gateway.app,modules.workflows.automation,header,Yes
-gateway.app,os,header,Yes
-gateway.app,sys,header,Yes
-gateway.app,unicodedata,header,Yes
-gateway.app,urllib.parse,header,Yes
-gateway.modules.aicore.aicoreBase,abc,header,Yes
-gateway.modules.aicore.aicoreBase,modules.datamodels.datamodelAi,header,Yes
-gateway.modules.aicore.aicoreBase,time,function getCachedModels,Yes
-gateway.modules.aicore.aicoreBase,typing,header,Yes
-gateway.modules.aicore.aicoreModelRegistry,(relative) .aicoreBase,header,Yes
-gateway.modules.aicore.aicoreModelRegistry,importlib,header,Yes
-gateway.modules.aicore.aicoreModelRegistry,logging,header,Yes
-gateway.modules.aicore.aicoreModelRegistry,modules.connectors.connectorDbPostgre,header,Yes
-gateway.modules.aicore.aicoreModelRegistry,modules.datamodels.datamodelAi,header,Yes
-gateway.modules.aicore.aicoreModelRegistry,modules.datamodels.datamodelUam,header,Yes
-gateway.modules.aicore.aicoreModelRegistry,modules.security.rbac,header,Yes
-gateway.modules.aicore.aicoreModelRegistry,modules.security.rbacHelpers,header,Yes
-gateway.modules.aicore.aicoreModelRegistry,os,header,Yes
-gateway.modules.aicore.aicoreModelRegistry,time,function refreshModels,Yes
-gateway.modules.aicore.aicoreModelRegistry,typing,header,Yes
-gateway.modules.aicore.aicoreModelSelector,logging,header,Yes
-gateway.modules.aicore.aicoreModelSelector,modules.datamodels.datamodelAi,header,Yes
-gateway.modules.aicore.aicoreModelSelector,typing,header,Yes
-gateway.modules.aicore.aicorePluginAnthropic,(relative) .aicoreBase,header,Yes
-gateway.modules.aicore.aicorePluginAnthropic,base64,function callAiImage,Yes
-gateway.modules.aicore.aicorePluginAnthropic,fastapi,header,Yes
-gateway.modules.aicore.aicorePluginAnthropic,httpx,header,Yes
-gateway.modules.aicore.aicorePluginAnthropic,logging,header,Yes
-gateway.modules.aicore.aicorePluginAnthropic,modules.datamodels.datamodelAi,header,Yes
-gateway.modules.aicore.aicorePluginAnthropic,modules.shared.configuration,header,Yes
-gateway.modules.aicore.aicorePluginAnthropic,os,header,Yes
-gateway.modules.aicore.aicorePluginAnthropic,time,function callAiImage,Yes
-gateway.modules.aicore.aicorePluginAnthropic,typing,header,Yes
-gateway.modules.aicore.aicorePluginInternal,(relative) .aicoreBase,header,Yes
-gateway.modules.aicore.aicorePluginInternal,logging,header,Yes
-gateway.modules.aicore.aicorePluginInternal,modules.datamodels.datamodelAi,header,Yes
-gateway.modules.aicore.aicorePluginInternal,typing,header,Yes
-gateway.modules.aicore.aicorePluginOpenai,(relative) .aicoreBase,header,Yes
-gateway.modules.aicore.aicorePluginOpenai,fastapi,header,Yes
-gateway.modules.aicore.aicorePluginOpenai,httpx,header,Yes
-gateway.modules.aicore.aicorePluginOpenai,json,function generateImage,Yes
-gateway.modules.aicore.aicorePluginOpenai,logging,header,Yes
-gateway.modules.aicore.aicorePluginOpenai,modules.datamodels.datamodelAi,header,Yes
-gateway.modules.aicore.aicorePluginOpenai,modules.shared.configuration,header,Yes
-gateway.modules.aicore.aicorePluginOpenai,typing,header,Yes
-gateway.modules.aicore.aicorePluginPerplexity,(relative) .aicoreBase,header,Yes
-gateway.modules.aicore.aicorePluginPerplexity,fastapi,header,Yes
-gateway.modules.aicore.aicorePluginPerplexity,httpx,header,Yes
-gateway.modules.aicore.aicorePluginPerplexity,json,function webSearch,Yes
-gateway.modules.aicore.aicorePluginPerplexity,json,function webCrawl,Yes
-gateway.modules.aicore.aicorePluginPerplexity,json,function webCrawl,Yes
-gateway.modules.aicore.aicorePluginPerplexity,logging,header,Yes
-gateway.modules.aicore.aicorePluginPerplexity,modules.datamodels.datamodelAi,header,Yes
-gateway.modules.aicore.aicorePluginPerplexity,modules.datamodels.datamodelTools,header,Yes
-gateway.modules.aicore.aicorePluginPerplexity,modules.shared.configuration,header,Yes
-gateway.modules.aicore.aicorePluginPerplexity,typing,header,Yes
-gateway.modules.aicore.aicorePluginTavily,(relative) .aicoreBase,header,Yes
-gateway.modules.aicore.aicorePluginTavily,asyncio,header,Yes
-gateway.modules.aicore.aicorePluginTavily,dataclasses,header,Yes
-gateway.modules.aicore.aicorePluginTavily,json,function webSearch,Yes
-gateway.modules.aicore.aicorePluginTavily,json,function webSearch,Yes
-gateway.modules.aicore.aicorePluginTavily,json,function webCrawl,Yes
-gateway.modules.aicore.aicorePluginTavily,json,function webCrawl,Yes
-gateway.modules.aicore.aicorePluginTavily,json,function webSearch,Yes
-gateway.modules.aicore.aicorePluginTavily,json,function webCrawl,Yes
-gateway.modules.aicore.aicorePluginTavily,logging,header,Yes
-gateway.modules.aicore.aicorePluginTavily,modules.datamodels.datamodelAi,header,Yes
-gateway.modules.aicore.aicorePluginTavily,modules.datamodels.datamodelTools,header,Yes
-gateway.modules.aicore.aicorePluginTavily,modules.shared.configuration,header,Yes
-gateway.modules.aicore.aicorePluginTavily,re,header,Yes
-gateway.modules.aicore.aicorePluginTavily,re,function _cleanUrl,Yes
-gateway.modules.aicore.aicorePluginTavily,tavily,header,Yes
-gateway.modules.aicore.aicorePluginTavily,typing,header,Yes
-gateway.modules.aicore.aicorePluginTavily,urllib.parse,function _normalizeUrl,Yes
-gateway.modules.auth.__init__,(relative) .authentication,header,Yes
-gateway.modules.auth.__init__,(relative) .csrf,header,Yes
-gateway.modules.auth.__init__,(relative) .jwtService,header,Yes
-gateway.modules.auth.__init__,(relative) .tokenManager,header,Yes
-gateway.modules.auth.__init__,(relative) .tokenRefreshMiddleware,header,Yes
-gateway.modules.auth.__init__,(relative) .tokenRefreshService,header,Yes
-gateway.modules.auth.authentication,fastapi,header,Yes
-gateway.modules.auth.authentication,fastapi.security,header,Yes
-gateway.modules.auth.authentication,jose,header,Yes
-gateway.modules.auth.authentication,logging,header,Yes
-gateway.modules.auth.authentication,modules.datamodels.datamodelRbac,header,Yes
-gateway.modules.auth.authentication,modules.datamodels.datamodelSecurity,header,Yes
-gateway.modules.auth.authentication,modules.datamodels.datamodelUam,header,Yes
-gateway.modules.auth.authentication,modules.interfaces.interfaceDbApp,header,Yes
-gateway.modules.auth.authentication,modules.security.rootAccess,header,Yes
-gateway.modules.auth.authentication,modules.shared.auditLogger,function requireSysAdmin,Yes
-gateway.modules.auth.authentication,modules.shared.configuration,header,Yes
-gateway.modules.auth.authentication,slowapi,header,Yes
-gateway.modules.auth.authentication,slowapi.util,header,Yes
-gateway.modules.auth.authentication,typing,header,Yes
-gateway.modules.auth.csrf,fastapi,header,Yes
-gateway.modules.auth.csrf,fastapi.responses,function dispatch,Yes
-gateway.modules.auth.csrf,fastapi.responses,function dispatch,Yes
-gateway.modules.auth.csrf,logging,header,Yes
-gateway.modules.auth.csrf,starlette.middleware.base,header,Yes
-gateway.modules.auth.csrf,typing,header,Yes
-gateway.modules.auth.jwtService,datetime,header,Yes
-gateway.modules.auth.jwtService,fastapi,header,Yes
-gateway.modules.auth.jwtService,jose,header,Yes
-gateway.modules.auth.jwtService,modules.shared.configuration,header,Yes
-gateway.modules.auth.jwtService,modules.shared.timeUtils,header,Yes
-gateway.modules.auth.jwtService,typing,header,Yes
-gateway.modules.auth.jwtService,uuid,function createAccessToken,Yes
-gateway.modules.auth.jwtService,uuid,function createRefreshToken,Yes
-gateway.modules.auth.tokenManager,httpx,header,Yes
-gateway.modules.auth.tokenManager,logging,header,Yes
-gateway.modules.auth.tokenManager,modules.datamodels.datamodelSecurity,header,Yes
-gateway.modules.auth.tokenManager,modules.datamodels.datamodelUam,header,Yes
-gateway.modules.auth.tokenManager,modules.interfaces.interfaceDbApp,function getFreshToken,Yes
-gateway.modules.auth.tokenManager,modules.security.rootAccess,function getFreshToken,Yes
-gateway.modules.auth.tokenManager,modules.shared.configuration,header,Yes
-gateway.modules.auth.tokenManager,modules.shared.timeUtils,header,Yes
-gateway.modules.auth.tokenManager,typing,header,Yes
-gateway.modules.auth.tokenRefreshMiddleware,asyncio,header,Yes
-gateway.modules.auth.tokenRefreshMiddleware,fastapi,header,Yes
-gateway.modules.auth.tokenRefreshMiddleware,logging,header,Yes
-gateway.modules.auth.tokenRefreshMiddleware,modules.auth.tokenRefreshService,header,Yes
-gateway.modules.auth.tokenRefreshMiddleware,modules.shared.timeUtils,header,Yes
-gateway.modules.auth.tokenRefreshMiddleware,starlette.middleware.base,header,Yes
-gateway.modules.auth.tokenRefreshMiddleware,typing,header,Yes
-gateway.modules.auth.tokenRefreshService,logging,header,Yes
-gateway.modules.auth.tokenRefreshService,modules.auth.tokenManager,function _refresh_google_token,Yes
-gateway.modules.auth.tokenRefreshService,modules.auth.tokenManager,function _refresh_microsoft_token,Yes
-gateway.modules.auth.tokenRefreshService,modules.datamodels.datamodelUam,header,Yes
-gateway.modules.auth.tokenRefreshService,modules.interfaces.interfaceDbApp,function refresh_expired_tokens,Yes
-gateway.modules.auth.tokenRefreshService,modules.interfaces.interfaceDbApp,function proactive_refresh,Yes
-gateway.modules.auth.tokenRefreshService,modules.security.rootAccess,function refresh_expired_tokens,Yes
-gateway.modules.auth.tokenRefreshService,modules.security.rootAccess,function proactive_refresh,Yes
-gateway.modules.auth.tokenRefreshService,modules.shared.auditLogger,header,Yes
-gateway.modules.auth.tokenRefreshService,modules.shared.timeUtils,header,Yes
-gateway.modules.auth.tokenRefreshService,typing,header,Yes
-gateway.modules.connectors.connectorDbPostgre,json,function _save_record,Yes
-gateway.modules.connectors.connectorDbPostgre,json,function _loadRecord,Yes
-gateway.modules.connectors.connectorDbPostgre,json,function _loadTable,Yes
-gateway.modules.connectors.connectorDbPostgre,json,function getRecordset,Yes
-gateway.modules.connectors.connectorDbPostgre,logging,header,Yes
-gateway.modules.connectors.connectorDbPostgre,modules.datamodels.datamodelRbac,header,Yes
-gateway.modules.connectors.connectorDbPostgre,modules.datamodels.datamodelUam,header,Yes
-gateway.modules.connectors.connectorDbPostgre,modules.shared.configuration,header,Yes
-gateway.modules.connectors.connectorDbPostgre,modules.shared.timeUtils,header,Yes
-gateway.modules.connectors.connectorDbPostgre,psycopg2,header,Yes
-gateway.modules.connectors.connectorDbPostgre,psycopg2.extras,header,Yes
-gateway.modules.connectors.connectorDbPostgre,pydantic,header,Yes
-gateway.modules.connectors.connectorDbPostgre,threading,header,Yes
-gateway.modules.connectors.connectorDbPostgre,typing,header,Yes
-gateway.modules.connectors.connectorDbPostgre,uuid,header,Yes
-gateway.modules.connectors.connectorMessagingEmail,azure.communication.email,header,Yes
-gateway.modules.connectors.connectorMessagingEmail,logging,header,Yes
-gateway.modules.connectors.connectorMessagingEmail,modules.shared.configuration,header,Yes
-gateway.modules.connectors.connectorMessagingEmail,typing,header,Yes
-gateway.modules.connectors.connectorMessagingSms,logging,header,Yes
-gateway.modules.connectors.connectorMessagingSms,modules.shared.configuration,header,Yes
-gateway.modules.connectors.connectorMessagingSms,twilio.rest,function __init__,Yes
-gateway.modules.connectors.connectorMessagingSms,typing,header,Yes
-gateway.modules.connectors.connectorPreprocessor,httpx,header,Yes
-gateway.modules.connectors.connectorPreprocessor,logging,header,Yes
-gateway.modules.connectors.connectorPreprocessor,modules.shared.configuration,header,Yes
-gateway.modules.connectors.connectorPreprocessor,typing,header,Yes
-gateway.modules.connectors.connectorSwissTopoMapServer,aiohttp,header,Yes
-gateway.modules.connectors.connectorSwissTopoMapServer,asyncio,header,Yes
-gateway.modules.connectors.connectorSwissTopoMapServer,logging,header,Yes
-gateway.modules.connectors.connectorSwissTopoMapServer,modules.shared.configuration,header,Yes
-gateway.modules.connectors.connectorSwissTopoMapServer,re,header,Yes
-gateway.modules.connectors.connectorSwissTopoMapServer,typing,header,Yes
-gateway.modules.connectors.connectorTicketsClickup,aiohttp,header,Yes
-gateway.modules.connectors.connectorTicketsClickup,logging,header,Yes
-gateway.modules.connectors.connectorTicketsClickup,modules.datamodels.datamodelTickets,header,Yes
-gateway.modules.connectors.connectorTicketsClickup,typing,header,Yes
-gateway.modules.connectors.connectorTicketsJira,aiohttp,header,Yes
-gateway.modules.connectors.connectorTicketsJira,asyncio,header,Yes
-gateway.modules.connectors.connectorTicketsJira,json,header,Yes
-gateway.modules.connectors.connectorTicketsJira,logging,header,Yes
-gateway.modules.connectors.connectorTicketsJira,modules.datamodels.datamodelTickets,header,Yes
-gateway.modules.connectors.connectorVoiceGoogle,google.cloud,header,Yes
-gateway.modules.connectors.connectorVoiceGoogle,google.cloud,header,Yes
-gateway.modules.connectors.connectorVoiceGoogle,google.cloud,header,Yes
-gateway.modules.connectors.connectorVoiceGoogle,google.oauth2,function __init__,Yes
-gateway.modules.connectors.connectorVoiceGoogle,html,header,Yes
-gateway.modules.connectors.connectorVoiceGoogle,json,header,Yes
-gateway.modules.connectors.connectorVoiceGoogle,logging,header,Yes
-gateway.modules.connectors.connectorVoiceGoogle,modules.shared.configuration,header,Yes
-gateway.modules.connectors.connectorVoiceGoogle,typing,header,Yes
-gateway.modules.datamodels.__init__,(relative) .,header,Yes
-gateway.modules.datamodels.__init__,(relative) .,header,Yes
-gateway.modules.datamodels.__init__,(relative) .,header,Yes
-gateway.modules.datamodels.__init__,(relative) .,header,Yes
-gateway.modules.datamodels.__init__,(relative) .,header,Yes
-gateway.modules.datamodels.__init__,(relative) .,header,Yes
-gateway.modules.datamodels.__init__,(relative) .,header,Yes
-gateway.modules.datamodels.__init__,(relative) .,header,Yes
-gateway.modules.datamodels.datamodelAi,enum,header,Yes
-gateway.modules.datamodels.datamodelAi,modules.datamodels.datamodelExtraction,header,Yes
-gateway.modules.datamodels.datamodelAi,pydantic,header,Yes
-gateway.modules.datamodels.datamodelAi,typing,header,Yes
-gateway.modules.datamodels.datamodelAudit,enum,header,Yes
-gateway.modules.datamodels.datamodelAudit,modules.shared.attributeUtils,header,Yes
-gateway.modules.datamodels.datamodelAudit,modules.shared.timeUtils,header,Yes
-gateway.modules.datamodels.datamodelAudit,pydantic,header,Yes
-gateway.modules.datamodels.datamodelAudit,typing,header,Yes
-gateway.modules.datamodels.datamodelAudit,uuid,header,Yes
-gateway.modules.datamodels.datamodelChat,enum,header,Yes
-gateway.modules.datamodels.datamodelChat,modules.datamodels.datamodelWorkflow,function updateFromSelection,Yes
-gateway.modules.datamodels.datamodelChat,modules.shared.attributeUtils,header,Yes
-gateway.modules.datamodels.datamodelChat,modules.shared.timeUtils,header,Yes
-gateway.modules.datamodels.datamodelChat,pydantic,header,Yes
-gateway.modules.datamodels.datamodelChat,typing,header,Yes
-gateway.modules.datamodels.datamodelChat,uuid,header,Yes
-gateway.modules.datamodels.datamodelDocref,modules.shared.attributeUtils,header,Yes
-gateway.modules.datamodels.datamodelDocref,pydantic,header,Yes
-gateway.modules.datamodels.datamodelDocref,typing,header,Yes
-gateway.modules.datamodels.datamodelDocument,datetime,header,Yes
-gateway.modules.datamodels.datamodelDocument,pydantic,header,Yes
-gateway.modules.datamodels.datamodelDocument,typing,header,Yes
-gateway.modules.datamodels.datamodelExtraction,pydantic,header,Yes
-gateway.modules.datamodels.datamodelExtraction,typing,header,Yes
-gateway.modules.datamodels.datamodelFeatures,modules.datamodels.datamodelUtils,header,Yes
-gateway.modules.datamodels.datamodelFeatures,modules.shared.attributeUtils,header,Yes
-gateway.modules.datamodels.datamodelFeatures,pydantic,header,Yes
-gateway.modules.datamodels.datamodelFeatures,typing,header,Yes
-gateway.modules.datamodels.datamodelFeatures,uuid,header,Yes
-gateway.modules.datamodels.datamodelFiles,base64,header,Yes
-gateway.modules.datamodels.datamodelFiles,modules.shared.attributeUtils,header,Yes
-gateway.modules.datamodels.datamodelFiles,modules.shared.timeUtils,header,Yes
-gateway.modules.datamodels.datamodelFiles,pydantic,header,Yes
-gateway.modules.datamodels.datamodelFiles,typing,header,Yes
-gateway.modules.datamodels.datamodelFiles,uuid,header,Yes
-gateway.modules.datamodels.datamodelInvitation,modules.shared.attributeUtils,header,Yes
-gateway.modules.datamodels.datamodelInvitation,modules.shared.timeUtils,header,Yes
-gateway.modules.datamodels.datamodelInvitation,pydantic,header,Yes
-gateway.modules.datamodels.datamodelInvitation,secrets,header,Yes
-gateway.modules.datamodels.datamodelInvitation,typing,header,Yes
-gateway.modules.datamodels.datamodelInvitation,uuid,header,Yes
-gateway.modules.datamodels.datamodelJson,typing,header,Yes
-gateway.modules.datamodels.datamodelMembership,modules.shared.attributeUtils,header,Yes
-gateway.modules.datamodels.datamodelMembership,pydantic,header,Yes
-gateway.modules.datamodels.datamodelMembership,uuid,header,Yes
-gateway.modules.datamodels.datamodelMessaging,enum,header,Yes
-gateway.modules.datamodels.datamodelMessaging,modules.shared.attributeUtils,header,Yes
-gateway.modules.datamodels.datamodelMessaging,modules.shared.timeUtils,header,Yes
-gateway.modules.datamodels.datamodelMessaging,pydantic,header,Yes
-gateway.modules.datamodels.datamodelMessaging,typing,header,Yes
-gateway.modules.datamodels.datamodelMessaging,uuid,header,Yes
-gateway.modules.datamodels.datamodelPagination,math,header,Yes
-gateway.modules.datamodels.datamodelPagination,pydantic,header,Yes
-gateway.modules.datamodels.datamodelPagination,typing,header,Yes
-gateway.modules.datamodels.datamodelRbac,enum,header,Yes
-gateway.modules.datamodels.datamodelRbac,modules.datamodels.datamodelUam,header,Yes
-gateway.modules.datamodels.datamodelRbac,modules.datamodels.datamodelUtils,header,Yes
-gateway.modules.datamodels.datamodelRbac,modules.shared.attributeUtils,header,Yes
-gateway.modules.datamodels.datamodelRbac,pydantic,header,Yes
-gateway.modules.datamodels.datamodelRbac,typing,header,Yes
-gateway.modules.datamodels.datamodelRbac,uuid,header,Yes
-gateway.modules.datamodels.datamodelSecurity,(relative) .datamodelUam,header,Yes
-gateway.modules.datamodels.datamodelSecurity,enum,header,Yes
-gateway.modules.datamodels.datamodelSecurity,modules.shared.attributeUtils,header,Yes
-gateway.modules.datamodels.datamodelSecurity,modules.shared.timeUtils,header,Yes
-gateway.modules.datamodels.datamodelSecurity,pydantic,header,Yes
-gateway.modules.datamodels.datamodelSecurity,typing,header,Yes
-gateway.modules.datamodels.datamodelSecurity,uuid,header,Yes
-gateway.modules.datamodels.datamodelTickets,abc,header,Yes
-gateway.modules.datamodels.datamodelTickets,pydantic,header,Yes
-gateway.modules.datamodels.datamodelTickets,typing,header,Yes
-gateway.modules.datamodels.datamodelUam,enum,header,Yes
-gateway.modules.datamodels.datamodelUam,modules.shared.attributeUtils,header,Yes
-gateway.modules.datamodels.datamodelUam,modules.shared.timeUtils,header,Yes
-gateway.modules.datamodels.datamodelUam,pydantic,header,Yes
-gateway.modules.datamodels.datamodelUam,typing,header,Yes
-gateway.modules.datamodels.datamodelUam,uuid,header,Yes
-gateway.modules.datamodels.datamodelUtils,modules.shared.attributeUtils,header,Yes
-gateway.modules.datamodels.datamodelUtils,pydantic,header,Yes
-gateway.modules.datamodels.datamodelUtils,typing,header,Yes
-gateway.modules.datamodels.datamodelUtils,uuid,header,Yes
-gateway.modules.datamodels.datamodelVoice,modules.shared.attributeUtils,header,Yes
-gateway.modules.datamodels.datamodelVoice,modules.shared.timeUtils,header,Yes
-gateway.modules.datamodels.datamodelVoice,pydantic,header,Yes
-gateway.modules.datamodels.datamodelVoice,uuid,header,Yes
-gateway.modules.datamodels.datamodelWorkflow,modules.datamodels.datamodelDocref,header,Yes
-gateway.modules.datamodels.datamodelWorkflow,modules.shared.attributeUtils,header,Yes
-gateway.modules.datamodels.datamodelWorkflow,modules.shared.jsonUtils,header,Yes
-gateway.modules.datamodels.datamodelWorkflow,pydantic,header,Yes
-gateway.modules.datamodels.datamodelWorkflow,typing,header,Yes
-gateway.modules.datamodels.datamodelWorkflowActions,modules.datamodels.datamodelChat,header,Yes
-gateway.modules.datamodels.datamodelWorkflowActions,modules.shared.attributeUtils,header,Yes
-gateway.modules.datamodels.datamodelWorkflowActions,modules.shared.frontendTypes,header,Yes
-gateway.modules.datamodels.datamodelWorkflowActions,pydantic,header,Yes
-gateway.modules.datamodels.datamodelWorkflowActions,typing,header,Yes
-gateway.modules.features.aichat.mainAiChat,logging,header,Yes
-gateway.modules.features.aichat.mainAiChat,modules.aicore.aicoreModelRegistry,function onStart,Yes
-gateway.modules.features.aichat.mainAiChat,typing,header,Yes
-gateway.modules.features.automation.datamodelFeatureAutomation,modules.shared.attributeUtils,header,Yes
-gateway.modules.features.automation.datamodelFeatureAutomation,pydantic,header,Yes
-gateway.modules.features.automation.datamodelFeatureAutomation,typing,header,Yes
-gateway.modules.features.automation.datamodelFeatureAutomation,uuid,header,Yes
-gateway.modules.features.automation.mainAutomation,logging,header,Yes
-gateway.modules.features.automation.mainAutomation,typing,header,Yes
-gateway.modules.features.automation.routeFeatureAutomation,(relative) .subAutomationTemplates,header,Yes
-gateway.modules.features.automation.routeFeatureAutomation,fastapi,header,Yes
-gateway.modules.features.automation.routeFeatureAutomation,fastapi,header,Yes
-gateway.modules.features.automation.routeFeatureAutomation,fastapi.responses,header,Yes
-gateway.modules.features.automation.routeFeatureAutomation,fastapi.responses,function get_automations,Yes
-gateway.modules.features.automation.routeFeatureAutomation,json,header,Yes
-gateway.modules.features.automation.routeFeatureAutomation,logging,header,Yes
-gateway.modules.features.automation.routeFeatureAutomation,modules.auth,header,Yes
-gateway.modules.features.automation.routeFeatureAutomation,modules.datamodels.datamodelChat,header,Yes
-gateway.modules.features.automation.routeFeatureAutomation,modules.datamodels.datamodelPagination,header,Yes
-gateway.modules.features.automation.routeFeatureAutomation,modules.features.automation.datamodelFeatureAutomation,header,Yes
-gateway.modules.features.automation.routeFeatureAutomation,modules.interfaces.interfaceDbChat,header,Yes
-gateway.modules.features.automation.routeFeatureAutomation,modules.services,function execute_automation,Yes
-gateway.modules.features.automation.routeFeatureAutomation,modules.shared.attributeUtils,header,Yes
-gateway.modules.features.automation.routeFeatureAutomation,modules.workflows.automation,header,Yes
-gateway.modules.features.automation.routeFeatureAutomation,typing,header,Yes
-gateway.modules.features.automation.subAutomationTemplates,typing,header,Yes
-gateway.modules.features.automation.subAutomationUtils,datetime,header,Yes
-gateway.modules.features.automation.subAutomationUtils,json,header,Yes
-gateway.modules.features.automation.subAutomationUtils,typing,header,Yes
-gateway.modules.features.chatbot.__init__,(relative) .mainChatbot,header,Yes
-gateway.modules.features.chatbot.chatbotConstants,datetime,header,Yes
-gateway.modules.features.chatbot.chatbotConstants,logging,header,Yes
-gateway.modules.features.chatbot.chatbotConstants,modules.datamodels.datamodelAi,header,Yes
-gateway.modules.features.chatbot.chatbotConstants,re,header,Yes
-gateway.modules.features.chatbot.chatbotConstants,typing,header,Yes
-gateway.modules.features.chatbot.datamodelFeatureChatbot,enum,header,Yes
-gateway.modules.features.chatbot.datamodelFeatureChatbot,modules.datamodels.datamodelWorkflow,function updateFromSelection,Yes
-gateway.modules.features.chatbot.datamodelFeatureChatbot,modules.shared.attributeUtils,header,Yes
-gateway.modules.features.chatbot.datamodelFeatureChatbot,modules.shared.timeUtils,header,Yes
-gateway.modules.features.chatbot.datamodelFeatureChatbot,pydantic,header,Yes
-gateway.modules.features.chatbot.datamodelFeatureChatbot,typing,header,Yes
-gateway.modules.features.chatbot.datamodelFeatureChatbot,uuid,header,Yes
-gateway.modules.features.chatbot.eventManager,asyncio,header,Yes
-gateway.modules.features.chatbot.eventManager,datetime,header,Yes
-gateway.modules.features.chatbot.eventManager,logging,header,Yes
-gateway.modules.features.chatbot.eventManager,typing,header,Yes
-gateway.modules.features.chatbot.interfaceFeatureChatbot,(relative) .datamodelFeatureChatbot,header,Yes
-gateway.modules.features.chatbot.interfaceFeatureChatbot,asyncio,header,Yes
-gateway.modules.features.chatbot.interfaceFeatureChatbot,datetime,function storeDebugMessageAndDocuments,Yes
-gateway.modules.features.chatbot.interfaceFeatureChatbot,json,header,Yes
-gateway.modules.features.chatbot.interfaceFeatureChatbot,logging,header,Yes
-gateway.modules.features.chatbot.interfaceFeatureChatbot,math,header,Yes
-gateway.modules.features.chatbot.interfaceFeatureChatbot,modules.connectors.connectorDbPostgre,header,Yes
-gateway.modules.features.chatbot.interfaceFeatureChatbot,modules.datamodels.datamodelPagination,header,Yes
-gateway.modules.features.chatbot.interfaceFeatureChatbot,modules.datamodels.datamodelRbac,header,Yes
-gateway.modules.features.chatbot.interfaceFeatureChatbot,modules.datamodels.datamodelUam,header,Yes
-gateway.modules.features.chatbot.interfaceFeatureChatbot,modules.datamodels.datamodelUam,header,Yes
-gateway.modules.features.chatbot.interfaceFeatureChatbot,modules.features.automation.datamodelFeatureAutomation,header,Yes
-gateway.modules.features.chatbot.interfaceFeatureChatbot,modules.features.chatbot.eventManager,function createMessage,Yes
-gateway.modules.features.chatbot.interfaceFeatureChatbot,modules.features.chatbot.eventManager,function createLog,Yes
-gateway.modules.features.chatbot.interfaceFeatureChatbot,modules.interfaces.interfaceDbApp,function _enrichAutomationsWithUserAndMandate,Yes
-gateway.modules.features.chatbot.interfaceFeatureChatbot,modules.interfaces.interfaceDbManagement,function storeDebugMessageAndDocuments,Yes
-gateway.modules.features.chatbot.interfaceFeatureChatbot,modules.interfaces.interfaceRbac,header,Yes
-gateway.modules.features.chatbot.interfaceFeatureChatbot,modules.security.rbac,header,Yes
-gateway.modules.features.chatbot.interfaceFeatureChatbot,modules.security.rootAccess,function setUserContext,Yes
-gateway.modules.features.chatbot.interfaceFeatureChatbot,modules.shared.callbackRegistry,function _notifyAutomationChanged,Yes
-gateway.modules.features.chatbot.interfaceFeatureChatbot,modules.shared.configuration,header,Yes
-gateway.modules.features.chatbot.interfaceFeatureChatbot,modules.shared.debugLogger,function storeDebugMessageAndDocuments,Yes
-gateway.modules.features.chatbot.interfaceFeatureChatbot,modules.shared.eventManagement,function deleteAutomationDefinition,Yes
-gateway.modules.features.chatbot.interfaceFeatureChatbot,modules.shared.timeUtils,header,Yes
-gateway.modules.features.chatbot.interfaceFeatureChatbot,os,function storeDebugMessageAndDocuments,Yes
-gateway.modules.features.chatbot.interfaceFeatureChatbot,typing,header,Yes
-gateway.modules.features.chatbot.interfaceFeatureChatbot,uuid,header,Yes
-gateway.modules.features.chatbot.mainChatbot,asyncio,header,Yes
-gateway.modules.features.chatbot.mainChatbot,base64,header,Yes
-gateway.modules.features.chatbot.mainChatbot,json,header,Yes
-gateway.modules.features.chatbot.mainChatbot,logging,header,Yes
-gateway.modules.features.chatbot.mainChatbot,modules.connectors.connectorPreprocessor,header,Yes
-gateway.modules.features.chatbot.mainChatbot,modules.datamodels.datamodelAi,header,Yes
-gateway.modules.features.chatbot.mainChatbot,modules.datamodels.datamodelDocref,header,Yes
-gateway.modules.features.chatbot.mainChatbot,modules.datamodels.datamodelUam,header,Yes
-gateway.modules.features.chatbot.mainChatbot,modules.features.chatbot.chatbotConstants,header,Yes
-gateway.modules.features.chatbot.mainChatbot,modules.features.chatbot.datamodelFeatureChatbot,header,Yes
-gateway.modules.features.chatbot.mainChatbot,modules.features.chatbot.eventManager,header,Yes
-gateway.modules.features.chatbot.mainChatbot,modules.interfaces.interfaceRbac,function _convert_file_ids_to_document_references,Yes
-gateway.modules.features.chatbot.mainChatbot,modules.services,header,Yes
-gateway.modules.features.chatbot.mainChatbot,modules.shared.timeUtils,header,Yes
-gateway.modules.features.chatbot.mainChatbot,modules.workflows.methods.methodAi.methodAi,header,Yes
-gateway.modules.features.chatbot.mainChatbot,re,header,Yes
-gateway.modules.features.chatbot.mainChatbot,typing,header,Yes
-gateway.modules.features.chatbot.mainChatbot,uuid,header,Yes
-gateway.modules.features.chatbot.routeFeatureChatbot,(relative) .,header,Yes
-gateway.modules.features.chatbot.routeFeatureChatbot,(relative) .,header,Yes
-gateway.modules.features.chatbot.routeFeatureChatbot,(relative) .datamodelFeatureChatbot,header,Yes
-gateway.modules.features.chatbot.routeFeatureChatbot,(relative) .eventManager,header,Yes
-gateway.modules.features.chatbot.routeFeatureChatbot,asyncio,header,Yes
-gateway.modules.features.chatbot.routeFeatureChatbot,fastapi,header,Yes
-gateway.modules.features.chatbot.routeFeatureChatbot,fastapi.responses,header,Yes
-gateway.modules.features.chatbot.routeFeatureChatbot,json,header,Yes
-gateway.modules.features.chatbot.routeFeatureChatbot,logging,header,Yes
-gateway.modules.features.chatbot.routeFeatureChatbot,math,header,Yes
-gateway.modules.features.chatbot.routeFeatureChatbot,modules.auth,header,Yes
-gateway.modules.features.chatbot.routeFeatureChatbot,modules.datamodels.datamodelPagination,header,Yes
-gateway.modules.features.chatbot.routeFeatureChatbot,modules.interfaces.interfaceRbac,header,Yes
-gateway.modules.features.chatbot.routeFeatureChatbot,modules.shared.timeUtils,header,Yes
-gateway.modules.features.chatbot.routeFeatureChatbot,modules.workflows.automation,header,Yes
-gateway.modules.features.chatbot.routeFeatureChatbot,typing,header,Yes
-gateway.modules.features.featureRegistry,fastapi,header,Yes
-gateway.modules.features.featureRegistry,glob,header,Yes
-gateway.modules.features.featureRegistry,importlib,header,Yes
-gateway.modules.features.featureRegistry,logging,header,Yes
-gateway.modules.features.featureRegistry,os,header,Yes
-gateway.modules.features.featureRegistry,typing,header,Yes
-gateway.modules.features.neutralizer.datamodelFeatureNeutralizer,modules.shared.attributeUtils,header,Yes
-gateway.modules.features.neutralizer.datamodelFeatureNeutralizer,pydantic,header,Yes
-gateway.modules.features.neutralizer.datamodelFeatureNeutralizer,typing,header,Yes
-gateway.modules.features.neutralizer.datamodelFeatureNeutralizer,uuid,header,Yes
-gateway.modules.features.neutralizer.interfaceFeatureNeutralizer,logging,header,Yes
-gateway.modules.features.neutralizer.interfaceFeatureNeutralizer,modules.features.neutralizer.datamodelFeatureNeutralizer,header,Yes
-gateway.modules.features.neutralizer.interfaceFeatureNeutralizer,modules.interfaces.interfaceRbac,header,Yes
-gateway.modules.features.neutralizer.interfaceFeatureNeutralizer,modules.shared.timeUtils,header,Yes
-gateway.modules.features.neutralizer.interfaceFeatureNeutralizer,typing,header,Yes
-gateway.modules.features.neutralizer.mainNeutralizePlayground,(relative) .datamodelFeatureNeutralizer,header,Yes
-gateway.modules.features.neutralizer.mainNeutralizePlayground,asyncio,header,Yes
-gateway.modules.features.neutralizer.mainNeutralizePlayground,logging,header,Yes
-gateway.modules.features.neutralizer.mainNeutralizePlayground,modules.datamodels.datamodelUam,header,Yes
-gateway.modules.features.neutralizer.mainNeutralizePlayground,modules.services,header,Yes
-gateway.modules.features.neutralizer.mainNeutralizePlayground,modules.services.serviceSharepoint.mainServiceSharepoint,function processSharepointFiles,Yes
-gateway.modules.features.neutralizer.mainNeutralizePlayground,typing,header,Yes
-gateway.modules.features.neutralizer.mainNeutralizePlayground,urllib.parse,header,Yes
-gateway.modules.features.neutralizer.mainNeutralizer,logging,header,Yes
-gateway.modules.features.neutralizer.mainNeutralizer,typing,header,Yes
-gateway.modules.features.neutralizer.routeFeatureNeutralizer,(relative) .datamodelFeatureNeutralizer,header,Yes
-gateway.modules.features.neutralizer.routeFeatureNeutralizer,(relative) .mainNeutralizePlayground,header,Yes
-gateway.modules.features.neutralizer.routeFeatureNeutralizer,fastapi,header,Yes
-gateway.modules.features.neutralizer.routeFeatureNeutralizer,logging,header,Yes
-gateway.modules.features.neutralizer.routeFeatureNeutralizer,modules.auth,header,Yes
-gateway.modules.features.neutralizer.routeFeatureNeutralizer,typing,header,Yes
-gateway.modules.features.neutralizer.serviceNeutralization.mainServiceNeutralization,(relative) .subPatterns,header,Yes
-gateway.modules.features.neutralizer.serviceNeutralization.mainServiceNeutralization,(relative) .subProcessBinary,header,Yes
-gateway.modules.features.neutralizer.serviceNeutralization.mainServiceNeutralization,(relative) .subProcessCommon,header,Yes
-gateway.modules.features.neutralizer.serviceNeutralization.mainServiceNeutralization,(relative) .subProcessList,header,Yes
-gateway.modules.features.neutralizer.serviceNeutralization.mainServiceNeutralization,(relative) .subProcessText,header,Yes
-gateway.modules.features.neutralizer.serviceNeutralization.mainServiceNeutralization,json,header,Yes
-gateway.modules.features.neutralizer.serviceNeutralization.mainServiceNeutralization,logging,header,Yes
-gateway.modules.features.neutralizer.serviceNeutralization.mainServiceNeutralization,modules.features.neutralizer.datamodelFeatureNeutralizer,header,Yes
-gateway.modules.features.neutralizer.serviceNeutralization.mainServiceNeutralization,modules.features.neutralizer.interfaceFeatureNeutralizer,header,Yes
-gateway.modules.features.neutralizer.serviceNeutralization.mainServiceNeutralization,re,header,Yes
-gateway.modules.features.neutralizer.serviceNeutralization.mainServiceNeutralization,typing,header,Yes
-gateway.modules.features.neutralizer.serviceNeutralization.subParseString,(relative) .subPatterns,header,Yes
-gateway.modules.features.neutralizer.serviceNeutralization.subParseString,re,header,Yes
-gateway.modules.features.neutralizer.serviceNeutralization.subParseString,typing,header,Yes
-gateway.modules.features.neutralizer.serviceNeutralization.subParseString,uuid,header,Yes
-gateway.modules.features.neutralizer.serviceNeutralization.subPatterns,dataclasses,header,Yes
-gateway.modules.features.neutralizer.serviceNeutralization.subPatterns,re,header,Yes
-gateway.modules.features.neutralizer.serviceNeutralization.subPatterns,typing,header,Yes
-gateway.modules.features.neutralizer.serviceNeutralization.subProcessBinary,base64,header,Yes
-gateway.modules.features.neutralizer.serviceNeutralization.subProcessBinary,dataclasses,header,Yes
-gateway.modules.features.neutralizer.serviceNeutralization.subProcessBinary,re,header,Yes
-gateway.modules.features.neutralizer.serviceNeutralization.subProcessBinary,typing,header,Yes
-gateway.modules.features.neutralizer.serviceNeutralization.subProcessCommon,dataclasses,header,Yes
-gateway.modules.features.neutralizer.serviceNeutralization.subProcessCommon,pydantic,header,Yes
-gateway.modules.features.neutralizer.serviceNeutralization.subProcessCommon,re,header,Yes
-gateway.modules.features.neutralizer.serviceNeutralization.subProcessCommon,typing,header,Yes
-gateway.modules.features.neutralizer.serviceNeutralization.subProcessList,(relative) .subParseString,header,Yes
-gateway.modules.features.neutralizer.serviceNeutralization.subProcessList,(relative) .subPatterns,header,Yes
-gateway.modules.features.neutralizer.serviceNeutralization.subProcessList,(relative) .subPatterns,function _anonymizeXmlElement,Yes
-gateway.modules.features.neutralizer.serviceNeutralization.subProcessList,(relative) .subPatterns,function _anonymizeXmlElement,Yes
-gateway.modules.features.neutralizer.serviceNeutralization.subProcessList,dataclasses,header,Yes
-gateway.modules.features.neutralizer.serviceNeutralization.subProcessList,io,header,Yes
-gateway.modules.features.neutralizer.serviceNeutralization.subProcessList,json,header,Yes
-gateway.modules.features.neutralizer.serviceNeutralization.subProcessList,pandas,header,Yes
-gateway.modules.features.neutralizer.serviceNeutralization.subProcessList,typing,header,Yes
-gateway.modules.features.neutralizer.serviceNeutralization.subProcessList,uuid,function _anonymizeXmlElement,Yes
-gateway.modules.features.neutralizer.serviceNeutralization.subProcessList,uuid,function _anonymizeTable,Yes
-gateway.modules.features.neutralizer.serviceNeutralization.subProcessList,uuid,function _anonymizeXmlElement,Yes
-gateway.modules.features.neutralizer.serviceNeutralization.subProcessList,uuid,function _anonymizeXmlElement,Yes
-gateway.modules.features.neutralizer.serviceNeutralization.subProcessList,uuid,function _anonymizeXmlElement,Yes
-gateway.modules.features.neutralizer.serviceNeutralization.subProcessList,xml.etree.ElementTree,header,Yes
-gateway.modules.features.neutralizer.serviceNeutralization.subProcessText,(relative) .subParseString,header,Yes
-gateway.modules.features.neutralizer.serviceNeutralization.subProcessText,dataclasses,header,Yes
-gateway.modules.features.neutralizer.serviceNeutralization.subProcessText,typing,header,Yes
-gateway.modules.features.realestate.datamodelFeatureRealEstate,enum,header,Yes
-gateway.modules.features.realestate.datamodelFeatureRealEstate,modules.shared.attributeUtils,header,Yes
-gateway.modules.features.realestate.datamodelFeatureRealEstate,modules.shared.timeUtils,header,Yes
-gateway.modules.features.realestate.datamodelFeatureRealEstate,pydantic,header,Yes
-gateway.modules.features.realestate.datamodelFeatureRealEstate,typing,header,Yes
-gateway.modules.features.realestate.datamodelFeatureRealEstate,uuid,header,Yes
-gateway.modules.features.realestate.interfaceFeatureRealEstate,(relative) .datamodelFeatureRealEstate,header,Yes
-gateway.modules.features.realestate.interfaceFeatureRealEstate,logging,header,Yes
-gateway.modules.features.realestate.interfaceFeatureRealEstate,modules.connectors.connectorDbPostgre,header,Yes
-gateway.modules.features.realestate.interfaceFeatureRealEstate,modules.datamodels.datamodelRbac,header,Yes
-gateway.modules.features.realestate.interfaceFeatureRealEstate,modules.datamodels.datamodelUam,header,Yes
-gateway.modules.features.realestate.interfaceFeatureRealEstate,modules.datamodels.datamodelUam,header,Yes
-gateway.modules.features.realestate.interfaceFeatureRealEstate,modules.interfaces.interfaceRbac,header,Yes
-gateway.modules.features.realestate.interfaceFeatureRealEstate,modules.security.rbac,header,Yes
-gateway.modules.features.realestate.interfaceFeatureRealEstate,modules.security.rootAccess,function setUserContext,Yes
-gateway.modules.features.realestate.interfaceFeatureRealEstate,modules.shared.configuration,header,Yes
-gateway.modules.features.realestate.interfaceFeatureRealEstate,re,function _isUUID,Yes
-gateway.modules.features.realestate.interfaceFeatureRealEstate,time,function executeQuery,Yes
-gateway.modules.features.realestate.interfaceFeatureRealEstate,typing,header,Yes
-gateway.modules.features.realestate.mainRealEstate,(relative) .datamodelFeatureRealEstate,header,Yes
-gateway.modules.features.realestate.mainRealEstate,(relative) .interfaceFeatureRealEstate,header,Yes
-gateway.modules.features.realestate.mainRealEstate,fastapi,header,Yes
-gateway.modules.features.realestate.mainRealEstate,json,header,Yes
-gateway.modules.features.realestate.mainRealEstate,logging,header,Yes
-gateway.modules.features.realestate.mainRealEstate,modules.connectors.connectorSwissTopoMapServer,header,Yes
-gateway.modules.features.realestate.mainRealEstate,modules.datamodels.datamodelUam,header,Yes
-gateway.modules.features.realestate.mainRealEstate,modules.features.realestate.datamodelFeatureRealEstate,function executeIntentBasedOperation,Yes
-gateway.modules.features.realestate.mainRealEstate,modules.features.realestate.datamodelFeatureRealEstate,function executeIntentBasedOperation,Yes
-gateway.modules.features.realestate.mainRealEstate,modules.features.realestate.datamodelFeatureRealEstate,function executeIntentBasedOperation,Yes
-gateway.modules.features.realestate.mainRealEstate,modules.features.realestate.datamodelFeatureRealEstate,function executeIntentBasedOperation,Yes
-gateway.modules.features.realestate.mainRealEstate,modules.features.realestate.datamodelFeatureRealEstate,function executeIntentBasedOperation,Yes
-gateway.modules.features.realestate.mainRealEstate,modules.features.realestate.datamodelFeatureRealEstate,function executeIntentBasedOperation,Yes
-gateway.modules.features.realestate.mainRealEstate,modules.features.realestate.datamodelFeatureRealEstate,function executeIntentBasedOperation,Yes
-gateway.modules.features.realestate.mainRealEstate,modules.features.realestate.datamodelFeatureRealEstate,function executeIntentBasedOperation,Yes
-gateway.modules.features.realestate.mainRealEstate,modules.features.realestate.datamodelFeatureRealEstate,function executeIntentBasedOperation,Yes
-gateway.modules.features.realestate.mainRealEstate,modules.features.realestate.datamodelFeatureRealEstate,function executeIntentBasedOperation,Yes
-gateway.modules.features.realestate.mainRealEstate,modules.features.realestate.datamodelFeatureRealEstate,function executeIntentBasedOperation,Yes
-gateway.modules.features.realestate.mainRealEstate,modules.features.realestate.datamodelFeatureRealEstate,function executeIntentBasedOperation,Yes
-gateway.modules.features.realestate.mainRealEstate,modules.features.realestate.datamodelFeatureRealEstate,function executeIntentBasedOperation,Yes
-gateway.modules.features.realestate.mainRealEstate,modules.features.realestate.datamodelFeatureRealEstate,function executeIntentBasedOperation,Yes
-gateway.modules.features.realestate.mainRealEstate,modules.features.realestate.datamodelFeatureRealEstate,function executeIntentBasedOperation,Yes
-gateway.modules.features.realestate.mainRealEstate,modules.features.realestate.datamodelFeatureRealEstate,function executeIntentBasedOperation,Yes
-gateway.modules.features.realestate.mainRealEstate,modules.features.realestate.datamodelFeatureRealEstate,function executeIntentBasedOperation,Yes
-gateway.modules.features.realestate.mainRealEstate,modules.services,header,Yes
-gateway.modules.features.realestate.mainRealEstate,re,function executeIntentBasedOperation,Yes
-gateway.modules.features.realestate.mainRealEstate,shapely.geometry,header,Yes
-gateway.modules.features.realestate.mainRealEstate,shapely.ops,header,Yes
-gateway.modules.features.realestate.mainRealEstate,typing,header,Yes
-gateway.modules.features.realestate.routeFeatureRealEstate,(relative) .datamodelFeatureRealEstate,header,Yes
-gateway.modules.features.realestate.routeFeatureRealEstate,(relative) .interfaceFeatureRealEstate,header,Yes
-gateway.modules.features.realestate.routeFeatureRealEstate,(relative) .mainRealEstate,header,Yes
-gateway.modules.features.realestate.routeFeatureRealEstate,fastapi,header,Yes
-gateway.modules.features.realestate.routeFeatureRealEstate,json,header,Yes
-gateway.modules.features.realestate.routeFeatureRealEstate,logging,header,Yes
-gateway.modules.features.realestate.routeFeatureRealEstate,modules.auth,header,Yes
-gateway.modules.features.realestate.routeFeatureRealEstate,modules.connectors.connectorSwissTopoMapServer,header,Yes
-gateway.modules.features.realestate.routeFeatureRealEstate,modules.datamodels.datamodelPagination,header,Yes
-gateway.modules.features.realestate.routeFeatureRealEstate,modules.shared.attributeUtils,header,Yes
-gateway.modules.features.realestate.routeFeatureRealEstate,requests,header,Yes
-gateway.modules.features.realestate.routeFeatureRealEstate,typing,header,Yes
-gateway.modules.features.trustee.datamodelFeatureTrustee,modules.shared.attributeUtils,header,Yes
-gateway.modules.features.trustee.datamodelFeatureTrustee,pydantic,header,Yes
-gateway.modules.features.trustee.datamodelFeatureTrustee,typing,header,Yes
-gateway.modules.features.trustee.datamodelFeatureTrustee,uuid,header,Yes
-gateway.modules.features.trustee.interfaceFeatureTrustee,(relative) .datamodelFeatureTrustee,header,Yes
-gateway.modules.features.trustee.interfaceFeatureTrustee,logging,header,Yes
-gateway.modules.features.trustee.interfaceFeatureTrustee,math,header,Yes
-gateway.modules.features.trustee.interfaceFeatureTrustee,modules.connectors.connectorDbPostgre,header,Yes
-gateway.modules.features.trustee.interfaceFeatureTrustee,modules.datamodels.datamodelPagination,header,Yes
-gateway.modules.features.trustee.interfaceFeatureTrustee,modules.datamodels.datamodelRbac,header,Yes
-gateway.modules.features.trustee.interfaceFeatureTrustee,modules.datamodels.datamodelUam,header,Yes
-gateway.modules.features.trustee.interfaceFeatureTrustee,modules.interfaces.interfaceRbac,header,Yes
-gateway.modules.features.trustee.interfaceFeatureTrustee,modules.security.rbac,header,Yes
-gateway.modules.features.trustee.interfaceFeatureTrustee,modules.security.rootAccess,function setUserContext,Yes
-gateway.modules.features.trustee.interfaceFeatureTrustee,modules.shared.configuration,header,Yes
-gateway.modules.features.trustee.interfaceFeatureTrustee,re,function createOrganisation,Yes
-gateway.modules.features.trustee.interfaceFeatureTrustee,typing,header,Yes
-gateway.modules.features.trustee.interfaceFeatureTrustee,uuid,header,Yes
-gateway.modules.features.trustee.interfaceFeatureTrustee,uuid,function createAccess,Yes
-gateway.modules.features.trustee.interfaceFeatureTrustee,uuid,function createContract,Yes
-gateway.modules.features.trustee.interfaceFeatureTrustee,uuid,function createDocument,Yes
-gateway.modules.features.trustee.interfaceFeatureTrustee,uuid,function createPosition,Yes
-gateway.modules.features.trustee.interfaceFeatureTrustee,uuid,function createPositionDocument,Yes
-gateway.modules.features.trustee.mainTrustee,logging,header,Yes
-gateway.modules.features.trustee.mainTrustee,typing,header,Yes
-gateway.modules.features.trustee.routeFeatureTrustee,(relative) .datamodelFeatureTrustee,header,Yes
-gateway.modules.features.trustee.routeFeatureTrustee,(relative) .interfaceFeatureTrustee,header,Yes
-gateway.modules.features.trustee.routeFeatureTrustee,fastapi,header,Yes
-gateway.modules.features.trustee.routeFeatureTrustee,fastapi,header,Yes
-gateway.modules.features.trustee.routeFeatureTrustee,fastapi.responses,header,Yes
-gateway.modules.features.trustee.routeFeatureTrustee,io,header,Yes
-gateway.modules.features.trustee.routeFeatureTrustee,json,header,Yes
-gateway.modules.features.trustee.routeFeatureTrustee,logging,header,Yes
-gateway.modules.features.trustee.routeFeatureTrustee,modules.auth,header,Yes
-gateway.modules.features.trustee.routeFeatureTrustee,modules.datamodels.datamodelPagination,header,Yes
-gateway.modules.features.trustee.routeFeatureTrustee,modules.interfaces.interfaceDbApp,header,Yes
-gateway.modules.features.trustee.routeFeatureTrustee,modules.interfaces.interfaceFeatures,header,Yes
-gateway.modules.features.trustee.routeFeatureTrustee,typing,header,Yes
-gateway.modules.interfaces.interfaceAiObjects,asyncio,header,Yes
-gateway.modules.interfaces.interfaceAiObjects,base64,header,Yes
-gateway.modules.interfaces.interfaceAiObjects,dataclasses,header,Yes
-gateway.modules.interfaces.interfaceAiObjects,logging,header,Yes
-gateway.modules.interfaces.interfaceAiObjects,modules.aicore.aicoreModelRegistry,header,Yes
-gateway.modules.interfaces.interfaceAiObjects,modules.aicore.aicoreModelSelector,header,Yes
-gateway.modules.interfaces.interfaceAiObjects,modules.datamodels.datamodelAi,header,Yes
-gateway.modules.interfaces.interfaceAiObjects,modules.datamodels.datamodelExtraction,header,Yes
-gateway.modules.interfaces.interfaceAiObjects,time,header,Yes
-gateway.modules.interfaces.interfaceAiObjects,typing,header,Yes
-gateway.modules.interfaces.interfaceAiObjects,uuid,header,Yes
-gateway.modules.interfaces.interfaceBootstrap,logging,header,Yes
-gateway.modules.interfaces.interfaceBootstrap,modules.connectors.connectorDbPostgre,header,Yes
-gateway.modules.interfaces.interfaceBootstrap,modules.datamodels.datamodelMembership,header,Yes
-gateway.modules.interfaces.interfaceBootstrap,modules.datamodels.datamodelRbac,header,Yes
-gateway.modules.interfaces.interfaceBootstrap,modules.datamodels.datamodelUam,header,Yes
-gateway.modules.interfaces.interfaceBootstrap,modules.datamodels.datamodelUam,header,Yes
-gateway.modules.interfaces.interfaceBootstrap,modules.shared.configuration,header,Yes
-gateway.modules.interfaces.interfaceBootstrap,modules.shared.dbMultiTenantOptimizations,function _applyDatabaseOptimizations,Yes
-gateway.modules.interfaces.interfaceBootstrap,passlib.context,header,Yes
-gateway.modules.interfaces.interfaceBootstrap,typing,header,Yes
-gateway.modules.interfaces.interfaceDbApp,logging,header,Yes
-gateway.modules.interfaces.interfaceDbApp,math,header,Yes
-gateway.modules.interfaces.interfaceDbApp,modules.connectors.connectorDbPostgre,header,Yes
-gateway.modules.interfaces.interfaceDbApp,modules.datamodels.datamodelFeatures,header,Yes
-gateway.modules.interfaces.interfaceDbApp,modules.datamodels.datamodelInvitation,header,Yes
-gateway.modules.interfaces.interfaceDbApp,modules.datamodels.datamodelMembership,header,Yes
-gateway.modules.interfaces.interfaceDbApp,modules.datamodels.datamodelPagination,header,Yes
-gateway.modules.interfaces.interfaceDbApp,modules.datamodels.datamodelRbac,header,Yes
-gateway.modules.interfaces.interfaceDbApp,modules.datamodels.datamodelSecurity,header,Yes
-gateway.modules.interfaces.interfaceDbApp,modules.datamodels.datamodelUam,header,Yes
-gateway.modules.interfaces.interfaceDbApp,modules.datamodels.datamodelUam,header,Yes
-gateway.modules.interfaces.interfaceDbApp,modules.interfaces.interfaceBootstrap,header,Yes
-gateway.modules.interfaces.interfaceDbApp,modules.interfaces.interfaceRbac,header,Yes
-gateway.modules.interfaces.interfaceDbApp,modules.security.rbac,header,Yes
-gateway.modules.interfaces.interfaceDbApp,modules.security.rootAccess,function getRootInterface,Yes
-gateway.modules.interfaces.interfaceDbApp,modules.shared.configuration,header,Yes
-gateway.modules.interfaces.interfaceDbApp,modules.shared.timeUtils,header,Yes
-gateway.modules.interfaces.interfaceDbApp,passlib.context,header,Yes
-gateway.modules.interfaces.interfaceDbApp,typing,header,Yes
-gateway.modules.interfaces.interfaceDbApp,uuid,header,Yes
-gateway.modules.interfaces.interfaceDbChat,asyncio,header,Yes
-gateway.modules.interfaces.interfaceDbChat,datetime,function storeDebugMessageAndDocuments,Yes
-gateway.modules.interfaces.interfaceDbChat,json,header,Yes
-gateway.modules.interfaces.interfaceDbChat,logging,header,Yes
-gateway.modules.interfaces.interfaceDbChat,math,header,Yes
-gateway.modules.interfaces.interfaceDbChat,modules.connectors.connectorDbPostgre,header,Yes
-gateway.modules.interfaces.interfaceDbChat,modules.datamodels.datamodelChat,header,Yes
-gateway.modules.interfaces.interfaceDbChat,modules.datamodels.datamodelPagination,header,Yes
-gateway.modules.interfaces.interfaceDbChat,modules.datamodels.datamodelRbac,header,Yes
-gateway.modules.interfaces.interfaceDbChat,modules.datamodels.datamodelUam,header,Yes
-gateway.modules.interfaces.interfaceDbChat,modules.datamodels.datamodelUam,header,Yes
-gateway.modules.interfaces.interfaceDbChat,modules.features.automation.datamodelFeatureAutomation,header,Yes
-gateway.modules.interfaces.interfaceDbChat,modules.interfaces.interfaceDbApp,function _enrichAutomationsWithUserAndMandate,Yes
-gateway.modules.interfaces.interfaceDbChat,modules.interfaces.interfaceDbManagement,function storeDebugMessageAndDocuments,Yes
-gateway.modules.interfaces.interfaceDbChat,modules.interfaces.interfaceRbac,header,Yes
-gateway.modules.interfaces.interfaceDbChat,modules.security.rbac,header,Yes
-gateway.modules.interfaces.interfaceDbChat,modules.security.rootAccess,function setUserContext,Yes
-gateway.modules.interfaces.interfaceDbChat,modules.shared.callbackRegistry,function _notifyAutomationChanged,Yes
-gateway.modules.interfaces.interfaceDbChat,modules.shared.configuration,header,Yes
-gateway.modules.interfaces.interfaceDbChat,modules.shared.debugLogger,function storeDebugMessageAndDocuments,Yes
-gateway.modules.interfaces.interfaceDbChat,modules.shared.timeUtils,header,Yes
-gateway.modules.interfaces.interfaceDbChat,os,function storeDebugMessageAndDocuments,Yes
-gateway.modules.interfaces.interfaceDbChat,typing,header,Yes
-gateway.modules.interfaces.interfaceDbChat,uuid,header,Yes
-gateway.modules.interfaces.interfaceDbManagement,base64,header,Yes
-gateway.modules.interfaces.interfaceDbManagement,hashlib,header,Yes
-gateway.modules.interfaces.interfaceDbManagement,logging,header,Yes
-gateway.modules.interfaces.interfaceDbManagement,math,header,Yes
-gateway.modules.interfaces.interfaceDbManagement,modules.connectors.connectorDbPostgre,header,Yes
-gateway.modules.interfaces.interfaceDbManagement,modules.datamodels.datamodelFiles,header,Yes
-gateway.modules.interfaces.interfaceDbManagement,modules.datamodels.datamodelMessaging,header,Yes
-gateway.modules.interfaces.interfaceDbManagement,modules.datamodels.datamodelPagination,header,Yes
-gateway.modules.interfaces.interfaceDbManagement,modules.datamodels.datamodelRbac,header,Yes
-gateway.modules.interfaces.interfaceDbManagement,modules.datamodels.datamodelUam,header,Yes
-gateway.modules.interfaces.interfaceDbManagement,modules.datamodels.datamodelUam,header,Yes
-gateway.modules.interfaces.interfaceDbManagement,modules.datamodels.datamodelUtils,header,Yes
-gateway.modules.interfaces.interfaceDbManagement,modules.datamodels.datamodelVoice,header,Yes
-gateway.modules.interfaces.interfaceDbManagement,modules.interfaces.interfaceDbApp,function _initializeStandardPrompts,Yes
-gateway.modules.interfaces.interfaceDbManagement,modules.interfaces.interfaceRbac,header,Yes
-gateway.modules.interfaces.interfaceDbManagement,modules.security.rbac,header,Yes
-gateway.modules.interfaces.interfaceDbManagement,modules.security.rootAccess,function setUserContext,Yes
-gateway.modules.interfaces.interfaceDbManagement,modules.security.rootAccess,function _initializeStandardPrompts,Yes
-gateway.modules.interfaces.interfaceDbManagement,modules.shared.configuration,header,Yes
-gateway.modules.interfaces.interfaceDbManagement,modules.shared.timeUtils,header,Yes
-gateway.modules.interfaces.interfaceDbManagement,os,header,Yes
-gateway.modules.interfaces.interfaceDbManagement,re,function _parse_size_string,Yes
-gateway.modules.interfaces.interfaceDbManagement,typing,header,Yes
-gateway.modules.interfaces.interfaceFeatures,logging,header,Yes
-gateway.modules.interfaces.interfaceFeatures,modules.connectors.connectorDbPostgre,header,Yes
-gateway.modules.interfaces.interfaceFeatures,modules.datamodels.datamodelFeatures,header,Yes
-gateway.modules.interfaces.interfaceFeatures,modules.datamodels.datamodelMembership,function syncRolesFromTemplate,Yes
-gateway.modules.interfaces.interfaceFeatures,modules.datamodels.datamodelRbac,header,Yes
-gateway.modules.interfaces.interfaceFeatures,typing,header,Yes
-gateway.modules.interfaces.interfaceFeatures,uuid,header,Yes
-gateway.modules.interfaces.interfaceMessaging,logging,header,Yes
-gateway.modules.interfaces.interfaceMessaging,modules.connectors.connectorMessagingEmail,header,Yes
-gateway.modules.interfaces.interfaceMessaging,modules.connectors.connectorMessagingSms,header,Yes
-gateway.modules.interfaces.interfaceMessaging,modules.datamodels.datamodelMessaging,header,Yes
-gateway.modules.interfaces.interfaceMessaging,typing,header,Yes
-gateway.modules.interfaces.interfaceRbac,json,header,Yes
-gateway.modules.interfaces.interfaceRbac,logging,header,Yes
-gateway.modules.interfaces.interfaceRbac,modules.connectors.connectorDbPostgre,function getRecordsetWithRBAC,Yes
-gateway.modules.interfaces.interfaceRbac,modules.datamodels.datamodelRbac,header,Yes
-gateway.modules.interfaces.interfaceRbac,modules.datamodels.datamodelUam,header,Yes
-gateway.modules.interfaces.interfaceRbac,modules.security.rbac,header,Yes
-gateway.modules.interfaces.interfaceRbac,modules.security.rootAccess,header,Yes
-gateway.modules.interfaces.interfaceRbac,pydantic,header,Yes
-gateway.modules.interfaces.interfaceRbac,typing,header,Yes
-gateway.modules.interfaces.interfaceTicketObjects,datetime,header,Yes
-gateway.modules.interfaces.interfaceTicketObjects,modules.connectors.connectorTicketsClickup,function createTicketInterfaceByType,Yes
-gateway.modules.interfaces.interfaceTicketObjects,modules.connectors.connectorTicketsJira,function createTicketInterfaceByType,Yes
-gateway.modules.interfaces.interfaceTicketObjects,typing,header,Yes
-gateway.modules.interfaces.interfaceVoiceObjects,logging,header,Yes
-gateway.modules.interfaces.interfaceVoiceObjects,modules.connectors.connectorVoiceGoogle,header,Yes
-gateway.modules.interfaces.interfaceVoiceObjects,modules.datamodels.datamodelUam,header,Yes
-gateway.modules.interfaces.interfaceVoiceObjects,modules.datamodels.datamodelVoice,header,Yes
-gateway.modules.interfaces.interfaceVoiceObjects,modules.shared.timeUtils,header,Yes
-gateway.modules.interfaces.interfaceVoiceObjects,typing,header,Yes
-gateway.modules.routes.routeAdmin,fastapi,header,Yes
-gateway.modules.routes.routeAdmin,fastapi,header,Yes
-gateway.modules.routes.routeAdmin,fastapi.responses,header,Yes
-gateway.modules.routes.routeAdmin,fastapi.staticfiles,header,Yes
-gateway.modules.routes.routeAdmin,logging,header,Yes
-gateway.modules.routes.routeAdmin,modules.auth,header,Yes
-gateway.modules.routes.routeAdmin,modules.datamodels.datamodelUam,header,Yes
-gateway.modules.routes.routeAdmin,modules.interfaces.interfaceDbApp,header,Yes
-gateway.modules.routes.routeAdmin,modules.shared.configuration,header,Yes
-gateway.modules.routes.routeAdmin,os,header,Yes
-gateway.modules.routes.routeAdmin,pathlib,header,Yes
-gateway.modules.routes.routeAdmin,typing,header,Yes
-gateway.modules.routes.routeAdminAutomationEvents,fastapi,header,Yes
-gateway.modules.routes.routeAdminAutomationEvents,fastapi,header,Yes
-gateway.modules.routes.routeAdminAutomationEvents,logging,header,Yes
-gateway.modules.routes.routeAdminAutomationEvents,modules.auth,header,Yes
-gateway.modules.routes.routeAdminAutomationEvents,modules.datamodels.datamodelUam,header,Yes
-gateway.modules.routes.routeAdminAutomationEvents,modules.interfaces.interfaceDbApp,function sync_all_automation_events,Yes
-gateway.modules.routes.routeAdminAutomationEvents,modules.interfaces.interfaceDbChat,header,Yes
-gateway.modules.routes.routeAdminAutomationEvents,modules.services,function sync_all_automation_events,Yes
-gateway.modules.routes.routeAdminAutomationEvents,modules.shared.eventManagement,function get_all_automation_events,Yes
-gateway.modules.routes.routeAdminAutomationEvents,modules.shared.eventManagement,function remove_event,Yes
-gateway.modules.routes.routeAdminAutomationEvents,modules.workflows.automation,function sync_all_automation_events,Yes
-gateway.modules.routes.routeAdminAutomationEvents,typing,header,Yes
-gateway.modules.routes.routeAdminFeatures,fastapi,header,Yes
-gateway.modules.routes.routeAdminFeatures,fastapi,header,Yes
-gateway.modules.routes.routeAdminFeatures,logging,header,Yes
-gateway.modules.routes.routeAdminFeatures,modules.auth,header,Yes
-gateway.modules.routes.routeAdminFeatures,modules.datamodels.datamodelFeatures,header,Yes
-gateway.modules.routes.routeAdminFeatures,modules.datamodels.datamodelMembership,function _getUserRoleInInstance,Yes
-gateway.modules.routes.routeAdminFeatures,modules.datamodels.datamodelMembership,function _getInstancePermissions,Yes
-gateway.modules.routes.routeAdminFeatures,modules.datamodels.datamodelMembership,function listFeatureInstanceUsers,Yes
-gateway.modules.routes.routeAdminFeatures,modules.datamodels.datamodelMembership,function addUserToFeatureInstance,Yes
-gateway.modules.routes.routeAdminFeatures,modules.datamodels.datamodelMembership,function removeUserFromFeatureInstance,Yes
-gateway.modules.routes.routeAdminFeatures,modules.datamodels.datamodelMembership,function updateFeatureInstanceUserRoles,Yes
-gateway.modules.routes.routeAdminFeatures,modules.datamodels.datamodelRbac,function _getUserRoleInInstance,Yes
-gateway.modules.routes.routeAdminFeatures,modules.datamodels.datamodelRbac,function _getInstancePermissions,Yes
-gateway.modules.routes.routeAdminFeatures,modules.datamodels.datamodelRbac,function listFeatureInstanceUsers,Yes
-gateway.modules.routes.routeAdminFeatures,modules.datamodels.datamodelRbac,function getFeatureInstanceAvailableRoles,Yes
-gateway.modules.routes.routeAdminFeatures,modules.datamodels.datamodelRbac,function _hasMandateAdminRole,Yes
-gateway.modules.routes.routeAdminFeatures,modules.datamodels.datamodelUam,header,Yes
-gateway.modules.routes.routeAdminFeatures,modules.interfaces.interfaceDbApp,header,Yes
-gateway.modules.routes.routeAdminFeatures,modules.interfaces.interfaceFeatures,header,Yes
-gateway.modules.routes.routeAdminFeatures,pydantic,header,Yes
-gateway.modules.routes.routeAdminFeatures,typing,header,Yes
-gateway.modules.routes.routeAdminRbacExport,fastapi,header,Yes
-gateway.modules.routes.routeAdminRbacExport,fastapi,header,Yes
-gateway.modules.routes.routeAdminRbacExport,fastapi.responses,header,Yes
-gateway.modules.routes.routeAdminRbacExport,json,header,Yes
-gateway.modules.routes.routeAdminRbacExport,logging,header,Yes
-gateway.modules.routes.routeAdminRbacExport,modules.auth,header,Yes
-gateway.modules.routes.routeAdminRbacExport,modules.datamodels.datamodelRbac,header,Yes
-gateway.modules.routes.routeAdminRbacExport,modules.datamodels.datamodelUam,header,Yes
-gateway.modules.routes.routeAdminRbacExport,modules.interfaces.interfaceDbApp,header,Yes
-gateway.modules.routes.routeAdminRbacExport,modules.shared.timeUtils,header,Yes
-gateway.modules.routes.routeAdminRbacExport,pydantic,header,Yes
-gateway.modules.routes.routeAdminRbacExport,typing,header,Yes
-gateway.modules.routes.routeAdminRbacRoles,fastapi,header,Yes
-gateway.modules.routes.routeAdminRbacRoles,logging,header,Yes
-gateway.modules.routes.routeAdminRbacRoles,modules.auth,header,Yes
-gateway.modules.routes.routeAdminRbacRoles,modules.datamodels.datamodelMembership,header,Yes
-gateway.modules.routes.routeAdminRbacRoles,modules.datamodels.datamodelRbac,header,Yes
-gateway.modules.routes.routeAdminRbacRoles,modules.datamodels.datamodelUam,header,Yes
-gateway.modules.routes.routeAdminRbacRoles,modules.interfaces.interfaceDbApp,header,Yes
-gateway.modules.routes.routeAdminRbacRoles,typing,header,Yes
-gateway.modules.routes.routeAdminRbacRules,fastapi,header,Yes
-gateway.modules.routes.routeAdminRbacRules,json,header,Yes
-gateway.modules.routes.routeAdminRbacRules,logging,header,Yes
-gateway.modules.routes.routeAdminRbacRules,math,header,Yes
-gateway.modules.routes.routeAdminRbacRules,modules.auth,header,Yes
-gateway.modules.routes.routeAdminRbacRules,modules.datamodels.datamodelPagination,header,Yes
-gateway.modules.routes.routeAdminRbacRules,modules.datamodels.datamodelRbac,header,Yes
-gateway.modules.routes.routeAdminRbacRules,modules.datamodels.datamodelUam,header,Yes
-gateway.modules.routes.routeAdminRbacRules,modules.interfaces.interfaceDbApp,header,Yes
-gateway.modules.routes.routeAdminRbacRules,typing,header,Yes
-gateway.modules.routes.routeAttributes,fastapi,header,Yes
-gateway.modules.routes.routeAttributes,fastapi,header,Yes
-gateway.modules.routes.routeAttributes,logging,header,Yes
-gateway.modules.routes.routeAttributes,modules.auth,header,Yes
-gateway.modules.routes.routeAttributes,modules.shared.attributeUtils,header,Yes
-gateway.modules.routes.routeChat,(relative) .,header,Yes
-gateway.modules.routes.routeChat,fastapi,header,Yes
-gateway.modules.routes.routeChat,logging,header,Yes
-gateway.modules.routes.routeChat,modules.auth,header,Yes
-gateway.modules.routes.routeChat,modules.datamodels.datamodelChat,header,Yes
-gateway.modules.routes.routeChat,modules.workflows.automation,header,Yes
-gateway.modules.routes.routeChat,typing,header,Yes
-gateway.modules.routes.routeDataConnections,fastapi,header,Yes
-gateway.modules.routes.routeDataConnections,fastapi,header,Yes
-gateway.modules.routes.routeDataConnections,json,header,Yes
-gateway.modules.routes.routeDataConnections,logging,header,Yes
-gateway.modules.routes.routeDataConnections,math,header,Yes
-gateway.modules.routes.routeDataConnections,modules.auth,header,Yes
-gateway.modules.routes.routeDataConnections,modules.datamodels.datamodelPagination,header,Yes
-gateway.modules.routes.routeDataConnections,modules.datamodels.datamodelSecurity,header,Yes
-gateway.modules.routes.routeDataConnections,modules.datamodels.datamodelUam,header,Yes
-gateway.modules.routes.routeDataConnections,modules.interfaces.interfaceDbApp,header,Yes
-gateway.modules.routes.routeDataConnections,modules.interfaces.interfaceDbManagement,header,Yes
-gateway.modules.routes.routeDataConnections,modules.shared.timeUtils,header,Yes
-gateway.modules.routes.routeDataConnections,typing,header,Yes
-gateway.modules.routes.routeDataFiles,fastapi,header,Yes
-gateway.modules.routes.routeDataFiles,fastapi.responses,header,Yes
-gateway.modules.routes.routeDataFiles,json,header,Yes
-gateway.modules.routes.routeDataFiles,logging,header,Yes
-gateway.modules.routes.routeDataFiles,modules.auth,header,Yes
-gateway.modules.routes.routeDataFiles,modules.datamodels.datamodelFiles,header,Yes
-gateway.modules.routes.routeDataFiles,modules.datamodels.datamodelPagination,header,Yes
-gateway.modules.routes.routeDataFiles,modules.datamodels.datamodelUam,header,Yes
-gateway.modules.routes.routeDataFiles,modules.interfaces.interfaceDbManagement,header,Yes
-gateway.modules.routes.routeDataFiles,modules.shared.attributeUtils,header,Yes
-gateway.modules.routes.routeDataFiles,typing,header,Yes
-gateway.modules.routes.routeDataFiles,urllib.parse,function download_file,Yes
-gateway.modules.routes.routeDataMandates,fastapi,header,Yes
-gateway.modules.routes.routeDataMandates,fastapi,header,Yes
-gateway.modules.routes.routeDataMandates,json,header,Yes
-gateway.modules.routes.routeDataMandates,logging,header,Yes
-gateway.modules.routes.routeDataMandates,modules.auth,header,Yes
-gateway.modules.routes.routeDataMandates,modules.datamodels.datamodelMembership,header,Yes
-gateway.modules.routes.routeDataMandates,modules.datamodels.datamodelPagination,header,Yes
-gateway.modules.routes.routeDataMandates,modules.datamodels.datamodelRbac,header,Yes
-gateway.modules.routes.routeDataMandates,modules.datamodels.datamodelUam,header,Yes
-gateway.modules.routes.routeDataMandates,modules.interfaces.interfaceDbApp,header,Yes
-gateway.modules.routes.routeDataMandates,modules.shared.attributeUtils,header,Yes
-gateway.modules.routes.routeDataMandates,modules.shared.auditLogger,header,Yes
-gateway.modules.routes.routeDataMandates,pydantic,header,Yes
-gateway.modules.routes.routeDataMandates,typing,header,Yes
-gateway.modules.routes.routeDataPrompts,fastapi,header,Yes
-gateway.modules.routes.routeDataPrompts,fastapi,header,Yes
-gateway.modules.routes.routeDataPrompts,json,header,Yes
-gateway.modules.routes.routeDataPrompts,logging,header,Yes
-gateway.modules.routes.routeDataPrompts,modules.auth,header,Yes
-gateway.modules.routes.routeDataPrompts,modules.datamodels.datamodelPagination,header,Yes
-gateway.modules.routes.routeDataPrompts,modules.datamodels.datamodelUam,header,Yes
-gateway.modules.routes.routeDataPrompts,modules.datamodels.datamodelUtils,header,Yes
-gateway.modules.routes.routeDataPrompts,modules.interfaces.interfaceDbManagement,header,Yes
-gateway.modules.routes.routeDataPrompts,typing,header,Yes
-gateway.modules.routes.routeDataUsers,fastapi,header,Yes
-gateway.modules.routes.routeDataUsers,fastapi,header,Yes
-gateway.modules.routes.routeDataUsers,json,header,Yes
-gateway.modules.routes.routeDataUsers,logging,header,Yes
-gateway.modules.routes.routeDataUsers,math,function get_users,Yes
-gateway.modules.routes.routeDataUsers,modules.auth,header,Yes
-gateway.modules.routes.routeDataUsers,modules.datamodels.datamodelMembership,function delete_user,Yes
-gateway.modules.routes.routeDataUsers,modules.datamodels.datamodelMembership,function update_user,Yes
-gateway.modules.routes.routeDataUsers,modules.datamodels.datamodelMembership,function delete_user,Yes
-gateway.modules.routes.routeDataUsers,modules.datamodels.datamodelMembership,function get_user,Yes
-gateway.modules.routes.routeDataUsers,modules.datamodels.datamodelMembership,function reset_user_password,Yes
-gateway.modules.routes.routeDataUsers,modules.datamodels.datamodelMembership,function sendPasswordLink,Yes
-gateway.modules.routes.routeDataUsers,modules.datamodels.datamodelPagination,header,Yes
-gateway.modules.routes.routeDataUsers,modules.datamodels.datamodelUam,header,Yes
-gateway.modules.routes.routeDataUsers,modules.interfaces.interfaceDbApp,header,Yes
-gateway.modules.routes.routeDataUsers,modules.services,function sendPasswordLink,Yes
-gateway.modules.routes.routeDataUsers,modules.shared.auditLogger,function reset_user_password,Yes
-gateway.modules.routes.routeDataUsers,modules.shared.auditLogger,function change_password,Yes
-gateway.modules.routes.routeDataUsers,modules.shared.auditLogger,function sendPasswordLink,Yes
-gateway.modules.routes.routeDataUsers,modules.shared.configuration,function sendPasswordLink,Yes
-gateway.modules.routes.routeDataUsers,pydantic,header,Yes
-gateway.modules.routes.routeDataUsers,typing,header,Yes
-gateway.modules.routes.routeDataWorkflows,fastapi,header,Yes
-gateway.modules.routes.routeDataWorkflows,json,header,Yes
-gateway.modules.routes.routeDataWorkflows,logging,header,Yes
-gateway.modules.routes.routeDataWorkflows,modules.auth,header,Yes
-gateway.modules.routes.routeDataWorkflows,modules.datamodels.datamodelChat,header,Yes
-gateway.modules.routes.routeDataWorkflows,modules.datamodels.datamodelPagination,header,Yes
-gateway.modules.routes.routeDataWorkflows,modules.datamodels.datamodelUam,header,Yes
-gateway.modules.routes.routeDataWorkflows,modules.interfaces.interfaceDbChat,header,Yes
-gateway.modules.routes.routeDataWorkflows,modules.interfaces.interfaceDbChat,header,Yes
-gateway.modules.routes.routeDataWorkflows,modules.interfaces.interfaceRbac,header,Yes
-gateway.modules.routes.routeDataWorkflows,modules.services,function get_all_actions,Yes
-gateway.modules.routes.routeDataWorkflows,modules.services,function get_method_actions,Yes
-gateway.modules.routes.routeDataWorkflows,modules.services,function get_action_schema,Yes
-gateway.modules.routes.routeDataWorkflows,modules.shared.attributeUtils,header,Yes
-gateway.modules.routes.routeDataWorkflows,modules.workflows.processing.shared.methodDiscovery,function get_all_actions,Yes
-gateway.modules.routes.routeDataWorkflows,modules.workflows.processing.shared.methodDiscovery,function get_all_actions,Yes
-gateway.modules.routes.routeDataWorkflows,modules.workflows.processing.shared.methodDiscovery,function get_method_actions,Yes
-gateway.modules.routes.routeDataWorkflows,modules.workflows.processing.shared.methodDiscovery,function get_method_actions,Yes
-gateway.modules.routes.routeDataWorkflows,modules.workflows.processing.shared.methodDiscovery,function get_action_schema,Yes
-gateway.modules.routes.routeDataWorkflows,modules.workflows.processing.shared.methodDiscovery,function get_action_schema,Yes
-gateway.modules.routes.routeDataWorkflows,typing,header,Yes
-gateway.modules.routes.routeGdpr,datetime,function _timestampToIso,Yes
-gateway.modules.routes.routeGdpr,fastapi,header,Yes
-gateway.modules.routes.routeGdpr,fastapi,header,Yes
-gateway.modules.routes.routeGdpr,fastapi.responses,header,Yes
-gateway.modules.routes.routeGdpr,json,header,Yes
-gateway.modules.routes.routeGdpr,logging,header,Yes
-gateway.modules.routes.routeGdpr,modules.auth,header,Yes
-gateway.modules.routes.routeGdpr,modules.datamodels.datamodelFeatures,function exportUserData,Yes
-gateway.modules.routes.routeGdpr,modules.datamodels.datamodelInvitation,function exportUserData,Yes
-gateway.modules.routes.routeGdpr,modules.datamodels.datamodelInvitation,function deleteAccount,Yes
-gateway.modules.routes.routeGdpr,modules.datamodels.datamodelMembership,function exportUserData,Yes
-gateway.modules.routes.routeGdpr,modules.datamodels.datamodelMembership,function exportUserData,Yes
-gateway.modules.routes.routeGdpr,modules.datamodels.datamodelMembership,function exportPortableData,Yes
-gateway.modules.routes.routeGdpr,modules.datamodels.datamodelMembership,function deleteAccount,Yes
-gateway.modules.routes.routeGdpr,modules.datamodels.datamodelMembership,function deleteAccount,Yes
-gateway.modules.routes.routeGdpr,modules.datamodels.datamodelSecurity,function deleteAccount,Yes
-gateway.modules.routes.routeGdpr,modules.datamodels.datamodelUam,header,Yes
-gateway.modules.routes.routeGdpr,modules.interfaces.interfaceDbApp,header,Yes
-gateway.modules.routes.routeGdpr,modules.shared.auditLogger,header,Yes
-gateway.modules.routes.routeGdpr,modules.shared.timeUtils,header,Yes
-gateway.modules.routes.routeGdpr,pydantic,header,Yes
-gateway.modules.routes.routeGdpr,typing,header,Yes
-gateway.modules.routes.routeInvitations,fastapi,header,Yes
-gateway.modules.routes.routeInvitations,fastapi,header,Yes
-gateway.modules.routes.routeInvitations,logging,header,Yes
-gateway.modules.routes.routeInvitations,modules.auth,header,Yes
-gateway.modules.routes.routeInvitations,modules.datamodels.datamodelFeatures,function createInvitation,Yes
-gateway.modules.routes.routeInvitations,modules.datamodels.datamodelInvitation,header,Yes
-gateway.modules.routes.routeInvitations,modules.datamodels.datamodelRbac,function _hasMandateAdminRole,Yes
-gateway.modules.routes.routeInvitations,modules.datamodels.datamodelRbac,function _isInstanceRole,Yes
-gateway.modules.routes.routeInvitations,modules.datamodels.datamodelRbac,function createInvitation,Yes
-gateway.modules.routes.routeInvitations,modules.datamodels.datamodelUam,header,Yes
-gateway.modules.routes.routeInvitations,modules.interfaces.interfaceDbApp,header,Yes
-gateway.modules.routes.routeInvitations,modules.security.passwordUtils,function registerAndAcceptInvitation,Yes
-gateway.modules.routes.routeInvitations,modules.shared.configuration,function createInvitation,Yes
-gateway.modules.routes.routeInvitations,modules.shared.configuration,function listInvitations,Yes
-gateway.modules.routes.routeInvitations,modules.shared.timeUtils,header,Yes
-gateway.modules.routes.routeInvitations,pydantic,header,Yes
-gateway.modules.routes.routeInvitations,typing,header,Yes
-gateway.modules.routes.routeMessaging,fastapi,header,Yes
-gateway.modules.routes.routeMessaging,fastapi,header,Yes
-gateway.modules.routes.routeMessaging,json,header,Yes
-gateway.modules.routes.routeMessaging,logging,header,Yes
-gateway.modules.routes.routeMessaging,modules.auth,header,Yes
-gateway.modules.routes.routeMessaging,modules.datamodels.datamodelMessaging,header,Yes
-gateway.modules.routes.routeMessaging,modules.datamodels.datamodelPagination,header,Yes
-gateway.modules.routes.routeMessaging,modules.datamodels.datamodelRbac,header,Yes
-gateway.modules.routes.routeMessaging,modules.datamodels.datamodelUam,header,Yes
-gateway.modules.routes.routeMessaging,modules.interfaces.interfaceDbApp,function _hasTriggerPermission,Yes
-gateway.modules.routes.routeMessaging,modules.interfaces.interfaceDbManagement,header,Yes
-gateway.modules.routes.routeMessaging,modules.services,function triggerSubscription,Yes
-gateway.modules.routes.routeMessaging,typing,header,Yes
-gateway.modules.routes.routeSecurityAdmin,fastapi,header,Yes
-gateway.modules.routes.routeSecurityAdmin,fastapi.responses,header,Yes
-gateway.modules.routes.routeSecurityAdmin,logging,header,Yes
-gateway.modules.routes.routeSecurityAdmin,modules.auth,header,Yes
-gateway.modules.routes.routeSecurityAdmin,modules.connectors.connectorDbPostgre,header,Yes
-gateway.modules.routes.routeSecurityAdmin,modules.datamodels.datamodelMembership,function revoke_tokens_by_mandate,Yes
-gateway.modules.routes.routeSecurityAdmin,modules.datamodels.datamodelSecurity,header,Yes
-gateway.modules.routes.routeSecurityAdmin,modules.datamodels.datamodelUam,header,Yes
-gateway.modules.routes.routeSecurityAdmin,modules.interfaces.interfaceDbApp,header,Yes
-gateway.modules.routes.routeSecurityAdmin,modules.shared.configuration,header,Yes
-gateway.modules.routes.routeSecurityAdmin,os,header,Yes
-gateway.modules.routes.routeSecurityAdmin,typing,header,Yes
-gateway.modules.routes.routeSecurityGoogle,fastapi,header,Yes
-gateway.modules.routes.routeSecurityGoogle,fastapi.responses,header,Yes
-gateway.modules.routes.routeSecurityGoogle,httpx,header,Yes
-gateway.modules.routes.routeSecurityGoogle,jose,function auth_callback,Yes
-gateway.modules.routes.routeSecurityGoogle,json,header,Yes
-gateway.modules.routes.routeSecurityGoogle,logging,header,Yes
-gateway.modules.routes.routeSecurityGoogle,modules.auth,header,Yes
-gateway.modules.routes.routeSecurityGoogle,modules.auth,header,Yes
-gateway.modules.routes.routeSecurityGoogle,modules.datamodels.datamodelSecurity,function auth_callback,Yes
-gateway.modules.routes.routeSecurityGoogle,modules.datamodels.datamodelUam,header,Yes
-gateway.modules.routes.routeSecurityGoogle,modules.interfaces.interfaceDbApp,header,Yes
-gateway.modules.routes.routeSecurityGoogle,modules.shared.auditLogger,function logout,Yes
-gateway.modules.routes.routeSecurityGoogle,modules.shared.configuration,header,Yes
-gateway.modules.routes.routeSecurityGoogle,modules.shared.timeUtils,header,Yes
-gateway.modules.routes.routeSecurityGoogle,requests_oauthlib,header,Yes
-gateway.modules.routes.routeSecurityGoogle,typing,header,Yes
-gateway.modules.routes.routeSecurityLocal,datetime,header,Yes
-gateway.modules.routes.routeSecurityLocal,fastapi,header,Yes
-gateway.modules.routes.routeSecurityLocal,fastapi.responses,header,Yes
-gateway.modules.routes.routeSecurityLocal,fastapi.security,header,Yes
-gateway.modules.routes.routeSecurityLocal,html,function _sendAuthEmail,Yes
-gateway.modules.routes.routeSecurityLocal,jose,header,Yes
-gateway.modules.routes.routeSecurityLocal,logging,header,Yes
-gateway.modules.routes.routeSecurityLocal,modules.auth,header,Yes
-gateway.modules.routes.routeSecurityLocal,modules.auth,header,Yes
-gateway.modules.routes.routeSecurityLocal,modules.datamodels.datamodelMessaging,function _sendAuthEmail,Yes
-gateway.modules.routes.routeSecurityLocal,modules.datamodels.datamodelSecurity,header,Yes
-gateway.modules.routes.routeSecurityLocal,modules.datamodels.datamodelUam,header,Yes
-gateway.modules.routes.routeSecurityLocal,modules.interfaces.interfaceDbApp,header,Yes
-gateway.modules.routes.routeSecurityLocal,modules.interfaces.interfaceMessaging,function _sendAuthEmail,Yes
-gateway.modules.routes.routeSecurityLocal,modules.shared.auditLogger,function login,Yes
-gateway.modules.routes.routeSecurityLocal,modules.shared.auditLogger,function logout,Yes
-gateway.modules.routes.routeSecurityLocal,modules.shared.auditLogger,function passwordReset,Yes
-gateway.modules.routes.routeSecurityLocal,modules.shared.auditLogger,function login,Yes
-gateway.modules.routes.routeSecurityLocal,modules.shared.configuration,header,Yes
-gateway.modules.routes.routeSecurityLocal,typing,header,Yes
-gateway.modules.routes.routeSecurityLocal,uuid,header,Yes
-gateway.modules.routes.routeSecurityMsft,fastapi,header,Yes
-gateway.modules.routes.routeSecurityMsft,fastapi.responses,header,Yes
-gateway.modules.routes.routeSecurityMsft,httpx,header,Yes
-gateway.modules.routes.routeSecurityMsft,jose,function auth_callback,Yes
-gateway.modules.routes.routeSecurityMsft,json,header,Yes
-gateway.modules.routes.routeSecurityMsft,logging,header,Yes
-gateway.modules.routes.routeSecurityMsft,modules.auth,header,Yes
-gateway.modules.routes.routeSecurityMsft,modules.auth,header,Yes
-gateway.modules.routes.routeSecurityMsft,modules.datamodels.datamodelSecurity,header,Yes
-gateway.modules.routes.routeSecurityMsft,modules.datamodels.datamodelUam,header,Yes
-gateway.modules.routes.routeSecurityMsft,modules.interfaces.interfaceDbApp,header,Yes
-gateway.modules.routes.routeSecurityMsft,modules.shared.auditLogger,function logout,Yes
-gateway.modules.routes.routeSecurityMsft,modules.shared.configuration,header,Yes
-gateway.modules.routes.routeSecurityMsft,modules.shared.timeUtils,header,Yes
-gateway.modules.routes.routeSecurityMsft,msal,header,Yes
-gateway.modules.routes.routeSecurityMsft,typing,header,Yes
-gateway.modules.routes.routeSharepoint,fastapi,header,Yes
-gateway.modules.routes.routeSharepoint,logging,header,Yes
-gateway.modules.routes.routeSharepoint,modules.auth,header,Yes
-gateway.modules.routes.routeSharepoint,modules.datamodels.datamodelUam,header,Yes
-gateway.modules.routes.routeSharepoint,modules.interfaces.interfaceDbApp,header,Yes
-gateway.modules.routes.routeSharepoint,modules.services,header,Yes
-gateway.modules.routes.routeSharepoint,typing,header,Yes
-gateway.modules.routes.routeVoiceGoogle,base64,header,Yes
-gateway.modules.routes.routeVoiceGoogle,fastapi,header,Yes
-gateway.modules.routes.routeVoiceGoogle,fastapi.responses,header,Yes
-gateway.modules.routes.routeVoiceGoogle,json,header,Yes
-gateway.modules.routes.routeVoiceGoogle,logging,header,Yes
-gateway.modules.routes.routeVoiceGoogle,modules.auth,header,Yes
-gateway.modules.routes.routeVoiceGoogle,modules.datamodels.datamodelUam,header,Yes
-gateway.modules.routes.routeVoiceGoogle,modules.interfaces.interfaceVoiceObjects,header,Yes
-gateway.modules.routes.routeVoiceGoogle,typing,header,Yes
-gateway.modules.security.__init__,(relative) .rbac,header,Yes
-gateway.modules.security.__init__,(relative) .rbacHelpers,header,Yes
-gateway.modules.security.__init__,(relative) .rootAccess,header,Yes
-gateway.modules.security.passwordUtils,passlib.context,header,Yes
-gateway.modules.security.passwordUtils,typing,header,Yes
-gateway.modules.security.rbac,logging,header,Yes
-gateway.modules.security.rbac,modules.connectors.connectorDbPostgre,header,Yes
-gateway.modules.security.rbac,modules.datamodels.datamodelMembership,header,Yes
-gateway.modules.security.rbac,modules.datamodels.datamodelRbac,header,Yes
-gateway.modules.security.rbac,modules.datamodels.datamodelUam,header,Yes
-gateway.modules.security.rbac,typing,header,Yes
-gateway.modules.security.rbacCatalog,logging,header,Yes
-gateway.modules.security.rbacCatalog,threading,header,Yes
-gateway.modules.security.rbacCatalog,typing,header,Yes
-gateway.modules.security.rbacHelpers,logging,header,Yes
-gateway.modules.security.rbacHelpers,modules.datamodels.datamodelRbac,header,Yes
-gateway.modules.security.rbacHelpers,modules.datamodels.datamodelUam,header,Yes
-gateway.modules.security.rbacHelpers,modules.security.rbac,header,Yes
-gateway.modules.security.rbacHelpers,typing,header,Yes
-gateway.modules.security.rootAccess,logging,header,Yes
-gateway.modules.security.rootAccess,modules.connectors.connectorDbPostgre,header,Yes
-gateway.modules.security.rootAccess,modules.datamodels.datamodelUam,header,Yes
-gateway.modules.security.rootAccess,modules.interfaces.interfaceBootstrap,function _ensureBootstrap,Yes
-gateway.modules.security.rootAccess,modules.shared.configuration,header,Yes
-gateway.modules.services.__init__,(relative) .serviceAi.mainServiceAi,function __init__,Yes
-gateway.modules.services.__init__,(relative) .serviceChat.mainServiceChat,function __init__,Yes
-gateway.modules.services.__init__,(relative) .serviceExtraction.mainServiceExtraction,function __init__,Yes
-gateway.modules.services.__init__,(relative) .serviceGeneration.mainServiceGeneration,function __init__,Yes
-gateway.modules.services.__init__,(relative) .serviceMessaging.mainServiceMessaging,function __init__,Yes
-gateway.modules.services.__init__,(relative) .serviceSecurity.mainServiceSecurity,function __init__,Yes
-gateway.modules.services.__init__,(relative) .serviceSharepoint.mainServiceSharepoint,function __init__,Yes
-gateway.modules.services.__init__,(relative) .serviceTicket.mainServiceTicket,function __init__,Yes
-gateway.modules.services.__init__,(relative) .serviceUtils.mainServiceUtils,function __init__,Yes
-gateway.modules.services.__init__,(relative) .serviceWeb.mainServiceWeb,function __init__,Yes
-gateway.modules.services.__init__,glob,header,Yes
-gateway.modules.services.__init__,importlib,header,Yes
-gateway.modules.services.__init__,logging,header,Yes
-gateway.modules.services.__init__,modules.datamodels.datamodelChat,header,Yes
-gateway.modules.services.__init__,modules.datamodels.datamodelUam,header,Yes
-gateway.modules.services.__init__,modules.interfaces.interfaceDbApp,function __init__,Yes
-gateway.modules.services.__init__,modules.interfaces.interfaceDbChat,function __init__,Yes
-gateway.modules.services.__init__,modules.interfaces.interfaceDbManagement,function __init__,Yes
-gateway.modules.services.__init__,os,header,Yes
-gateway.modules.services.__init__,typing,header,Yes
-gateway.modules.services.serviceAi.mainAiChat,logging,header,Yes
-gateway.modules.services.serviceAi.mainAiChat,modules.aicore.aicoreModelRegistry,function onStart,Yes
-gateway.modules.services.serviceAi.mainAiChat,typing,header,Yes
-gateway.modules.services.serviceAi.mainServiceAi,(relative) .subAiCallLooping,function _initializeSubmodules,Yes
-gateway.modules.services.serviceAi.mainServiceAi,(relative) .subContentExtraction,function _initializeSubmodules,Yes
-gateway.modules.services.serviceAi.mainServiceAi,(relative) .subDocumentIntents,function _initializeSubmodules,Yes
-gateway.modules.services.serviceAi.mainServiceAi,(relative) .subJsonResponseHandling,header,Yes
-gateway.modules.services.serviceAi.mainServiceAi,(relative) .subResponseParsing,function _initializeSubmodules,Yes
-gateway.modules.services.serviceAi.mainServiceAi,(relative) .subStructureFilling,function _initializeSubmodules,Yes
-gateway.modules.services.serviceAi.mainServiceAi,(relative) .subStructureGeneration,function _initializeSubmodules,Yes
-gateway.modules.services.serviceAi.mainServiceAi,base64,header,Yes
-gateway.modules.services.serviceAi.mainServiceAi,json,header,Yes
-gateway.modules.services.serviceAi.mainServiceAi,logging,header,Yes
-gateway.modules.services.serviceAi.mainServiceAi,modules.datamodels.datamodelAi,header,Yes
-gateway.modules.services.serviceAi.mainServiceAi,modules.datamodels.datamodelAi,header,Yes
-gateway.modules.services.serviceAi.mainServiceAi,modules.datamodels.datamodelChat,header,Yes
-gateway.modules.services.serviceAi.mainServiceAi,modules.datamodels.datamodelDocument,header,Yes
-gateway.modules.services.serviceAi.mainServiceAi,modules.datamodels.datamodelExtraction,header,Yes
-gateway.modules.services.serviceAi.mainServiceAi,modules.datamodels.datamodelWorkflow,header,Yes
-gateway.modules.services.serviceAi.mainServiceAi,modules.interfaces.interfaceAiObjects,header,Yes
-gateway.modules.services.serviceAi.mainServiceAi,modules.services.serviceExtraction.mainServiceExtraction,header,Yes
-gateway.modules.services.serviceAi.mainServiceAi,modules.services.serviceGeneration.mainServiceGeneration,function renderResult,Yes
-gateway.modules.services.serviceAi.mainServiceAi,modules.services.serviceGeneration.paths.codePath,function _handleCodeGeneration,Yes
-gateway.modules.services.serviceAi.mainServiceAi,modules.services.serviceGeneration.paths.documentPath,function _handleDocumentGeneration,Yes
-gateway.modules.services.serviceAi.mainServiceAi,modules.services.serviceGeneration.paths.imagePath,function _handleImageGeneration,Yes
-gateway.modules.services.serviceAi.mainServiceAi,modules.shared.jsonUtils,header,Yes
-gateway.modules.services.serviceAi.mainServiceAi,re,header,Yes
-gateway.modules.services.serviceAi.mainServiceAi,time,header,Yes
-gateway.modules.services.serviceAi.mainServiceAi,time,function _handleDataExtraction,Yes
-gateway.modules.services.serviceAi.mainServiceAi,typing,header,Yes
-gateway.modules.services.serviceAi.subAiCallLooping,(relative) .subJsonResponseHandling,header,Yes
-gateway.modules.services.serviceAi.subAiCallLooping,(relative) .subLoopingUseCases,header,Yes
-gateway.modules.services.serviceAi.subAiCallLooping,json,header,Yes
-gateway.modules.services.serviceAi.subAiCallLooping,logging,header,Yes
-gateway.modules.services.serviceAi.subAiCallLooping,modules.datamodels.datamodelAi,header,Yes
-gateway.modules.services.serviceAi.subAiCallLooping,modules.datamodels.datamodelExtraction,header,Yes
-gateway.modules.services.serviceAi.subAiCallLooping,modules.shared.jsonContinuation,header,Yes
-gateway.modules.services.serviceAi.subAiCallLooping,modules.shared.jsonUtils,header,Yes
-gateway.modules.services.serviceAi.subAiCallLooping,modules.shared.jsonUtils,header,Yes
-gateway.modules.services.serviceAi.subAiCallLooping,modules.shared.jsonUtils,header,Yes
-gateway.modules.services.serviceAi.subAiCallLooping,modules.shared.jsonUtils,header,Yes
-gateway.modules.services.serviceAi.subAiCallLooping,modules.workflows.processing.shared.stateTools,header,Yes
-gateway.modules.services.serviceAi.subAiCallLooping,typing,header,Yes
-gateway.modules.services.serviceAi.subContentExtraction,base64,header,Yes
-gateway.modules.services.serviceAi.subContentExtraction,json,header,Yes
-gateway.modules.services.serviceAi.subContentExtraction,logging,header,Yes
-gateway.modules.services.serviceAi.subContentExtraction,modules.datamodels.datamodelAi,function extractTextFromImage,Yes
-gateway.modules.services.serviceAi.subContentExtraction,modules.datamodels.datamodelAi,function processTextContentWithAi,Yes
-gateway.modules.services.serviceAi.subContentExtraction,modules.datamodels.datamodelChat,header,Yes
-gateway.modules.services.serviceAi.subContentExtraction,modules.datamodels.datamodelExtraction,header,Yes
-gateway.modules.services.serviceAi.subContentExtraction,modules.workflows.processing.shared.stateTools,header,Yes
-gateway.modules.services.serviceAi.subContentExtraction,traceback,function extractTextFromImage,Yes
-gateway.modules.services.serviceAi.subContentExtraction,traceback,function processTextContentWithAi,Yes
-gateway.modules.services.serviceAi.subContentExtraction,typing,header,Yes
-gateway.modules.services.serviceAi.subDocumentIntents,json,header,Yes
-gateway.modules.services.serviceAi.subDocumentIntents,logging,header,Yes
-gateway.modules.services.serviceAi.subDocumentIntents,modules.datamodels.datamodelChat,header,Yes
-gateway.modules.services.serviceAi.subDocumentIntents,modules.datamodels.datamodelExtraction,header,Yes
-gateway.modules.services.serviceAi.subDocumentIntents,modules.workflows.processing.shared.stateTools,header,Yes
-gateway.modules.services.serviceAi.subDocumentIntents,traceback,function resolvePreExtractedDocument,Yes
-gateway.modules.services.serviceAi.subDocumentIntents,typing,header,Yes
-gateway.modules.services.serviceAi.subJsonMerger,datetime,header,Yes
-gateway.modules.services.serviceAi.subJsonMerger,json,header,Yes
-gateway.modules.services.serviceAi.subJsonMerger,logging,header,Yes
-gateway.modules.services.serviceAi.subJsonMerger,modules.shared.jsonUtils,header,Yes
-gateway.modules.services.serviceAi.subJsonMerger,os,header,Yes
-gateway.modules.services.serviceAi.subJsonMerger,re,header,Yes
-gateway.modules.services.serviceAi.subJsonMerger,typing,header,Yes
-gateway.modules.services.serviceAi.subJsonResponseHandling,(relative) .subJsonMerger,function mergeJsonStringsWithOverlap,Yes
-gateway.modules.services.serviceAi.subJsonResponseHandling,json,header,Yes
-gateway.modules.services.serviceAi.subJsonResponseHandling,logging,header,Yes
-gateway.modules.services.serviceAi.subJsonResponseHandling,modules.datamodels.datamodelAi,header,Yes
-gateway.modules.services.serviceAi.subJsonResponseHandling,modules.shared.debugLogger,function mergeFragmentIntoSection,Yes
-gateway.modules.services.serviceAi.subJsonResponseHandling,modules.shared.jsonUtils,header,Yes
-gateway.modules.services.serviceAi.subJsonResponseHandling,re,header,Yes
-gateway.modules.services.serviceAi.subJsonResponseHandling,re,function _extractRowsFromFragment,Yes
-gateway.modules.services.serviceAi.subJsonResponseHandling,re,function _detectAndNormalizeFragment,Yes
-gateway.modules.services.serviceAi.subJsonResponseHandling,traceback,function _mergeJsonStructuresGeneric,Yes
-gateway.modules.services.serviceAi.subJsonResponseHandling,typing,header,Yes
-gateway.modules.services.serviceAi.subLoopingUseCases,dataclasses,header,Yes
-gateway.modules.services.serviceAi.subLoopingUseCases,json,function _handleChapterStructureFinalResult,Yes
-gateway.modules.services.serviceAi.subLoopingUseCases,json,function _handleCodeStructureFinalResult,Yes
-gateway.modules.services.serviceAi.subLoopingUseCases,json,function _handleCodeContentFinalResult,Yes
-gateway.modules.services.serviceAi.subLoopingUseCases,logging,header,Yes
-gateway.modules.services.serviceAi.subLoopingUseCases,typing,header,Yes
-gateway.modules.services.serviceAi.subResponseParsing,(relative) .subJsonResponseHandling,header,Yes
-gateway.modules.services.serviceAi.subResponseParsing,json,header,Yes
-gateway.modules.services.serviceAi.subResponseParsing,logging,header,Yes
-gateway.modules.services.serviceAi.subResponseParsing,modules.datamodels.datamodelAi,header,Yes
-gateway.modules.services.serviceAi.subResponseParsing,modules.shared.jsonUtils,header,Yes
-gateway.modules.services.serviceAi.subResponseParsing,typing,header,Yes
-gateway.modules.services.serviceAi.subStructureFilling,asyncio,header,Yes
-gateway.modules.services.serviceAi.subStructureFilling,base64,function _processAiResponseForSection,Yes
-gateway.modules.services.serviceAi.subStructureFilling,copy,header,Yes
-gateway.modules.services.serviceAi.subStructureFilling,json,header,Yes
-gateway.modules.services.serviceAi.subStructureFilling,logging,header,Yes
-gateway.modules.services.serviceAi.subStructureFilling,modules.datamodels.datamodelAi,header,Yes
-gateway.modules.services.serviceAi.subStructureFilling,modules.datamodels.datamodelExtraction,header,Yes
-gateway.modules.services.serviceAi.subStructureFilling,modules.datamodels.datamodelJson,function _getAcceptedSectionTypesForFormat,Yes
-gateway.modules.services.serviceAi.subStructureFilling,modules.datamodels.datamodelJson,function _getAcceptedSectionTypesForFormat,Yes
-gateway.modules.services.serviceAi.subStructureFilling,modules.services.serviceGeneration.renderers.registry,function _getAcceptedSectionTypesForFormat,Yes
-gateway.modules.services.serviceAi.subStructureFilling,modules.shared.jsonContinuation,function buildSectionPromptWithContinuation,Yes
-gateway.modules.services.serviceAi.subStructureFilling,modules.shared.jsonUtils,function _extractAndMergeMultipleJsonBlocks,Yes
-gateway.modules.services.serviceAi.subStructureFilling,modules.shared.jsonUtils,function _processAiResponseForSection,Yes
-gateway.modules.services.serviceAi.subStructureFilling,modules.shared.jsonUtils,function _processSingleSection,Yes
-gateway.modules.services.serviceAi.subStructureFilling,modules.workflows.processing.shared.stateTools,header,Yes
-gateway.modules.services.serviceAi.subStructureFilling,typing,header,Yes
-gateway.modules.services.serviceAi.subStructureGeneration,json,header,Yes
-gateway.modules.services.serviceAi.subStructureGeneration,logging,header,Yes
-gateway.modules.services.serviceAi.subStructureGeneration,modules.datamodels.datamodelAi,header,Yes
-gateway.modules.services.serviceAi.subStructureGeneration,modules.datamodels.datamodelExtraction,header,Yes
-gateway.modules.services.serviceAi.subStructureGeneration,modules.services.serviceGeneration.renderers.registry,function generateStructure,Yes
-gateway.modules.services.serviceAi.subStructureGeneration,modules.shared,function generateStructure,Yes
-gateway.modules.services.serviceAi.subStructureGeneration,modules.shared.jsonContinuation,function generateStructure,Yes
-gateway.modules.services.serviceAi.subStructureGeneration,modules.workflows.processing.shared.stateTools,header,Yes
-gateway.modules.services.serviceAi.subStructureGeneration,typing,header,Yes
-gateway.modules.services.serviceChat.mainServiceChat,json,function calculateObjectSize,Yes
-gateway.modules.services.serviceChat.mainServiceChat,logging,header,Yes
-gateway.modules.services.serviceChat.mainServiceChat,modules.datamodels.datamodelAi,header,Yes
-gateway.modules.services.serviceChat.mainServiceChat,modules.datamodels.datamodelChat,header,Yes
-gateway.modules.services.serviceChat.mainServiceChat,modules.datamodels.datamodelDocref,function getChatDocumentsFromDocumentList,Yes
-gateway.modules.services.serviceChat.mainServiceChat,modules.datamodels.datamodelUam,header,Yes
-gateway.modules.services.serviceChat.mainServiceChat,modules.shared.progressLogger,header,Yes
-gateway.modules.services.serviceChat.mainServiceChat,sys,function calculateObjectSize,Yes
-gateway.modules.services.serviceChat.mainServiceChat,typing,header,Yes
-gateway.modules.services.serviceExtraction.__init__,(relative) .mainServiceExtraction,header,Yes
-gateway.modules.services.serviceExtraction.chunking.chunkerImage,(relative) ..subRegistry,header,Yes
-gateway.modules.services.serviceExtraction.chunking.chunkerImage,PIL,function chunk,Yes
-gateway.modules.services.serviceExtraction.chunking.chunkerImage,base64,header,Yes
-gateway.modules.services.serviceExtraction.chunking.chunkerImage,io,header,Yes
-gateway.modules.services.serviceExtraction.chunking.chunkerImage,modules.datamodels.datamodelExtraction,header,Yes
-gateway.modules.services.serviceExtraction.chunking.chunkerImage,typing,header,Yes
-gateway.modules.services.serviceExtraction.chunking.chunkerStructure,(relative) ..subRegistry,header,Yes
-gateway.modules.services.serviceExtraction.chunking.chunkerStructure,json,header,Yes
-gateway.modules.services.serviceExtraction.chunking.chunkerStructure,modules.datamodels.datamodelExtraction,header,Yes
-gateway.modules.services.serviceExtraction.chunking.chunkerStructure,typing,header,Yes
-gateway.modules.services.serviceExtraction.chunking.chunkerTable,(relative) ..subRegistry,header,Yes
-gateway.modules.services.serviceExtraction.chunking.chunkerTable,modules.datamodels.datamodelExtraction,header,Yes
-gateway.modules.services.serviceExtraction.chunking.chunkerTable,typing,header,Yes
-gateway.modules.services.serviceExtraction.chunking.chunkerText,(relative) ..subRegistry,header,Yes
-gateway.modules.services.serviceExtraction.chunking.chunkerText,logging,header,Yes
-gateway.modules.services.serviceExtraction.chunking.chunkerText,modules.datamodels.datamodelExtraction,header,Yes
-gateway.modules.services.serviceExtraction.chunking.chunkerText,typing,header,Yes
-gateway.modules.services.serviceExtraction.extractors.extractorBinary,(relative) ..subRegistry,header,Yes
-gateway.modules.services.serviceExtraction.extractors.extractorBinary,(relative) ..subUtils,header,Yes
-gateway.modules.services.serviceExtraction.extractors.extractorBinary,base64,header,Yes
-gateway.modules.services.serviceExtraction.extractors.extractorBinary,modules.datamodels.datamodelExtraction,header,Yes
-gateway.modules.services.serviceExtraction.extractors.extractorBinary,typing,header,Yes
-gateway.modules.services.serviceExtraction.extractors.extractorCsv,(relative) ..subRegistry,header,Yes
-gateway.modules.services.serviceExtraction.extractors.extractorCsv,(relative) ..subUtils,header,Yes
-gateway.modules.services.serviceExtraction.extractors.extractorCsv,modules.datamodels.datamodelExtraction,header,Yes
-gateway.modules.services.serviceExtraction.extractors.extractorCsv,typing,header,Yes
-gateway.modules.services.serviceExtraction.extractors.extractorDocx,(relative) ..subRegistry,header,Yes
-gateway.modules.services.serviceExtraction.extractors.extractorDocx,(relative) ..subUtils,header,Yes
-gateway.modules.services.serviceExtraction.extractors.extractorDocx,docx,function _load,Yes
-gateway.modules.services.serviceExtraction.extractors.extractorDocx,io,header,Yes
-gateway.modules.services.serviceExtraction.extractors.extractorDocx,modules.datamodels.datamodelExtraction,header,Yes
-gateway.modules.services.serviceExtraction.extractors.extractorDocx,typing,header,Yes
-gateway.modules.services.serviceExtraction.extractors.extractorHtml,(relative) ..subRegistry,header,Yes
-gateway.modules.services.serviceExtraction.extractors.extractorHtml,(relative) ..subUtils,header,Yes
-gateway.modules.services.serviceExtraction.extractors.extractorHtml,bs4,header,Yes
-gateway.modules.services.serviceExtraction.extractors.extractorHtml,modules.datamodels.datamodelExtraction,header,Yes
-gateway.modules.services.serviceExtraction.extractors.extractorHtml,typing,header,Yes
-gateway.modules.services.serviceExtraction.extractors.extractorImage,(relative) ..subRegistry,header,Yes
-gateway.modules.services.serviceExtraction.extractors.extractorImage,(relative) ..subUtils,header,Yes
-gateway.modules.services.serviceExtraction.extractors.extractorImage,PIL,function extract,Yes
-gateway.modules.services.serviceExtraction.extractors.extractorImage,base64,header,Yes
-gateway.modules.services.serviceExtraction.extractors.extractorImage,io,function extract,Yes
-gateway.modules.services.serviceExtraction.extractors.extractorImage,logging,header,Yes
-gateway.modules.services.serviceExtraction.extractors.extractorImage,modules.datamodels.datamodelExtraction,header,Yes
-gateway.modules.services.serviceExtraction.extractors.extractorImage,typing,header,Yes
-gateway.modules.services.serviceExtraction.extractors.extractorJson,(relative) ..subRegistry,header,Yes
-gateway.modules.services.serviceExtraction.extractors.extractorJson,(relative) ..subUtils,header,Yes
-gateway.modules.services.serviceExtraction.extractors.extractorJson,json,header,Yes
-gateway.modules.services.serviceExtraction.extractors.extractorJson,modules.datamodels.datamodelExtraction,header,Yes
-gateway.modules.services.serviceExtraction.extractors.extractorJson,typing,header,Yes
-gateway.modules.services.serviceExtraction.extractors.extractorPdf,(relative) ..subRegistry,header,Yes
-gateway.modules.services.serviceExtraction.extractors.extractorPdf,(relative) ..subUtils,header,Yes
-gateway.modules.services.serviceExtraction.extractors.extractorPdf,PyPDF2,function _load,Yes
-gateway.modules.services.serviceExtraction.extractors.extractorPdf,base64,header,Yes
-gateway.modules.services.serviceExtraction.extractors.extractorPdf,fitz,function _load,Yes
-gateway.modules.services.serviceExtraction.extractors.extractorPdf,io,header,Yes
-gateway.modules.services.serviceExtraction.extractors.extractorPdf,modules.datamodels.datamodelExtraction,header,Yes
-gateway.modules.services.serviceExtraction.extractors.extractorPdf,typing,header,Yes
-gateway.modules.services.serviceExtraction.extractors.extractorPptx,(relative) ..subRegistry,header,Yes
-gateway.modules.services.serviceExtraction.extractors.extractorPptx,base64,header,Yes
-gateway.modules.services.serviceExtraction.extractors.extractorPptx,io,function extract,Yes
-gateway.modules.services.serviceExtraction.extractors.extractorPptx,logging,header,Yes
-gateway.modules.services.serviceExtraction.extractors.extractorPptx,modules.datamodels.datamodelExtraction,header,Yes
-gateway.modules.services.serviceExtraction.extractors.extractorPptx,pptx,function _load,Yes
-gateway.modules.services.serviceExtraction.extractors.extractorPptx,typing,header,Yes
-gateway.modules.services.serviceExtraction.extractors.extractorSql,(relative) ..subRegistry,header,Yes
-gateway.modules.services.serviceExtraction.extractors.extractorSql,(relative) ..subUtils,header,Yes
-gateway.modules.services.serviceExtraction.extractors.extractorSql,modules.datamodels.datamodelExtraction,header,Yes
-gateway.modules.services.serviceExtraction.extractors.extractorSql,typing,header,Yes
-gateway.modules.services.serviceExtraction.extractors.extractorText,(relative) ..subRegistry,header,Yes
-gateway.modules.services.serviceExtraction.extractors.extractorText,(relative) ..subUtils,header,Yes
-gateway.modules.services.serviceExtraction.extractors.extractorText,modules.datamodels.datamodelExtraction,header,Yes
-gateway.modules.services.serviceExtraction.extractors.extractorText,typing,header,Yes
-gateway.modules.services.serviceExtraction.extractors.extractorXlsx,(relative) ..subRegistry,header,Yes
-gateway.modules.services.serviceExtraction.extractors.extractorXlsx,(relative) ..subUtils,header,Yes
-gateway.modules.services.serviceExtraction.extractors.extractorXlsx,datetime,header,Yes
-gateway.modules.services.serviceExtraction.extractors.extractorXlsx,io,header,Yes
-gateway.modules.services.serviceExtraction.extractors.extractorXlsx,modules.datamodels.datamodelExtraction,header,Yes
-gateway.modules.services.serviceExtraction.extractors.extractorXlsx,openpyxl,function _load,Yes
-gateway.modules.services.serviceExtraction.extractors.extractorXlsx,typing,header,Yes
-gateway.modules.services.serviceExtraction.extractors.extractorXml,(relative) ..subRegistry,header,Yes
-gateway.modules.services.serviceExtraction.extractors.extractorXml,(relative) ..subUtils,header,Yes
-gateway.modules.services.serviceExtraction.extractors.extractorXml,modules.datamodels.datamodelExtraction,header,Yes
-gateway.modules.services.serviceExtraction.extractors.extractorXml,typing,header,Yes
-gateway.modules.services.serviceExtraction.extractors.extractorXml,xml.etree.ElementTree,header,Yes
-gateway.modules.services.serviceExtraction.mainServiceExtraction,(relative) .merging.mergerDefault,function applyMerging,Yes
-gateway.modules.services.serviceExtraction.mainServiceExtraction,(relative) .merging.mergerTable,function applyMerging,Yes
-gateway.modules.services.serviceExtraction.mainServiceExtraction,(relative) .merging.mergerText,function applyMerging,Yes
-gateway.modules.services.serviceExtraction.mainServiceExtraction,(relative) .subMerger,function applyMerging,Yes
-gateway.modules.services.serviceExtraction.mainServiceExtraction,(relative) .subPipeline,header,Yes
-gateway.modules.services.serviceExtraction.mainServiceExtraction,(relative) .subRegistry,header,Yes
-gateway.modules.services.serviceExtraction.mainServiceExtraction,asyncio,header,Yes
-gateway.modules.services.serviceExtraction.mainServiceExtraction,base64,header,Yes
-gateway.modules.services.serviceExtraction.mainServiceExtraction,json,header,Yes
-gateway.modules.services.serviceExtraction.mainServiceExtraction,logging,header,Yes
-gateway.modules.services.serviceExtraction.mainServiceExtraction,modules.aicore.aicoreModelRegistry,header,Yes
-gateway.modules.services.serviceExtraction.mainServiceExtraction,modules.aicore.aicoreModelSelector,header,Yes
-gateway.modules.services.serviceExtraction.mainServiceExtraction,modules.datamodels.datamodelAi,header,Yes
-gateway.modules.services.serviceExtraction.mainServiceExtraction,modules.datamodels.datamodelChat,header,Yes
-gateway.modules.services.serviceExtraction.mainServiceExtraction,modules.datamodels.datamodelExtraction,header,Yes
-gateway.modules.services.serviceExtraction.mainServiceExtraction,modules.interfaces.interfaceDbManagement,function extractContent,Yes
-gateway.modules.services.serviceExtraction.mainServiceExtraction,modules.shared.debugLogger,function extractContent,Yes
-gateway.modules.services.serviceExtraction.mainServiceExtraction,modules.shared.jsonUtils,header,Yes
-gateway.modules.services.serviceExtraction.mainServiceExtraction,time,header,Yes
-gateway.modules.services.serviceExtraction.mainServiceExtraction,typing,header,Yes
-gateway.modules.services.serviceExtraction.mainServiceExtraction,uuid,header,Yes
-gateway.modules.services.serviceExtraction.merging.mergerDefault,modules.datamodels.datamodelExtraction,header,Yes
-gateway.modules.services.serviceExtraction.merging.mergerDefault,typing,header,Yes
-gateway.modules.services.serviceExtraction.merging.mergerTable,(relative) ..subUtils,header,Yes
-gateway.modules.services.serviceExtraction.merging.mergerTable,modules.datamodels.datamodelExtraction,header,Yes
-gateway.modules.services.serviceExtraction.merging.mergerTable,typing,header,Yes
-gateway.modules.services.serviceExtraction.merging.mergerText,(relative) ..subUtils,header,Yes
-gateway.modules.services.serviceExtraction.merging.mergerText,modules.datamodels.datamodelExtraction,header,Yes
-gateway.modules.services.serviceExtraction.merging.mergerText,typing,header,Yes
-gateway.modules.services.serviceExtraction.subMerger,(relative) .subUtils,header,Yes
-gateway.modules.services.serviceExtraction.subMerger,logging,header,Yes
-gateway.modules.services.serviceExtraction.subMerger,modules.datamodels.datamodelExtraction,header,Yes
-gateway.modules.services.serviceExtraction.subMerger,typing,header,Yes
-gateway.modules.services.serviceExtraction.subPipeline,(relative) .mainServiceExtraction,function runExtraction,Yes
-gateway.modules.services.serviceExtraction.subPipeline,(relative) .subRegistry,header,Yes
-gateway.modules.services.serviceExtraction.subPipeline,(relative) .subUtils,header,Yes
-gateway.modules.services.serviceExtraction.subPipeline,logging,header,Yes
-gateway.modules.services.serviceExtraction.subPipeline,modules.datamodels.datamodelExtraction,header,Yes
-gateway.modules.services.serviceExtraction.subPipeline,typing,header,Yes
-gateway.modules.services.serviceExtraction.subPromptBuilderExtraction,json,header,Yes
-gateway.modules.services.serviceExtraction.subPromptBuilderExtraction,logging,header,Yes
-gateway.modules.services.serviceExtraction.subPromptBuilderExtraction,modules.datamodels.datamodelAi,header,Yes
-gateway.modules.services.serviceExtraction.subPromptBuilderExtraction,modules.services.serviceGeneration.renderers.documentRendererBaseTemplate,header,Yes
-gateway.modules.services.serviceExtraction.subPromptBuilderExtraction,modules.shared.debugLogger,function buildExtractionPrompt,Yes
-gateway.modules.services.serviceExtraction.subPromptBuilderExtraction,typing,header,Yes
-gateway.modules.services.serviceExtraction.subPromptBuilderExtraction,typing,header,Yes
-gateway.modules.services.serviceExtraction.subRegistry,(relative) .chunking.chunkerImage,function __init__,Yes
-gateway.modules.services.serviceExtraction.subRegistry,(relative) .chunking.chunkerStructure,function __init__,Yes
-gateway.modules.services.serviceExtraction.subRegistry,(relative) .chunking.chunkerTable,function __init__,Yes
-gateway.modules.services.serviceExtraction.subRegistry,(relative) .chunking.chunkerText,function __init__,Yes
-gateway.modules.services.serviceExtraction.subRegistry,(relative) .extractors.extractorBinary,function _auto_discover_extractors,Yes
-gateway.modules.services.serviceExtraction.subRegistry,importlib,function _auto_discover_extractors,Yes
-gateway.modules.services.serviceExtraction.subRegistry,logging,header,Yes
-gateway.modules.services.serviceExtraction.subRegistry,modules.datamodels.datamodelExtraction,header,Yes
-gateway.modules.services.serviceExtraction.subRegistry,os,function _auto_discover_extractors,Yes
-gateway.modules.services.serviceExtraction.subRegistry,pathlib,function _auto_discover_extractors,Yes
-gateway.modules.services.serviceExtraction.subRegistry,traceback,function _auto_discover_extractors,Yes
-gateway.modules.services.serviceExtraction.subRegistry,traceback,function __init__,Yes
-gateway.modules.services.serviceExtraction.subRegistry,typing,header,Yes
-gateway.modules.services.serviceExtraction.subUtils,uuid,header,Yes
-gateway.modules.services.serviceGeneration.mainServiceGeneration,(relative) .renderers.registry,function _getFormatRenderer,Yes
-gateway.modules.services.serviceGeneration.mainServiceGeneration,base64,header,Yes
-gateway.modules.services.serviceGeneration.mainServiceGeneration,logging,header,Yes
-gateway.modules.services.serviceGeneration.mainServiceGeneration,modules.datamodels.datamodelChat,header,Yes
-gateway.modules.services.serviceGeneration.mainServiceGeneration,modules.datamodels.datamodelDocument,header,Yes
-gateway.modules.services.serviceGeneration.mainServiceGeneration,modules.services.serviceExtraction.subPromptBuilderExtraction,function getAdaptiveExtractionPrompt,Yes
-gateway.modules.services.serviceGeneration.mainServiceGeneration,modules.services.serviceGeneration.renderers.registry,function renderReport,Yes
-gateway.modules.services.serviceGeneration.mainServiceGeneration,modules.services.serviceGeneration.subContentGenerator,function generateDocumentWithTwoPhases,Yes
-gateway.modules.services.serviceGeneration.mainServiceGeneration,modules.services.serviceGeneration.subDocumentUtility,header,Yes
-gateway.modules.services.serviceGeneration.mainServiceGeneration,modules.services.serviceGeneration.subStructureGenerator,function generateDocumentWithTwoPhases,Yes
-gateway.modules.services.serviceGeneration.mainServiceGeneration,traceback,header,Yes
-gateway.modules.services.serviceGeneration.mainServiceGeneration,typing,header,Yes
-gateway.modules.services.serviceGeneration.mainServiceGeneration,uuid,header,Yes
-gateway.modules.services.serviceGeneration.paths.codePath,json,header,Yes
-gateway.modules.services.serviceGeneration.paths.codePath,logging,header,Yes
-gateway.modules.services.serviceGeneration.paths.codePath,modules.datamodels.datamodelAi,header,Yes
-gateway.modules.services.serviceGeneration.paths.codePath,modules.datamodels.datamodelDocument,function generateCode,Yes
-gateway.modules.services.serviceGeneration.paths.codePath,modules.datamodels.datamodelExtraction,header,Yes
-gateway.modules.services.serviceGeneration.paths.codePath,modules.datamodels.datamodelWorkflow,header,Yes
-gateway.modules.services.serviceGeneration.paths.codePath,modules.services.serviceGeneration.renderers.registry,function _getCodeRenderer,Yes
-gateway.modules.services.serviceGeneration.paths.codePath,modules.shared.jsonContinuation,function _generateCodeStructure,Yes
-gateway.modules.services.serviceGeneration.paths.codePath,modules.shared.jsonContinuation,function _generateSingleFileContent,Yes
-gateway.modules.services.serviceGeneration.paths.codePath,modules.shared.jsonUtils,header,Yes
-gateway.modules.services.serviceGeneration.paths.codePath,re,header,Yes
-gateway.modules.services.serviceGeneration.paths.codePath,time,header,Yes
-gateway.modules.services.serviceGeneration.paths.codePath,typing,header,Yes
-gateway.modules.services.serviceGeneration.paths.documentPath,copy,header,Yes
-gateway.modules.services.serviceGeneration.paths.documentPath,json,header,Yes
-gateway.modules.services.serviceGeneration.paths.documentPath,logging,header,Yes
-gateway.modules.services.serviceGeneration.paths.documentPath,modules.datamodels.datamodelAi,header,Yes
-gateway.modules.services.serviceGeneration.paths.documentPath,modules.datamodels.datamodelDocument,header,Yes
-gateway.modules.services.serviceGeneration.paths.documentPath,modules.datamodels.datamodelExtraction,header,Yes
-gateway.modules.services.serviceGeneration.paths.documentPath,modules.datamodels.datamodelWorkflow,header,Yes
-gateway.modules.services.serviceGeneration.paths.documentPath,modules.workflows.processing.shared.stateTools,header,Yes
-gateway.modules.services.serviceGeneration.paths.documentPath,time,header,Yes
-gateway.modules.services.serviceGeneration.paths.documentPath,typing,header,Yes
-gateway.modules.services.serviceGeneration.paths.imagePath,base64,function generateImages,Yes
-gateway.modules.services.serviceGeneration.paths.imagePath,json,function generateImages,Yes
-gateway.modules.services.serviceGeneration.paths.imagePath,logging,header,Yes
-gateway.modules.services.serviceGeneration.paths.imagePath,modules.datamodels.datamodelAi,header,Yes
-gateway.modules.services.serviceGeneration.paths.imagePath,modules.datamodels.datamodelWorkflow,header,Yes
-gateway.modules.services.serviceGeneration.paths.imagePath,time,header,Yes
-gateway.modules.services.serviceGeneration.paths.imagePath,typing,header,Yes
-gateway.modules.services.serviceGeneration.renderers.codeRendererBaseTemplate,(relative) .documentRendererBaseTemplate,header,Yes
-gateway.modules.services.serviceGeneration.renderers.codeRendererBaseTemplate,abc,header,Yes
-gateway.modules.services.serviceGeneration.renderers.codeRendererBaseTemplate,logging,header,Yes
-gateway.modules.services.serviceGeneration.renderers.codeRendererBaseTemplate,modules.datamodels.datamodelDocument,header,Yes
-gateway.modules.services.serviceGeneration.renderers.codeRendererBaseTemplate,typing,header,Yes
-gateway.modules.services.serviceGeneration.renderers.documentRendererBaseTemplate,PIL,header,Yes
-gateway.modules.services.serviceGeneration.renderers.documentRendererBaseTemplate,abc,header,Yes
-gateway.modules.services.serviceGeneration.renderers.documentRendererBaseTemplate,base64,header,Yes
-gateway.modules.services.serviceGeneration.renderers.documentRendererBaseTemplate,datetime,header,Yes
-gateway.modules.services.serviceGeneration.renderers.documentRendererBaseTemplate,io,header,Yes
-gateway.modules.services.serviceGeneration.renderers.documentRendererBaseTemplate,json,header,Yes
-gateway.modules.services.serviceGeneration.renderers.documentRendererBaseTemplate,logging,header,Yes
-gateway.modules.services.serviceGeneration.renderers.documentRendererBaseTemplate,modules.datamodels.datamodelAi,header,Yes
-gateway.modules.services.serviceGeneration.renderers.documentRendererBaseTemplate,modules.datamodels.datamodelDocument,header,Yes
-gateway.modules.services.serviceGeneration.renderers.documentRendererBaseTemplate,modules.datamodels.datamodelJson,header,Yes
-gateway.modules.services.serviceGeneration.renderers.documentRendererBaseTemplate,re,header,Yes
-gateway.modules.services.serviceGeneration.renderers.documentRendererBaseTemplate,re,function _determineFilename,Yes
-gateway.modules.services.serviceGeneration.renderers.documentRendererBaseTemplate,threading,function _getAiStyles,Yes
-gateway.modules.services.serviceGeneration.renderers.documentRendererBaseTemplate,typing,header,Yes
-gateway.modules.services.serviceGeneration.renderers.registry,(relative) .documentRendererBaseTemplate,header,Yes
-gateway.modules.services.serviceGeneration.renderers.registry,importlib,header,Yes
-gateway.modules.services.serviceGeneration.renderers.registry,logging,header,Yes
-gateway.modules.services.serviceGeneration.renderers.registry,os,function discoverRenderers,Yes
-gateway.modules.services.serviceGeneration.renderers.registry,pathlib,function discoverRenderers,Yes
-gateway.modules.services.serviceGeneration.renderers.registry,sys,function discoverRenderers,Yes
-gateway.modules.services.serviceGeneration.renderers.registry,typing,header,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererCodeCsv,(relative) .codeRendererBaseTemplate,header,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererCodeCsv,(relative) .rendererCsv,function render,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererCodeCsv,csv,header,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererCodeCsv,io,header,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererCodeCsv,modules.datamodels.datamodelDocument,header,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererCodeCsv,typing,header,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererCodeJson,(relative) .codeRendererBaseTemplate,header,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererCodeJson,(relative) .rendererJson,function render,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererCodeJson,json,header,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererCodeJson,modules.datamodels.datamodelDocument,header,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererCodeJson,typing,header,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererCodeXml,(relative) .codeRendererBaseTemplate,header,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererCodeXml,modules.datamodels.datamodelDocument,header,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererCodeXml,typing,header,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererCodeXml,xml.dom,header,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererCodeXml,xml.etree.ElementTree,header,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererCsv,(relative) .documentRendererBaseTemplate,header,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererCsv,csv,function _convertRowsToCsv,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererCsv,io,function _convertRowsToCsv,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererCsv,modules.datamodels.datamodelDocument,header,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererCsv,typing,header,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererDocx,(relative) .documentRendererBaseTemplate,header,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererDocx,(relative) .rendererHtml,function render,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererDocx,PIL,function _renderJsonImage,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererDocx,base64,header,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererDocx,csv,header,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererDocx,docx,header,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererDocx,docx.enum.style,function _setupDocumentStyles,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererDocx,docx.enum.style,function _createStyle,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererDocx,docx.enum.table,header,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererDocx,docx.enum.text,header,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererDocx,docx.oxml.ns,function _renderTableFastXml,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererDocx,docx.oxml.shared,function _renderTableFastXml,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererDocx,docx.oxml.shared,function _createTableBordersXml,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererDocx,docx.oxml.shared,function _createTableRowXml,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererDocx,docx.oxml.shared,function _applyHorizontalBordersOnly,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererDocx,docx.oxml.shared,function _setCellBackground,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererDocx,docx.oxml.shared,function _setCellBackgroundFast,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererDocx,docx.shared,header,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererDocx,io,header,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererDocx,lxml,function _renderTableFastXml,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererDocx,modules.datamodels.datamodelDocument,header,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererDocx,modules.datamodels.datamodelJson,function getAcceptedSectionTypes,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererDocx,re,header,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererDocx,time,function _generateDocxFromJson,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererDocx,time,function _renderJsonTable,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererDocx,time,function _renderTableFastXml,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererDocx,typing,header,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererHtml,(relative) .documentRendererBaseTemplate,header,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererHtml,base64,function render,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererHtml,base64,function _replaceImageDataUris,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererHtml,html,function _renderJsonImage,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererHtml,html,function _replaceImageDataUris,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererHtml,modules.datamodels.datamodelDocument,header,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererHtml,modules.datamodels.datamodelJson,function getAcceptedSectionTypes,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererHtml,re,function _replaceImageDataUris,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererHtml,re,function _extractImages,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererHtml,typing,header,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererImage,(relative) .documentRendererBaseTemplate,header,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererImage,base64,header,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererImage,json,function _generateAiImage,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererImage,logging,header,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererImage,modules.datamodels.datamodelAi,function _generateAiImage,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererImage,modules.datamodels.datamodelAi,function _compressPromptWithAi,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererImage,modules.datamodels.datamodelDocument,header,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererImage,typing,header,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererJson,(relative) .documentRendererBaseTemplate,header,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererJson,json,header,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererJson,modules.datamodels.datamodelDocument,header,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererJson,modules.datamodels.datamodelJson,function getAcceptedSectionTypes,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererJson,typing,header,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererMarkdown,(relative) .documentRendererBaseTemplate,header,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererMarkdown,modules.datamodels.datamodelDocument,header,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererMarkdown,modules.datamodels.datamodelJson,function getAcceptedSectionTypes,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererMarkdown,typing,header,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererPdf,(relative) .documentRendererBaseTemplate,header,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererPdf,(relative) .rendererHtml,function render,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererPdf,PIL,function _renderJsonImage,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererPdf,base64,header,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererPdf,base64,function _renderJsonImage,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererPdf,io,header,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererPdf,io,function _renderJsonImage,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererPdf,json,function _getAiStylesWithPdfColors,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererPdf,modules.datamodels.datamodelAi,function _getAiStylesWithPdfColors,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererPdf,modules.datamodels.datamodelDocument,header,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererPdf,modules.datamodels.datamodelJson,function getAcceptedSectionTypes,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererPdf,re,function _getAiStylesWithPdfColors,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererPdf,re,function _renderJsonImage,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererPdf,reportlab.lib,header,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererPdf,reportlab.lib.enums,header,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererPdf,reportlab.lib.pagesizes,header,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererPdf,reportlab.lib.pagesizes,function _renderJsonImage,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererPdf,reportlab.lib.styles,header,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererPdf,reportlab.lib.units,header,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererPdf,reportlab.lib.units,function _renderJsonImage,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererPdf,reportlab.platypus,header,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererPdf,reportlab.platypus,function _renderJsonImage,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererPdf,typing,header,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererPptx,(relative) .documentRendererBaseTemplate,header,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererPptx,PIL,function _addImagesToSlide,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererPptx,PIL,function _addImagesToSlideInFrame,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererPptx,PIL,function _addImagesToSlideInFrame,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererPptx,base64,header,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererPptx,base64,function _addImagesToSlide,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererPptx,base64,function _addImagesToSlideInFrame,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererPptx,datetime,header,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererPptx,io,header,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererPptx,io,function _addImagesToSlide,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererPptx,io,function _addImagesToSlideInFrame,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererPptx,json,header,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererPptx,logging,header,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererPptx,modules.datamodels.datamodelDocument,header,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererPptx,modules.datamodels.datamodelJson,function getAcceptedSectionTypes,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererPptx,pptx,function render,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererPptx,pptx.dml.color,function render,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererPptx,pptx.dml.color,function _addImagesToSlide,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererPptx,pptx.dml.color,function _addTableToSlide,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererPptx,pptx.dml.color,function _addBulletListToSlide,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererPptx,pptx.dml.color,function _addHeadingToSlide,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererPptx,pptx.dml.color,function _addParagraphToSlide,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererPptx,pptx.dml.color,function _addCodeBlockToSlide,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererPptx,pptx.dml.color,function _renderSlideContentWithFrames,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererPptx,pptx.dml.color,function _renderTextSectionsInFrame,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererPptx,pptx.dml.color,function _renderSectionToTextFrame,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererPptx,pptx.enum.text,function render,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererPptx,pptx.enum.text,function _addImagesToSlide,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererPptx,pptx.enum.text,function _addTableToSlide,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererPptx,pptx.enum.text,function _addBulletListToSlide,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererPptx,pptx.enum.text,function _addParagraphToSlide,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererPptx,pptx.enum.text,function _renderSlideContentWithFrames,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererPptx,pptx.enum.text,function _renderTextSectionsInFrame,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererPptx,pptx.enum.text,function _renderSectionToTextFrame,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererPptx,pptx.enum.text,function _addImagesToSlideInFrame,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererPptx,pptx.util,function render,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererPptx,pptx.util,function _addImagesToSlide,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererPptx,pptx.util,function _addTableToSlide,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererPptx,pptx.util,function _addBulletListToSlide,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererPptx,pptx.util,function _addHeadingToSlide,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererPptx,pptx.util,function _addParagraphToSlide,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererPptx,pptx.util,function _addCodeBlockToSlide,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererPptx,pptx.util,function _renderSlideContentWithFrames,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererPptx,pptx.util,function _renderTextSectionsInFrame,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererPptx,pptx.util,function _renderSectionToTextFrame,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererPptx,pptx.util,function _addImagesToSlideInFrame,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererPptx,pptx.util,function render,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererPptx,pptx.util,function render,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererPptx,pptx.util,function _renderSlideContentWithFrames,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererPptx,pptx.util,function _addBulletListToSlide,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererPptx,re,header,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererPptx,re,function render,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererPptx,traceback,function _addImagesToSlide,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererPptx,typing,header,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererText,(relative) .documentRendererBaseTemplate,header,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererText,modules.datamodels.datamodelDocument,header,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererText,modules.datamodels.datamodelJson,function getAcceptedSectionTypes,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererText,typing,header,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererXlsx,(relative) .documentRendererBaseTemplate,header,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererXlsx,(relative) .rendererCsv,function render,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererXlsx,base64,header,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererXlsx,base64,function _addImageToExcel,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererXlsx,datetime,header,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererXlsx,dateutil,header,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererXlsx,io,header,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererXlsx,io,function _addImageToExcel,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererXlsx,json,function _getAiStylesWithExcelColors,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererXlsx,modules.datamodels.datamodelAi,function _getAiStylesWithExcelColors,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererXlsx,modules.datamodels.datamodelDocument,header,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererXlsx,modules.datamodels.datamodelJson,function getAcceptedSectionTypes,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererXlsx,openpyxl,header,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererXlsx,openpyxl.drawing.image,function _addImageToExcel,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererXlsx,openpyxl.styles,header,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererXlsx,openpyxl.utils,header,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererXlsx,openpyxl.worksheet.table,header,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererXlsx,re,header,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererXlsx,re,function _getAiStylesWithExcelColors,Yes
-gateway.modules.services.serviceGeneration.renderers.rendererXlsx,typing,header,Yes
-gateway.modules.services.serviceGeneration.subContentGenerator,asyncio,header,Yes
-gateway.modules.services.serviceGeneration.subContentGenerator,base64,header,Yes
-gateway.modules.services.serviceGeneration.subContentGenerator,base64,function _generateImageSection,Yes
-gateway.modules.services.serviceGeneration.subContentGenerator,json,header,Yes
-gateway.modules.services.serviceGeneration.subContentGenerator,logging,header,Yes
-gateway.modules.services.serviceGeneration.subContentGenerator,modules.datamodels.datamodelAi,function _generateSimpleSection,Yes
-gateway.modules.services.serviceGeneration.subContentGenerator,modules.datamodels.datamodelAi,function _generateImageSection,Yes
-gateway.modules.services.serviceGeneration.subContentGenerator,modules.services.serviceGeneration.subContentIntegrator,header,Yes
-gateway.modules.services.serviceGeneration.subContentGenerator,modules.shared.jsonUtils,function _generateSimpleSection,Yes
-gateway.modules.services.serviceGeneration.subContentGenerator,modules.workflows.processing.shared.stateTools,header,Yes
-gateway.modules.services.serviceGeneration.subContentGenerator,re,header,Yes
-gateway.modules.services.serviceGeneration.subContentGenerator,traceback,header,Yes
-gateway.modules.services.serviceGeneration.subContentGenerator,typing,header,Yes
-gateway.modules.services.serviceGeneration.subContentIntegrator,json,function integrateContent,Yes
-gateway.modules.services.serviceGeneration.subContentIntegrator,logging,header,Yes
-gateway.modules.services.serviceGeneration.subContentIntegrator,typing,header,Yes
-gateway.modules.services.serviceGeneration.subDocumentUtility,csv,function convertDocumentDataToString,Yes
-gateway.modules.services.serviceGeneration.subDocumentUtility,csv,function convertDocumentDataToString,Yes
-gateway.modules.services.serviceGeneration.subDocumentUtility,io,function convertDocumentDataToString,Yes
-gateway.modules.services.serviceGeneration.subDocumentUtility,io,function convertDocumentDataToString,Yes
-gateway.modules.services.serviceGeneration.subDocumentUtility,json,header,Yes
-gateway.modules.services.serviceGeneration.subDocumentUtility,logging,header,Yes
-gateway.modules.services.serviceGeneration.subDocumentUtility,os,header,Yes
-gateway.modules.services.serviceGeneration.subDocumentUtility,typing,header,Yes
-gateway.modules.services.serviceGeneration.subJsonSchema,typing,header,Yes
-gateway.modules.services.serviceGeneration.subPromptBuilderGeneration,logging,header,Yes
-gateway.modules.services.serviceGeneration.subPromptBuilderGeneration,modules.datamodels.datamodelJson,header,Yes
-gateway.modules.services.serviceGeneration.subPromptBuilderGeneration,typing,header,Yes
-gateway.modules.services.serviceGeneration.subStructureGenerator,json,header,Yes
-gateway.modules.services.serviceGeneration.subStructureGenerator,json,function _createStructurePrompt,Yes
-gateway.modules.services.serviceGeneration.subStructureGenerator,logging,header,Yes
-gateway.modules.services.serviceGeneration.subStructureGenerator,modules.datamodels.datamodelAi,function generateStructure,Yes
-gateway.modules.services.serviceGeneration.subStructureGenerator,modules.datamodels.datamodelJson,header,Yes
-gateway.modules.services.serviceGeneration.subStructureGenerator,typing,header,Yes
-gateway.modules.services.serviceMessaging.mainServiceMessaging,html,function _textToHtml,Yes
-gateway.modules.services.serviceMessaging.mainServiceMessaging,importlib,function _loadSubscriptionFunction,Yes
-gateway.modules.services.serviceMessaging.mainServiceMessaging,logging,header,Yes
-gateway.modules.services.serviceMessaging.mainServiceMessaging,modules.datamodels.datamodelMessaging,header,Yes
-gateway.modules.services.serviceMessaging.mainServiceMessaging,modules.interfaces.interfaceMessaging,header,Yes
-gateway.modules.services.serviceMessaging.mainServiceMessaging,modules.shared.timeUtils,header,Yes
-gateway.modules.services.serviceMessaging.mainServiceMessaging,re,header,Yes
-gateway.modules.services.serviceMessaging.mainServiceMessaging,typing,header,Yes
-gateway.modules.services.serviceMessaging.subscriptions.subSubscriptionSystemErrors,modules.datamodels.datamodelMessaging,header,Yes
-gateway.modules.services.serviceMessaging.subscriptions.subSubscriptionSystemErrors,typing,header,Yes
-gateway.modules.services.serviceNormalization.mainServiceNormalization,json,header,Yes
-gateway.modules.services.serviceNormalization.mainServiceNormalization,os,header,Yes
-gateway.modules.services.serviceNormalization.mainServiceNormalization,typing,header,Yes
-gateway.modules.services.serviceSecurity.mainServiceSecurity,logging,header,Yes
-gateway.modules.services.serviceSecurity.mainServiceSecurity,modules.auth,header,Yes
-gateway.modules.services.serviceSecurity.mainServiceSecurity,modules.datamodels.datamodelSecurity,header,Yes
-gateway.modules.services.serviceSecurity.mainServiceSecurity,typing,header,Yes
-gateway.modules.services.serviceSharepoint.mainServiceSharepoint,aiohttp,header,Yes
-gateway.modules.services.serviceSharepoint.mainServiceSharepoint,asyncio,header,Yes
-gateway.modules.services.serviceSharepoint.mainServiceSharepoint,datetime,function getFolderUsageAnalytics,Yes
-gateway.modules.services.serviceSharepoint.mainServiceSharepoint,logging,header,Yes
-gateway.modules.services.serviceSharepoint.mainServiceSharepoint,typing,header,Yes
-gateway.modules.services.serviceSharepoint.mainServiceSharepoint,urllib.parse,function findSiteByWebUrl,Yes
-gateway.modules.services.serviceSharepoint.mainServiceSharepoint,urllib.parse,function getSiteByStandardPath,Yes
-gateway.modules.services.serviceTicket.mainServiceTicket,logging,header,Yes
-gateway.modules.services.serviceTicket.mainServiceTicket,modules.interfaces.interfaceTicketObjects,header,Yes
-gateway.modules.services.serviceTicket.mainServiceTicket,typing,header,Yes
-gateway.modules.services.serviceUtils.mainServiceUtils,json,function writeDebugArtifact,Yes
-gateway.modules.services.serviceUtils.mainServiceUtils,logging,header,Yes
-gateway.modules.services.serviceUtils.mainServiceUtils,modules.interfaces.interfaceDbChat,function storeDebugMessageAndDocuments,Yes
-gateway.modules.services.serviceUtils.mainServiceUtils,modules.shared,header,Yes
-gateway.modules.services.serviceUtils.mainServiceUtils,modules.shared.configuration,header,Yes
-gateway.modules.services.serviceUtils.mainServiceUtils,modules.shared.debugLogger,function writeDebugFile,Yes
-gateway.modules.services.serviceUtils.mainServiceUtils,modules.shared.debugLogger,function debugLogToFile,Yes
-gateway.modules.services.serviceUtils.mainServiceUtils,modules.shared.debugLogger,function writeDebugArtifact,Yes
-gateway.modules.services.serviceUtils.mainServiceUtils,modules.shared.eventManagement,header,Yes
-gateway.modules.services.serviceUtils.mainServiceUtils,modules.shared.timeUtils,header,Yes
-gateway.modules.services.serviceUtils.mainServiceUtils,re,function sanitizePromptContent,Yes
-gateway.modules.services.serviceUtils.mainServiceUtils,typing,header,Yes
-gateway.modules.services.serviceWeb.mainServiceWeb,asyncio,header,Yes
-gateway.modules.services.serviceWeb.mainServiceWeb,json,header,Yes
-gateway.modules.services.serviceWeb.mainServiceWeb,logging,header,Yes
-gateway.modules.services.serviceWeb.mainServiceWeb,modules.datamodels.datamodelAi,header,Yes
-gateway.modules.services.serviceWeb.mainServiceWeb,time,header,Yes
-gateway.modules.services.serviceWeb.mainServiceWeb,time,function _processCrawlResultsWithHierarchy,Yes
-gateway.modules.services.serviceWeb.mainServiceWeb,typing,header,Yes
-gateway.modules.services.serviceWeb.mainServiceWeb,urllib.parse,header,Yes
-gateway.modules.shared.__init__,(relative) .,header,Yes
-gateway.modules.shared.__init__,(relative) .,header,Yes
-gateway.modules.shared.__init__,(relative) .,header,Yes
-gateway.modules.shared.__init__,(relative) .,header,Yes
-gateway.modules.shared.__init__,(relative) .,header,Yes
-gateway.modules.shared.__init__,(relative) .,header,Yes
-gateway.modules.shared.__init__,(relative) .,header,Yes
-gateway.modules.shared.__init__,(relative) .,header,Yes
-gateway.modules.shared.__init__,(relative) .,header,Yes
-gateway.modules.shared.__init__,(relative) .,header,Yes
-gateway.modules.shared.__init__,(relative) .,header,Yes
-gateway.modules.shared.__init__,(relative) .,header,Yes
-gateway.modules.shared.attributeUtils,importlib,header,Yes
-gateway.modules.shared.attributeUtils,inspect,header,Yes
-gateway.modules.shared.attributeUtils,logging,header,Yes
-gateway.modules.shared.attributeUtils,os,header,Yes
-gateway.modules.shared.attributeUtils,pydantic,header,Yes
-gateway.modules.shared.attributeUtils,typing,header,Yes
-gateway.modules.shared.auditLogger,datetime,header,Yes
-gateway.modules.shared.auditLogger,logging,header,Yes
-gateway.modules.shared.auditLogger,modules.connectors.connectorDbPostgre,function _ensureInitialized,Yes
-gateway.modules.shared.auditLogger,modules.datamodels.datamodelAudit,function _ensureInitialized,Yes
-gateway.modules.shared.auditLogger,modules.datamodels.datamodelAudit,function getAuditLogs,Yes
-gateway.modules.shared.auditLogger,modules.datamodels.datamodelAudit,function cleanupOldEntries,Yes
-gateway.modules.shared.auditLogger,modules.datamodels.datamodelAudit,function logEvent,Yes
-gateway.modules.shared.auditLogger,modules.shared.configuration,header,Yes
-gateway.modules.shared.auditLogger,modules.shared.eventManagement,function registerAuditLogCleanupScheduler,Yes
-gateway.modules.shared.auditLogger,modules.shared.timeUtils,header,Yes
-gateway.modules.shared.auditLogger,time,function cleanupOldEntries,Yes
-gateway.modules.shared.auditLogger,typing,header,Yes
-gateway.modules.shared.callbackRegistry,asyncio,header,Yes
-gateway.modules.shared.callbackRegistry,logging,header,Yes
-gateway.modules.shared.callbackRegistry,typing,header,Yes
-gateway.modules.shared.configuration,base64,header,Yes
-gateway.modules.shared.configuration,cryptography.fernet,header,Yes
-gateway.modules.shared.configuration,cryptography.hazmat.primitives,header,Yes
-gateway.modules.shared.configuration,cryptography.hazmat.primitives.kdf.pbkdf2,header,Yes
-gateway.modules.shared.configuration,json,header,Yes
-gateway.modules.shared.configuration,logging,header,Yes
-gateway.modules.shared.configuration,modules.shared.auditLogger,function encryptValue,Yes
-gateway.modules.shared.configuration,modules.shared.auditLogger,function decryptValue,Yes
-gateway.modules.shared.configuration,modules.shared.auditLogger,function get,Yes
-gateway.modules.shared.configuration,os,header,Yes
-gateway.modules.shared.configuration,pathlib,header,Yes
-gateway.modules.shared.configuration,time,header,Yes
-gateway.modules.shared.configuration,typing,header,Yes
-gateway.modules.shared.dbMultiTenantOptimizations,logging,header,Yes
-gateway.modules.shared.dbMultiTenantOptimizations,typing,header,Yes
-gateway.modules.shared.debugLogger,datetime,header,Yes
-gateway.modules.shared.debugLogger,modules.shared.configuration,header,Yes
-gateway.modules.shared.debugLogger,modules.shared.timeUtils,function debugLogToFile,Yes
-gateway.modules.shared.debugLogger,os,header,Yes
-gateway.modules.shared.debugLogger,typing,header,Yes
-gateway.modules.shared.eventManagement,apscheduler.schedulers.asyncio,header,Yes
-gateway.modules.shared.eventManagement,apscheduler.triggers.cron,header,Yes
-gateway.modules.shared.eventManagement,apscheduler.triggers.interval,header,Yes
-gateway.modules.shared.eventManagement,logging,header,Yes
-gateway.modules.shared.eventManagement,typing,header,Yes
-gateway.modules.shared.eventManagement,zoneinfo,header,Yes
-gateway.modules.shared.frontendTypes,enum,header,Yes
-gateway.modules.shared.frontendTypes,typing,header,Yes
-gateway.modules.shared.jsonContinuation,dataclasses,header,Yes
-gateway.modules.shared.jsonContinuation,enum,header,Yes
-gateway.modules.shared.jsonContinuation,json,header,Yes
-gateway.modules.shared.jsonContinuation,logging,header,Yes
-gateway.modules.shared.jsonContinuation,modules.datamodels.datamodelAi,header,Yes
-gateway.modules.shared.jsonContinuation,re,header,Yes
-gateway.modules.shared.jsonContinuation,typing,header,Yes
-gateway.modules.shared.jsonUtils,json,header,Yes
-gateway.modules.shared.jsonUtils,logging,header,Yes
-gateway.modules.shared.jsonUtils,modules.datamodels.datamodelAi,header,Yes
-gateway.modules.shared.jsonUtils,modules.shared.jsonContinuation,function buildContinuationContext,Yes
-gateway.modules.shared.jsonUtils,pydantic,header,Yes
-gateway.modules.shared.jsonUtils,re,header,Yes
-gateway.modules.shared.jsonUtils,typing,header,Yes
-gateway.modules.shared.progressLogger,logging,header,Yes
-gateway.modules.shared.progressLogger,time,header,Yes
-gateway.modules.shared.progressLogger,typing,header,Yes
-gateway.modules.shared.timeUtils,datetime,header,Yes
-gateway.modules.shared.timeUtils,logging,header,Yes
-gateway.modules.shared.timeUtils,time,header,Yes
-gateway.modules.shared.timeUtils,typing,header,Yes
-gateway.modules.workflows.automation.__init__,(relative) .mainWorkflow,header,Yes
-gateway.modules.workflows.automation.mainWorkflow,(relative) .subAutomationUtils,header,Yes
-gateway.modules.workflows.automation.mainWorkflow,json,header,Yes
-gateway.modules.workflows.automation.mainWorkflow,logging,header,Yes
-gateway.modules.workflows.automation.mainWorkflow,modules.datamodels.datamodelChat,header,Yes
-gateway.modules.workflows.automation.mainWorkflow,modules.datamodels.datamodelUam,header,Yes
-gateway.modules.workflows.automation.mainWorkflow,modules.features.automation.datamodelFeatureAutomation,header,Yes
-gateway.modules.workflows.automation.mainWorkflow,modules.services,header,Yes
-gateway.modules.workflows.automation.mainWorkflow,modules.shared.eventManagement,header,Yes
-gateway.modules.workflows.automation.mainWorkflow,modules.shared.timeUtils,header,Yes
-gateway.modules.workflows.automation.mainWorkflow,modules.workflows.workflowManager,header,Yes
-gateway.modules.workflows.automation.mainWorkflow,typing,header,Yes
-gateway.modules.workflows.automation.subAutomationSchedule,logging,header,Yes
-gateway.modules.workflows.automation.subAutomationSchedule,modules.services,header,Yes
-gateway.modules.workflows.automation.subAutomationSchedule,modules.shared.callbackRegistry,function start,Yes
-gateway.modules.workflows.automation.subAutomationSchedule,modules.workflows.automation,function start,Yes
-gateway.modules.workflows.automation.subAutomationTemplates,typing,header,Yes
-gateway.modules.workflows.automation.subAutomationUtils,datetime,header,Yes
-gateway.modules.workflows.automation.subAutomationUtils,json,header,Yes
-gateway.modules.workflows.automation.subAutomationUtils,typing,header,Yes
-gateway.modules.workflows.methods.methodAi.__init__,(relative) .methodAi,header,Yes
-gateway.modules.workflows.methods.methodAi.actions.__init__,(relative) .convertDocument,header,Yes
-gateway.modules.workflows.methods.methodAi.actions.__init__,(relative) .generateCode,header,Yes
-gateway.modules.workflows.methods.methodAi.actions.__init__,(relative) .generateDocument,header,Yes
-gateway.modules.workflows.methods.methodAi.actions.__init__,(relative) .process,header,Yes
-gateway.modules.workflows.methods.methodAi.actions.__init__,(relative) .summarizeDocument,header,Yes
-gateway.modules.workflows.methods.methodAi.actions.__init__,(relative) .translateDocument,header,Yes
-gateway.modules.workflows.methods.methodAi.actions.__init__,(relative) .webResearch,header,Yes
-gateway.modules.workflows.methods.methodAi.actions.convertDocument,logging,header,Yes
-gateway.modules.workflows.methods.methodAi.actions.convertDocument,modules.datamodels.datamodelChat,header,Yes
-gateway.modules.workflows.methods.methodAi.actions.convertDocument,typing,header,Yes
-gateway.modules.workflows.methods.methodAi.actions.generateCode,logging,header,Yes
-gateway.modules.workflows.methods.methodAi.actions.generateCode,modules.datamodels.datamodelAi,header,Yes
-gateway.modules.workflows.methods.methodAi.actions.generateCode,modules.datamodels.datamodelChat,header,Yes
-gateway.modules.workflows.methods.methodAi.actions.generateCode,modules.datamodels.datamodelDocref,function generateCode,Yes
-gateway.modules.workflows.methods.methodAi.actions.generateCode,modules.datamodels.datamodelExtraction,header,Yes
-gateway.modules.workflows.methods.methodAi.actions.generateCode,modules.datamodels.datamodelWorkflow,header,Yes
-gateway.modules.workflows.methods.methodAi.actions.generateCode,re,function generateCode,Yes
-gateway.modules.workflows.methods.methodAi.actions.generateCode,time,header,Yes
-gateway.modules.workflows.methods.methodAi.actions.generateCode,typing,header,Yes
-gateway.modules.workflows.methods.methodAi.actions.generateDocument,logging,header,Yes
-gateway.modules.workflows.methods.methodAi.actions.generateDocument,modules.datamodels.datamodelAi,header,Yes
-gateway.modules.workflows.methods.methodAi.actions.generateDocument,modules.datamodels.datamodelChat,header,Yes
-gateway.modules.workflows.methods.methodAi.actions.generateDocument,modules.datamodels.datamodelDocref,function generateDocument,Yes
-gateway.modules.workflows.methods.methodAi.actions.generateDocument,modules.datamodels.datamodelExtraction,header,Yes
-gateway.modules.workflows.methods.methodAi.actions.generateDocument,modules.datamodels.datamodelWorkflow,header,Yes
-gateway.modules.workflows.methods.methodAi.actions.generateDocument,re,function generateDocument,Yes
-gateway.modules.workflows.methods.methodAi.actions.generateDocument,time,header,Yes
-gateway.modules.workflows.methods.methodAi.actions.generateDocument,typing,header,Yes
-gateway.modules.workflows.methods.methodAi.actions.process,json,header,Yes
-gateway.modules.workflows.methods.methodAi.actions.process,logging,header,Yes
-gateway.modules.workflows.methods.methodAi.actions.process,modules.datamodels.datamodelAi,header,Yes
-gateway.modules.workflows.methods.methodAi.actions.process,modules.datamodels.datamodelChat,header,Yes
-gateway.modules.workflows.methods.methodAi.actions.process,modules.datamodels.datamodelDocref,function process,Yes
-gateway.modules.workflows.methods.methodAi.actions.process,modules.datamodels.datamodelDocref,function process,Yes
-gateway.modules.workflows.methods.methodAi.actions.process,modules.datamodels.datamodelExtraction,header,Yes
-gateway.modules.workflows.methods.methodAi.actions.process,modules.datamodels.datamodelWorkflow,function process,Yes
-gateway.modules.workflows.methods.methodAi.actions.process,time,header,Yes
-gateway.modules.workflows.methods.methodAi.actions.process,typing,header,Yes
-gateway.modules.workflows.methods.methodAi.actions.summarizeDocument,logging,header,Yes
-gateway.modules.workflows.methods.methodAi.actions.summarizeDocument,modules.datamodels.datamodelChat,header,Yes
-gateway.modules.workflows.methods.methodAi.actions.summarizeDocument,typing,header,Yes
-gateway.modules.workflows.methods.methodAi.actions.translateDocument,logging,header,Yes
-gateway.modules.workflows.methods.methodAi.actions.translateDocument,modules.datamodels.datamodelChat,header,Yes
-gateway.modules.workflows.methods.methodAi.actions.translateDocument,typing,header,Yes
-gateway.modules.workflows.methods.methodAi.actions.webResearch,logging,header,Yes
-gateway.modules.workflows.methods.methodAi.actions.webResearch,modules.datamodels.datamodelChat,header,Yes
-gateway.modules.workflows.methods.methodAi.actions.webResearch,re,header,Yes
-gateway.modules.workflows.methods.methodAi.actions.webResearch,time,header,Yes
-gateway.modules.workflows.methods.methodAi.actions.webResearch,typing,header,Yes
-gateway.modules.workflows.methods.methodAi.helpers.csvProcessing,logging,header,Yes
-gateway.modules.workflows.methods.methodAi.helpers.csvProcessing,typing,header,Yes
-gateway.modules.workflows.methods.methodAi.methodAi,(relative) .actions.convertDocument,header,Yes
-gateway.modules.workflows.methods.methodAi.methodAi,(relative) .actions.generateCode,header,Yes
-gateway.modules.workflows.methods.methodAi.methodAi,(relative) .actions.generateDocument,header,Yes
-gateway.modules.workflows.methods.methodAi.methodAi,(relative) .actions.process,header,Yes
-gateway.modules.workflows.methods.methodAi.methodAi,(relative) .actions.summarizeDocument,header,Yes
-gateway.modules.workflows.methods.methodAi.methodAi,(relative) .actions.translateDocument,header,Yes
-gateway.modules.workflows.methods.methodAi.methodAi,(relative) .actions.webResearch,header,Yes
-gateway.modules.workflows.methods.methodAi.methodAi,(relative) .helpers.csvProcessing,header,Yes
-gateway.modules.workflows.methods.methodAi.methodAi,datetime,header,Yes
-gateway.modules.workflows.methods.methodAi.methodAi,logging,header,Yes
-gateway.modules.workflows.methods.methodAi.methodAi,modules.datamodels.datamodelWorkflowActions,header,Yes
-gateway.modules.workflows.methods.methodAi.methodAi,modules.shared.frontendTypes,header,Yes
-gateway.modules.workflows.methods.methodAi.methodAi,modules.workflows.methods.methodBase,header,Yes
-gateway.modules.workflows.methods.methodBase,datetime,header,Yes
-gateway.modules.workflows.methods.methodBase,functools,header,Yes
-gateway.modules.workflows.methods.methodBase,inspect,header,Yes
-gateway.modules.workflows.methods.methodBase,logging,header,Yes
-gateway.modules.workflows.methods.methodBase,modules.datamodels.datamodelRbac,header,Yes
-gateway.modules.workflows.methods.methodBase,modules.datamodels.datamodelWorkflowActions,header,Yes
-gateway.modules.workflows.methods.methodBase,re,function _applyValidationRules,Yes
-gateway.modules.workflows.methods.methodBase,re,function _generateMeaningfulFileName,Yes
-gateway.modules.workflows.methods.methodBase,typing,header,Yes
-gateway.modules.workflows.methods.methodChatbot.__init__,(relative) .methodChatbot,header,Yes
-gateway.modules.workflows.methods.methodChatbot.actions.queryDatabase,json,header,Yes
-gateway.modules.workflows.methods.methodChatbot.actions.queryDatabase,logging,header,Yes
-gateway.modules.workflows.methods.methodChatbot.actions.queryDatabase,modules.connectors.connectorPreprocessor,header,Yes
-gateway.modules.workflows.methods.methodChatbot.actions.queryDatabase,modules.datamodels.datamodelChat,header,Yes
-gateway.modules.workflows.methods.methodChatbot.actions.queryDatabase,modules.datamodels.datamodelDocref,function queryDatabase,Yes
-gateway.modules.workflows.methods.methodChatbot.actions.queryDatabase,modules.workflows.methods.methodBase,header,Yes
-gateway.modules.workflows.methods.methodChatbot.actions.queryDatabase,time,header,Yes
-gateway.modules.workflows.methods.methodChatbot.actions.queryDatabase,typing,header,Yes
-gateway.modules.workflows.methods.methodChatbot.methodChatbot,(relative) .actions.queryDatabase,header,Yes
-gateway.modules.workflows.methods.methodChatbot.methodChatbot,logging,header,Yes
-gateway.modules.workflows.methods.methodChatbot.methodChatbot,modules.datamodels.datamodelWorkflowActions,header,Yes
-gateway.modules.workflows.methods.methodChatbot.methodChatbot,modules.shared.frontendTypes,header,Yes
-gateway.modules.workflows.methods.methodChatbot.methodChatbot,modules.workflows.methods.methodBase,header,Yes
-gateway.modules.workflows.methods.methodContext.__init__,(relative) .methodContext,header,Yes
-gateway.modules.workflows.methods.methodContext.actions.__init__,(relative) .extractContent,header,Yes
-gateway.modules.workflows.methods.methodContext.actions.__init__,(relative) .getDocumentIndex,header,Yes
-gateway.modules.workflows.methods.methodContext.actions.__init__,(relative) .neutralizeData,header,Yes
-gateway.modules.workflows.methods.methodContext.actions.__init__,(relative) .triggerPreprocessingServer,header,Yes
-gateway.modules.workflows.methods.methodContext.actions.extractContent,logging,header,Yes
-gateway.modules.workflows.methods.methodContext.actions.extractContent,modules.datamodels.datamodelChat,header,Yes
-gateway.modules.workflows.methods.methodContext.actions.extractContent,modules.datamodels.datamodelDocref,header,Yes
-gateway.modules.workflows.methods.methodContext.actions.extractContent,modules.datamodels.datamodelExtraction,header,Yes
-gateway.modules.workflows.methods.methodContext.actions.extractContent,time,header,Yes
-gateway.modules.workflows.methods.methodContext.actions.extractContent,typing,header,Yes
-gateway.modules.workflows.methods.methodContext.actions.getDocumentIndex,json,header,Yes
-gateway.modules.workflows.methods.methodContext.actions.getDocumentIndex,logging,header,Yes
-gateway.modules.workflows.methods.methodContext.actions.getDocumentIndex,modules.datamodels.datamodelChat,header,Yes
-gateway.modules.workflows.methods.methodContext.actions.getDocumentIndex,typing,header,Yes
-gateway.modules.workflows.methods.methodContext.actions.neutralizeData,logging,header,Yes
-gateway.modules.workflows.methods.methodContext.actions.neutralizeData,modules.datamodels.datamodelChat,header,Yes
-gateway.modules.workflows.methods.methodContext.actions.neutralizeData,modules.datamodels.datamodelDocref,header,Yes
-gateway.modules.workflows.methods.methodContext.actions.neutralizeData,modules.datamodels.datamodelExtraction,header,Yes
-gateway.modules.workflows.methods.methodContext.actions.neutralizeData,time,header,Yes
-gateway.modules.workflows.methods.methodContext.actions.neutralizeData,typing,header,Yes
-gateway.modules.workflows.methods.methodContext.actions.triggerPreprocessingServer,aiohttp,header,Yes
-gateway.modules.workflows.methods.methodContext.actions.triggerPreprocessingServer,json,header,Yes
-gateway.modules.workflows.methods.methodContext.actions.triggerPreprocessingServer,logging,header,Yes
-gateway.modules.workflows.methods.methodContext.actions.triggerPreprocessingServer,modules.datamodels.datamodelChat,header,Yes
-gateway.modules.workflows.methods.methodContext.actions.triggerPreprocessingServer,modules.shared.configuration,header,Yes
-gateway.modules.workflows.methods.methodContext.actions.triggerPreprocessingServer,typing,header,Yes
-gateway.modules.workflows.methods.methodContext.helpers.documentIndex,datetime,header,Yes
-gateway.modules.workflows.methods.methodContext.helpers.documentIndex,logging,header,Yes
-gateway.modules.workflows.methods.methodContext.helpers.documentIndex,typing,header,Yes
-gateway.modules.workflows.methods.methodContext.helpers.formatting,logging,header,Yes
-gateway.modules.workflows.methods.methodContext.helpers.formatting,typing,header,Yes
-gateway.modules.workflows.methods.methodContext.methodContext,(relative) .actions.extractContent,header,Yes
-gateway.modules.workflows.methods.methodContext.methodContext,(relative) .actions.getDocumentIndex,header,Yes
-gateway.modules.workflows.methods.methodContext.methodContext,(relative) .actions.neutralizeData,header,Yes
-gateway.modules.workflows.methods.methodContext.methodContext,(relative) .actions.triggerPreprocessingServer,header,Yes
-gateway.modules.workflows.methods.methodContext.methodContext,(relative) .helpers.documentIndex,header,Yes
-gateway.modules.workflows.methods.methodContext.methodContext,(relative) .helpers.formatting,header,Yes
-gateway.modules.workflows.methods.methodContext.methodContext,logging,header,Yes
-gateway.modules.workflows.methods.methodContext.methodContext,modules.datamodels.datamodelWorkflowActions,header,Yes
-gateway.modules.workflows.methods.methodContext.methodContext,modules.shared.frontendTypes,header,Yes
-gateway.modules.workflows.methods.methodContext.methodContext,modules.workflows.methods.methodBase,header,Yes
-gateway.modules.workflows.methods.methodJira.__init__,(relative) .methodJira,header,Yes
-gateway.modules.workflows.methods.methodJira.actions.__init__,(relative) .connectJira,header,Yes
-gateway.modules.workflows.methods.methodJira.actions.__init__,(relative) .createCsvContent,header,Yes
-gateway.modules.workflows.methods.methodJira.actions.__init__,(relative) .createExcelContent,header,Yes
-gateway.modules.workflows.methods.methodJira.actions.__init__,(relative) .exportTicketsAsJson,header,Yes
-gateway.modules.workflows.methods.methodJira.actions.__init__,(relative) .importTicketsFromJson,header,Yes
-gateway.modules.workflows.methods.methodJira.actions.__init__,(relative) .mergeTicketData,header,Yes
-gateway.modules.workflows.methods.methodJira.actions.__init__,(relative) .parseCsvContent,header,Yes
-gateway.modules.workflows.methods.methodJira.actions.__init__,(relative) .parseExcelContent,header,Yes
-gateway.modules.workflows.methods.methodJira.actions.connectJira,json,header,Yes
-gateway.modules.workflows.methods.methodJira.actions.connectJira,logging,header,Yes
-gateway.modules.workflows.methods.methodJira.actions.connectJira,modules.datamodels.datamodelChat,header,Yes
-gateway.modules.workflows.methods.methodJira.actions.connectJira,modules.shared.configuration,header,Yes
-gateway.modules.workflows.methods.methodJira.actions.connectJira,typing,header,Yes
-gateway.modules.workflows.methods.methodJira.actions.connectJira,uuid,header,Yes
-gateway.modules.workflows.methods.methodJira.actions.createCsvContent,base64,header,Yes
-gateway.modules.workflows.methods.methodJira.actions.createCsvContent,csv,header,Yes
-gateway.modules.workflows.methods.methodJira.actions.createCsvContent,datetime,header,Yes
-gateway.modules.workflows.methods.methodJira.actions.createCsvContent,io,header,Yes
-gateway.modules.workflows.methods.methodJira.actions.createCsvContent,json,header,Yes
-gateway.modules.workflows.methods.methodJira.actions.createCsvContent,logging,header,Yes
-gateway.modules.workflows.methods.methodJira.actions.createCsvContent,modules.datamodels.datamodelChat,header,Yes
-gateway.modules.workflows.methods.methodJira.actions.createCsvContent,pandas,header,Yes
-gateway.modules.workflows.methods.methodJira.actions.createCsvContent,typing,header,Yes
-gateway.modules.workflows.methods.methodJira.actions.createExcelContent,base64,header,Yes
-gateway.modules.workflows.methods.methodJira.actions.createExcelContent,csv,header,Yes
-gateway.modules.workflows.methods.methodJira.actions.createExcelContent,datetime,header,Yes
-gateway.modules.workflows.methods.methodJira.actions.createExcelContent,io,header,Yes
-gateway.modules.workflows.methods.methodJira.actions.createExcelContent,json,header,Yes
-gateway.modules.workflows.methods.methodJira.actions.createExcelContent,logging,header,Yes
-gateway.modules.workflows.methods.methodJira.actions.createExcelContent,modules.datamodels.datamodelChat,header,Yes
-gateway.modules.workflows.methods.methodJira.actions.createExcelContent,pandas,header,Yes
-gateway.modules.workflows.methods.methodJira.actions.createExcelContent,typing,header,Yes
-gateway.modules.workflows.methods.methodJira.actions.exportTicketsAsJson,json,header,Yes
-gateway.modules.workflows.methods.methodJira.actions.exportTicketsAsJson,logging,header,Yes
-gateway.modules.workflows.methods.methodJira.actions.exportTicketsAsJson,modules.datamodels.datamodelChat,header,Yes
-gateway.modules.workflows.methods.methodJira.actions.exportTicketsAsJson,typing,header,Yes
-gateway.modules.workflows.methods.methodJira.actions.importTicketsFromJson,json,header,Yes
-gateway.modules.workflows.methods.methodJira.actions.importTicketsFromJson,logging,header,Yes
-gateway.modules.workflows.methods.methodJira.actions.importTicketsFromJson,modules.datamodels.datamodelChat,header,Yes
-gateway.modules.workflows.methods.methodJira.actions.importTicketsFromJson,typing,header,Yes
-gateway.modules.workflows.methods.methodJira.actions.mergeTicketData,json,header,Yes
-gateway.modules.workflows.methods.methodJira.actions.mergeTicketData,logging,header,Yes
-gateway.modules.workflows.methods.methodJira.actions.mergeTicketData,modules.datamodels.datamodelChat,header,Yes
-gateway.modules.workflows.methods.methodJira.actions.mergeTicketData,typing,header,Yes
-gateway.modules.workflows.methods.methodJira.actions.parseCsvContent,io,header,Yes
-gateway.modules.workflows.methods.methodJira.actions.parseCsvContent,json,header,Yes
-gateway.modules.workflows.methods.methodJira.actions.parseCsvContent,logging,header,Yes
-gateway.modules.workflows.methods.methodJira.actions.parseCsvContent,modules.datamodels.datamodelChat,header,Yes
-gateway.modules.workflows.methods.methodJira.actions.parseCsvContent,pandas,header,Yes
-gateway.modules.workflows.methods.methodJira.actions.parseCsvContent,typing,header,Yes
-gateway.modules.workflows.methods.methodJira.actions.parseExcelContent,io,header,Yes
-gateway.modules.workflows.methods.methodJira.actions.parseExcelContent,json,header,Yes
-gateway.modules.workflows.methods.methodJira.actions.parseExcelContent,logging,header,Yes
-gateway.modules.workflows.methods.methodJira.actions.parseExcelContent,modules.datamodels.datamodelChat,header,Yes
-gateway.modules.workflows.methods.methodJira.actions.parseExcelContent,pandas,header,Yes
-gateway.modules.workflows.methods.methodJira.actions.parseExcelContent,typing,header,Yes
-gateway.modules.workflows.methods.methodJira.helpers.adfConverter,logging,header,Yes
-gateway.modules.workflows.methods.methodJira.helpers.adfConverter,typing,header,Yes
-gateway.modules.workflows.methods.methodJira.helpers.documentParsing,json,header,Yes
-gateway.modules.workflows.methods.methodJira.helpers.documentParsing,logging,header,Yes
-gateway.modules.workflows.methods.methodJira.helpers.documentParsing,modules.datamodels.datamodelDocref,header,Yes
-gateway.modules.workflows.methods.methodJira.helpers.documentParsing,typing,header,Yes
-gateway.modules.workflows.methods.methodJira.methodJira,(relative) .actions.connectJira,header,Yes
-gateway.modules.workflows.methods.methodJira.methodJira,(relative) .actions.createCsvContent,header,Yes
-gateway.modules.workflows.methods.methodJira.methodJira,(relative) .actions.createExcelContent,header,Yes
-gateway.modules.workflows.methods.methodJira.methodJira,(relative) .actions.exportTicketsAsJson,header,Yes
-gateway.modules.workflows.methods.methodJira.methodJira,(relative) .actions.importTicketsFromJson,header,Yes
-gateway.modules.workflows.methods.methodJira.methodJira,(relative) .actions.mergeTicketData,header,Yes
-gateway.modules.workflows.methods.methodJira.methodJira,(relative) .actions.parseCsvContent,header,Yes
-gateway.modules.workflows.methods.methodJira.methodJira,(relative) .actions.parseExcelContent,header,Yes
-gateway.modules.workflows.methods.methodJira.methodJira,(relative) .helpers.adfConverter,header,Yes
-gateway.modules.workflows.methods.methodJira.methodJira,(relative) .helpers.documentParsing,header,Yes
-gateway.modules.workflows.methods.methodJira.methodJira,logging,header,Yes
-gateway.modules.workflows.methods.methodJira.methodJira,modules.datamodels.datamodelWorkflowActions,header,Yes
-gateway.modules.workflows.methods.methodJira.methodJira,modules.shared.frontendTypes,header,Yes
-gateway.modules.workflows.methods.methodJira.methodJira,modules.workflows.methods.methodBase,header,Yes
-gateway.modules.workflows.methods.methodJira.methodJira,typing,header,Yes
-gateway.modules.workflows.methods.methodOutlook.__init__,(relative) .methodOutlook,header,Yes
-gateway.modules.workflows.methods.methodOutlook.actions.__init__,(relative) .composeAndDraftEmailWithContext,header,Yes
-gateway.modules.workflows.methods.methodOutlook.actions.__init__,(relative) .readEmails,header,Yes
-gateway.modules.workflows.methods.methodOutlook.actions.__init__,(relative) .searchEmails,header,Yes
-gateway.modules.workflows.methods.methodOutlook.actions.__init__,(relative) .sendDraftEmail,header,Yes
-gateway.modules.workflows.methods.methodOutlook.actions.composeAndDraftEmailWithContext,base64,header,Yes
-gateway.modules.workflows.methods.methodOutlook.actions.composeAndDraftEmailWithContext,json,header,Yes
-gateway.modules.workflows.methods.methodOutlook.actions.composeAndDraftEmailWithContext,logging,header,Yes
-gateway.modules.workflows.methods.methodOutlook.actions.composeAndDraftEmailWithContext,modules.datamodels.datamodelChat,header,Yes
-gateway.modules.workflows.methods.methodOutlook.actions.composeAndDraftEmailWithContext,modules.datamodels.datamodelDocref,function composeAndDraftEmailWithContext,Yes
-gateway.modules.workflows.methods.methodOutlook.actions.composeAndDraftEmailWithContext,modules.datamodels.datamodelDocref,function composeAndDraftEmailWithContext,Yes
-gateway.modules.workflows.methods.methodOutlook.actions.composeAndDraftEmailWithContext,modules.datamodels.datamodelDocref,function composeAndDraftEmailWithContext,Yes
-gateway.modules.workflows.methods.methodOutlook.actions.composeAndDraftEmailWithContext,modules.datamodels.datamodelDocref,function composeAndDraftEmailWithContext,Yes
-gateway.modules.workflows.methods.methodOutlook.actions.composeAndDraftEmailWithContext,modules.datamodels.datamodelDocref,function composeAndDraftEmailWithContext,Yes
-gateway.modules.workflows.methods.methodOutlook.actions.composeAndDraftEmailWithContext,modules.datamodels.datamodelDocref,function composeAndDraftEmailWithContext,Yes
-gateway.modules.workflows.methods.methodOutlook.actions.composeAndDraftEmailWithContext,requests,header,Yes
-gateway.modules.workflows.methods.methodOutlook.actions.composeAndDraftEmailWithContext,typing,header,Yes
-gateway.modules.workflows.methods.methodOutlook.actions.readEmails,json,header,Yes
-gateway.modules.workflows.methods.methodOutlook.actions.readEmails,logging,header,Yes
-gateway.modules.workflows.methods.methodOutlook.actions.readEmails,modules.datamodels.datamodelChat,header,Yes
-gateway.modules.workflows.methods.methodOutlook.actions.readEmails,requests,header,Yes
-gateway.modules.workflows.methods.methodOutlook.actions.readEmails,time,header,Yes
-gateway.modules.workflows.methods.methodOutlook.actions.readEmails,typing,header,Yes
-gateway.modules.workflows.methods.methodOutlook.actions.searchEmails,json,header,Yes
-gateway.modules.workflows.methods.methodOutlook.actions.searchEmails,logging,header,Yes
-gateway.modules.workflows.methods.methodOutlook.actions.searchEmails,modules.datamodels.datamodelChat,header,Yes
-gateway.modules.workflows.methods.methodOutlook.actions.searchEmails,requests,header,Yes
-gateway.modules.workflows.methods.methodOutlook.actions.searchEmails,typing,header,Yes
-gateway.modules.workflows.methods.methodOutlook.actions.sendDraftEmail,json,header,Yes
-gateway.modules.workflows.methods.methodOutlook.actions.sendDraftEmail,logging,header,Yes
-gateway.modules.workflows.methods.methodOutlook.actions.sendDraftEmail,modules.datamodels.datamodelChat,header,Yes
-gateway.modules.workflows.methods.methodOutlook.actions.sendDraftEmail,modules.datamodels.datamodelDocref,function sendDraftEmail,Yes
-gateway.modules.workflows.methods.methodOutlook.actions.sendDraftEmail,requests,header,Yes
-gateway.modules.workflows.methods.methodOutlook.actions.sendDraftEmail,time,header,Yes
-gateway.modules.workflows.methods.methodOutlook.actions.sendDraftEmail,typing,header,Yes
-gateway.modules.workflows.methods.methodOutlook.helpers.connection,logging,header,Yes
-gateway.modules.workflows.methods.methodOutlook.helpers.connection,requests,header,Yes
-gateway.modules.workflows.methods.methodOutlook.helpers.connection,typing,header,Yes
-gateway.modules.workflows.methods.methodOutlook.helpers.emailProcessing,logging,header,Yes
-gateway.modules.workflows.methods.methodOutlook.helpers.emailProcessing,re,header,Yes
-gateway.modules.workflows.methods.methodOutlook.helpers.emailProcessing,typing,header,Yes
-gateway.modules.workflows.methods.methodOutlook.helpers.folderManagement,logging,header,Yes
-gateway.modules.workflows.methods.methodOutlook.helpers.folderManagement,requests,header,Yes
-gateway.modules.workflows.methods.methodOutlook.helpers.folderManagement,typing,header,Yes
-gateway.modules.workflows.methods.methodOutlook.methodOutlook,(relative) .actions.composeAndDraftEmailWithContext,header,Yes
-gateway.modules.workflows.methods.methodOutlook.methodOutlook,(relative) .actions.readEmails,header,Yes
-gateway.modules.workflows.methods.methodOutlook.methodOutlook,(relative) .actions.searchEmails,header,Yes
-gateway.modules.workflows.methods.methodOutlook.methodOutlook,(relative) .actions.sendDraftEmail,header,Yes
-gateway.modules.workflows.methods.methodOutlook.methodOutlook,(relative) .helpers.connection,header,Yes
-gateway.modules.workflows.methods.methodOutlook.methodOutlook,(relative) .helpers.emailProcessing,header,Yes
-gateway.modules.workflows.methods.methodOutlook.methodOutlook,(relative) .helpers.folderManagement,header,Yes
-gateway.modules.workflows.methods.methodOutlook.methodOutlook,datetime,header,Yes
-gateway.modules.workflows.methods.methodOutlook.methodOutlook,logging,header,Yes
-gateway.modules.workflows.methods.methodOutlook.methodOutlook,modules.datamodels.datamodelWorkflowActions,header,Yes
-gateway.modules.workflows.methods.methodOutlook.methodOutlook,modules.shared.frontendTypes,header,Yes
-gateway.modules.workflows.methods.methodOutlook.methodOutlook,modules.workflows.methods.methodBase,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.__init__,(relative) .methodSharepoint,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.actions.__init__,(relative) .analyzeFolderUsage,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.actions.__init__,(relative) .copyFile,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.actions.__init__,(relative) .downloadFileByPath,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.actions.__init__,(relative) .findDocumentPath,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.actions.__init__,(relative) .findSiteByUrl,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.actions.__init__,(relative) .listDocuments,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.actions.__init__,(relative) .readDocuments,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.actions.__init__,(relative) .uploadDocument,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.actions.__init__,(relative) .uploadFile,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.actions.analyzeFolderUsage,datetime,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.actions.analyzeFolderUsage,json,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.actions.analyzeFolderUsage,logging,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.actions.analyzeFolderUsage,modules.datamodels.datamodelChat,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.actions.analyzeFolderUsage,time,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.actions.analyzeFolderUsage,typing,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.actions.copyFile,json,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.actions.copyFile,logging,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.actions.copyFile,modules.datamodels.datamodelChat,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.actions.copyFile,modules.datamodels.datamodelDocref,function copyFile,Yes
-gateway.modules.workflows.methods.methodSharepoint.actions.copyFile,typing,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.actions.downloadFileByPath,base64,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.actions.downloadFileByPath,json,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.actions.downloadFileByPath,logging,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.actions.downloadFileByPath,modules.datamodels.datamodelChat,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.actions.downloadFileByPath,modules.datamodels.datamodelDocref,function downloadFileByPath,Yes
-gateway.modules.workflows.methods.methodSharepoint.actions.downloadFileByPath,os,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.actions.downloadFileByPath,typing,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.actions.findDocumentPath,json,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.actions.findDocumentPath,logging,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.actions.findDocumentPath,modules.datamodels.datamodelChat,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.actions.findDocumentPath,time,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.actions.findDocumentPath,typing,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.actions.findDocumentPath,urllib.parse,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.actions.findSiteByUrl,json,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.actions.findSiteByUrl,logging,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.actions.findSiteByUrl,modules.datamodels.datamodelChat,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.actions.findSiteByUrl,typing,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.actions.listDocuments,json,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.actions.listDocuments,logging,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.actions.listDocuments,modules.datamodels.datamodelChat,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.actions.listDocuments,time,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.actions.listDocuments,typing,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.actions.listDocuments,urllib.parse,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.actions.readDocuments,base64,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.actions.readDocuments,json,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.actions.readDocuments,logging,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.actions.readDocuments,modules.datamodels.datamodelChat,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.actions.readDocuments,time,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.actions.readDocuments,typing,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.actions.uploadDocument,json,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.actions.uploadDocument,logging,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.actions.uploadDocument,modules.datamodels.datamodelChat,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.actions.uploadDocument,time,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.actions.uploadDocument,typing,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.actions.uploadDocument,urllib.parse,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.actions.uploadFile,base64,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.actions.uploadFile,json,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.actions.uploadFile,logging,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.actions.uploadFile,modules.datamodels.datamodelChat,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.actions.uploadFile,modules.datamodels.datamodelDocref,function uploadFile,Yes
-gateway.modules.workflows.methods.methodSharepoint.actions.uploadFile,modules.datamodels.datamodelDocref,function uploadFile,Yes
-gateway.modules.workflows.methods.methodSharepoint.actions.uploadFile,typing,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.helpers.apiClient,aiohttp,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.helpers.apiClient,asyncio,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.helpers.apiClient,logging,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.helpers.apiClient,typing,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.helpers.connection,logging,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.helpers.connection,typing,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.helpers.documentParsing,json,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.helpers.documentParsing,logging,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.helpers.documentParsing,modules.datamodels.datamodelDocref,function parseDocumentListForFoundDocuments,Yes
-gateway.modules.workflows.methods.methodSharepoint.helpers.documentParsing,modules.datamodels.datamodelDocref,function parseDocumentListForFolder,Yes
-gateway.modules.workflows.methods.methodSharepoint.helpers.documentParsing,typing,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.helpers.pathProcessing,logging,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.helpers.pathProcessing,re,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.helpers.pathProcessing,typing,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.helpers.siteDiscovery,logging,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.helpers.siteDiscovery,typing,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.helpers.siteDiscovery,urllib.parse,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.methodSharepoint,(relative) .actions.analyzeFolderUsage,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.methodSharepoint,(relative) .actions.copyFile,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.methodSharepoint,(relative) .actions.downloadFileByPath,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.methodSharepoint,(relative) .actions.findDocumentPath,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.methodSharepoint,(relative) .actions.findSiteByUrl,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.methodSharepoint,(relative) .actions.listDocuments,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.methodSharepoint,(relative) .actions.readDocuments,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.methodSharepoint,(relative) .actions.uploadDocument,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.methodSharepoint,(relative) .actions.uploadFile,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.methodSharepoint,(relative) .helpers.apiClient,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.methodSharepoint,(relative) .helpers.connection,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.methodSharepoint,(relative) .helpers.documentParsing,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.methodSharepoint,(relative) .helpers.pathProcessing,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.methodSharepoint,(relative) .helpers.siteDiscovery,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.methodSharepoint,logging,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.methodSharepoint,modules.datamodels.datamodelWorkflowActions,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.methodSharepoint,modules.shared.frontendTypes,header,Yes
-gateway.modules.workflows.methods.methodSharepoint.methodSharepoint,modules.workflows.methods.methodBase,header,Yes
-gateway.modules.workflows.processing.adaptive.__init__,(relative) .contentValidator,header,Yes
-gateway.modules.workflows.processing.adaptive.__init__,(relative) .learningEngine,header,Yes
-gateway.modules.workflows.processing.adaptive.__init__,(relative) .progressTracker,header,Yes
-gateway.modules.workflows.processing.adaptive.adaptiveLearningEngine,collections,header,Yes
-gateway.modules.workflows.processing.adaptive.adaptiveLearningEngine,datetime,header,Yes
-gateway.modules.workflows.processing.adaptive.adaptiveLearningEngine,logging,header,Yes
-gateway.modules.workflows.processing.adaptive.adaptiveLearningEngine,typing,header,Yes
-gateway.modules.workflows.processing.adaptive.contentValidator,base64,header,Yes
-gateway.modules.workflows.processing.adaptive.contentValidator,csv,function _extractCodeFileStatistics,Yes
-gateway.modules.workflows.processing.adaptive.contentValidator,io,function _extractCodeFileStatistics,Yes
-gateway.modules.workflows.processing.adaptive.contentValidator,json,header,Yes
-gateway.modules.workflows.processing.adaptive.contentValidator,logging,header,Yes
-gateway.modules.workflows.processing.adaptive.contentValidator,re,header,Yes
-gateway.modules.workflows.processing.adaptive.contentValidator,typing,header,Yes
-gateway.modules.workflows.processing.adaptive.contentValidator,xml.etree.ElementTree,function _extractCodeFileStatistics,Yes
-gateway.modules.workflows.processing.adaptive.learningEngine,datetime,header,Yes
-gateway.modules.workflows.processing.adaptive.learningEngine,logging,header,Yes
-gateway.modules.workflows.processing.adaptive.learningEngine,typing,header,Yes
-gateway.modules.workflows.processing.adaptive.progressTracker,datetime,header,Yes
-gateway.modules.workflows.processing.adaptive.progressTracker,logging,header,Yes
-gateway.modules.workflows.processing.adaptive.progressTracker,typing,header,Yes
-gateway.modules.workflows.processing.core.actionExecutor,logging,header,Yes
-gateway.modules.workflows.processing.core.actionExecutor,modules.datamodels.datamodelChat,header,Yes
-gateway.modules.workflows.processing.core.actionExecutor,modules.datamodels.datamodelChat,header,Yes
-gateway.modules.workflows.processing.core.actionExecutor,modules.workflows.processing.core.messageCreator,function _createActionCompletionMessage,Yes
-gateway.modules.workflows.processing.core.actionExecutor,modules.workflows.processing.shared.methodDiscovery,header,Yes
-gateway.modules.workflows.processing.core.actionExecutor,modules.workflows.processing.shared.stateTools,header,Yes
-gateway.modules.workflows.processing.core.actionExecutor,time,function executeSingleAction,Yes
-gateway.modules.workflows.processing.core.actionExecutor,typing,header,Yes
-gateway.modules.workflows.processing.core.messageCreator,logging,header,Yes
-gateway.modules.workflows.processing.core.messageCreator,modules.datamodels.datamodelChat,header,Yes
-gateway.modules.workflows.processing.core.messageCreator,modules.datamodels.datamodelChat,header,Yes
-gateway.modules.workflows.processing.core.messageCreator,modules.workflows.processing.shared.stateTools,header,Yes
-gateway.modules.workflows.processing.core.messageCreator,typing,header,Yes
-gateway.modules.workflows.processing.core.taskPlanner,json,header,Yes
-gateway.modules.workflows.processing.core.taskPlanner,logging,header,Yes
-gateway.modules.workflows.processing.core.taskPlanner,modules.datamodels.datamodelAi,header,Yes
-gateway.modules.workflows.processing.core.taskPlanner,modules.datamodels.datamodelChat,header,Yes
-gateway.modules.workflows.processing.core.taskPlanner,modules.workflows.processing.shared.promptGenerationTaskplan,header,Yes
-gateway.modules.workflows.processing.core.taskPlanner,modules.workflows.processing.shared.stateTools,header,Yes
-gateway.modules.workflows.processing.core.taskPlanner,typing,header,Yes
-gateway.modules.workflows.processing.core.validator,logging,header,Yes
-gateway.modules.workflows.processing.core.validator,typing,header,Yes
-gateway.modules.workflows.processing.modes.modeAutomation,datetime,function _createActionItem,Yes
-gateway.modules.workflows.processing.modes.modeAutomation,json,header,Yes
-gateway.modules.workflows.processing.modes.modeAutomation,logging,header,Yes
-gateway.modules.workflows.processing.modes.modeAutomation,modules.datamodels.datamodelChat,header,Yes
-gateway.modules.workflows.processing.modes.modeAutomation,modules.datamodels.datamodelChat,header,Yes
-gateway.modules.workflows.processing.modes.modeAutomation,modules.shared.timeUtils,header,Yes
-gateway.modules.workflows.processing.modes.modeAutomation,modules.workflows.processing.modes.modeBase,header,Yes
-gateway.modules.workflows.processing.modes.modeAutomation,modules.workflows.processing.shared.stateTools,header,Yes
-gateway.modules.workflows.processing.modes.modeAutomation,typing,header,Yes
-gateway.modules.workflows.processing.modes.modeAutomation,uuid,header,Yes
-gateway.modules.workflows.processing.modes.modeAutomation,uuid,function _createActionItem,Yes
-gateway.modules.workflows.processing.modes.modeBase,abc,header,Yes
-gateway.modules.workflows.processing.modes.modeBase,logging,header,Yes
-gateway.modules.workflows.processing.modes.modeBase,modules.datamodels.datamodelChat,header,Yes
-gateway.modules.workflows.processing.modes.modeBase,modules.datamodels.datamodelChat,header,Yes
-gateway.modules.workflows.processing.modes.modeBase,modules.workflows.processing.core.actionExecutor,header,Yes
-gateway.modules.workflows.processing.modes.modeBase,modules.workflows.processing.core.messageCreator,header,Yes
-gateway.modules.workflows.processing.modes.modeBase,modules.workflows.processing.core.taskPlanner,header,Yes
-gateway.modules.workflows.processing.modes.modeBase,modules.workflows.processing.core.validator,header,Yes
-gateway.modules.workflows.processing.modes.modeBase,typing,header,Yes
-gateway.modules.workflows.processing.modes.modeDynamic,datetime,header,Yes
-gateway.modules.workflows.processing.modes.modeDynamic,json,header,Yes
-gateway.modules.workflows.processing.modes.modeDynamic,logging,header,Yes
-gateway.modules.workflows.processing.modes.modeDynamic,modules.datamodels.datamodelAi,function _planSelect,Yes
-gateway.modules.workflows.processing.modes.modeDynamic,modules.datamodels.datamodelAi,function _actExecute,Yes
-gateway.modules.workflows.processing.modes.modeDynamic,modules.datamodels.datamodelAi,function _refineDecide,Yes
-gateway.modules.workflows.processing.modes.modeDynamic,modules.datamodels.datamodelChat,header,Yes
-gateway.modules.workflows.processing.modes.modeDynamic,modules.datamodels.datamodelChat,header,Yes
-gateway.modules.workflows.processing.modes.modeDynamic,modules.datamodels.datamodelDocref,function _actExecute,Yes
-gateway.modules.workflows.processing.modes.modeDynamic,modules.datamodels.datamodelDocref,function _planSelect,Yes
-gateway.modules.workflows.processing.modes.modeDynamic,modules.datamodels.datamodelDocref,function _planSelect,Yes
-gateway.modules.workflows.processing.modes.modeDynamic,modules.datamodels.datamodelWorkflow,function _planSelect,Yes
-gateway.modules.workflows.processing.modes.modeDynamic,modules.datamodels.datamodelWorkflow,function _actExecute,Yes
-gateway.modules.workflows.processing.modes.modeDynamic,modules.shared.jsonUtils,function _planSelect,Yes
-gateway.modules.workflows.processing.modes.modeDynamic,modules.shared.jsonUtils,function _actExecute,Yes
-gateway.modules.workflows.processing.modes.modeDynamic,modules.shared.jsonUtils,function _refineDecide,Yes
-gateway.modules.workflows.processing.modes.modeDynamic,modules.shared.timeUtils,header,Yes
-gateway.modules.workflows.processing.modes.modeDynamic,modules.workflows.processing.adaptive,header,Yes
-gateway.modules.workflows.processing.modes.modeDynamic,modules.workflows.processing.adaptive.adaptiveLearningEngine,header,Yes
-gateway.modules.workflows.processing.modes.modeDynamic,modules.workflows.processing.modes.modeBase,header,Yes
-gateway.modules.workflows.processing.modes.modeDynamic,modules.workflows.processing.shared.executionState,header,Yes
-gateway.modules.workflows.processing.modes.modeDynamic,modules.workflows.processing.shared.methodDiscovery,function _actExecute,Yes
-gateway.modules.workflows.processing.modes.modeDynamic,modules.workflows.processing.shared.methodDiscovery,function _actExecute,Yes
-gateway.modules.workflows.processing.modes.modeDynamic,modules.workflows.processing.shared.placeholderFactory,header,Yes
-gateway.modules.workflows.processing.modes.modeDynamic,modules.workflows.processing.shared.promptGenerationActionsDynamic,header,Yes
-gateway.modules.workflows.processing.modes.modeDynamic,modules.workflows.processing.shared.stateTools,header,Yes
-gateway.modules.workflows.processing.modes.modeDynamic,re,header,Yes
-gateway.modules.workflows.processing.modes.modeDynamic,time,header,Yes
-gateway.modules.workflows.processing.modes.modeDynamic,typing,header,Yes
-gateway.modules.workflows.processing.modes.modeDynamic,uuid,function _createActionItem,Yes
-gateway.modules.workflows.processing.modes.modeDynamic,uuid,function _createActionItem,Yes
-gateway.modules.workflows.processing.shared.executionState,logging,header,Yes
-gateway.modules.workflows.processing.shared.executionState,modules.datamodels.datamodelChat,header,Yes
-gateway.modules.workflows.processing.shared.executionState,typing,header,Yes
-gateway.modules.workflows.processing.shared.methodDiscovery,importlib,header,Yes
-gateway.modules.workflows.processing.shared.methodDiscovery,inspect,header,Yes
-gateway.modules.workflows.processing.shared.methodDiscovery,logging,header,Yes
-gateway.modules.workflows.processing.shared.methodDiscovery,modules.workflows.methods.methodBase,header,Yes
-gateway.modules.workflows.processing.shared.methodDiscovery,pkgutil,header,Yes
-gateway.modules.workflows.processing.shared.methodDiscovery,typing,header,Yes
-gateway.modules.workflows.processing.shared.placeholderFactory,json,header,Yes
-gateway.modules.workflows.processing.shared.placeholderFactory,logging,header,Yes
-gateway.modules.workflows.processing.shared.placeholderFactory,modules.datamodels.datamodelChat,function extractReviewContent,Yes
-gateway.modules.workflows.processing.shared.placeholderFactory,modules.datamodels.datamodelChat,function extractReviewContent,Yes
-gateway.modules.workflows.processing.shared.placeholderFactory,modules.interfaces.interfaceDbApp,function extractLatestRefinementFeedback,Yes
-gateway.modules.workflows.processing.shared.placeholderFactory,modules.interfaces.interfaceDbChat,function extractLatestRefinementFeedback,Yes
-gateway.modules.workflows.processing.shared.placeholderFactory,modules.workflows.processing.shared.methodDiscovery,header,Yes
-gateway.modules.workflows.processing.shared.placeholderFactory,typing,header,Yes
-gateway.modules.workflows.processing.shared.promptGenerationActionsDynamic,json,header,Yes
-gateway.modules.workflows.processing.shared.promptGenerationActionsDynamic,modules.datamodels.datamodelChat,header,Yes
-gateway.modules.workflows.processing.shared.promptGenerationActionsDynamic,modules.workflows.processing.shared.methodDiscovery,header,Yes
-gateway.modules.workflows.processing.shared.promptGenerationActionsDynamic,modules.workflows.processing.shared.placeholderFactory,header,Yes
-gateway.modules.workflows.processing.shared.promptGenerationActionsDynamic,typing,header,Yes
-gateway.modules.workflows.processing.shared.promptGenerationTaskplan,logging,header,Yes
-gateway.modules.workflows.processing.shared.promptGenerationTaskplan,modules.datamodels.datamodelChat,header,Yes
-gateway.modules.workflows.processing.shared.promptGenerationTaskplan,modules.workflows.processing.shared.placeholderFactory,header,Yes
-gateway.modules.workflows.processing.shared.promptGenerationTaskplan,typing,header,Yes
-gateway.modules.workflows.processing.shared.stateTools,logging,header,Yes
-gateway.modules.workflows.processing.shared.stateTools,typing,header,Yes
-gateway.modules.workflows.processing.workflowProcessor,json,header,Yes
-gateway.modules.workflows.processing.workflowProcessor,logging,header,Yes
-gateway.modules.workflows.processing.workflowProcessor,modules.datamodels,header,Yes
-gateway.modules.workflows.processing.workflowProcessor,modules.datamodels.datamodelAi,header,Yes
-gateway.modules.workflows.processing.workflowProcessor,modules.datamodels.datamodelChat,header,Yes
-gateway.modules.workflows.processing.workflowProcessor,modules.datamodels.datamodelChat,header,Yes
-gateway.modules.workflows.processing.workflowProcessor,modules.datamodels.datamodelWorkflow,header,Yes
-gateway.modules.workflows.processing.workflowProcessor,modules.shared.jsonUtils,header,Yes
-gateway.modules.workflows.processing.workflowProcessor,modules.workflows.processing.modes.modeAutomation,header,Yes
-gateway.modules.workflows.processing.workflowProcessor,modules.workflows.processing.modes.modeBase,header,Yes
-gateway.modules.workflows.processing.workflowProcessor,modules.workflows.processing.modes.modeDynamic,header,Yes
-gateway.modules.workflows.processing.workflowProcessor,modules.workflows.processing.shared.stateTools,header,Yes
-gateway.modules.workflows.processing.workflowProcessor,time,function generateTaskPlan,Yes
-gateway.modules.workflows.processing.workflowProcessor,time,function executeTask,Yes
-gateway.modules.workflows.processing.workflowProcessor,traceback,function fastPathExecute,Yes
-gateway.modules.workflows.processing.workflowProcessor,typing,header,Yes
-gateway.modules.workflows.workflowManager,asyncio,header,Yes
-gateway.modules.workflows.workflowManager,json,header,Yes
-gateway.modules.workflows.workflowManager,logging,header,Yes
-gateway.modules.workflows.workflowManager,modules.datamodels.datamodelChat,header,Yes
-gateway.modules.workflows.workflowManager,modules.datamodels.datamodelChat,header,Yes
-gateway.modules.workflows.workflowManager,modules.datamodels.datamodelWorkflow,function _executeTasks,Yes
-gateway.modules.workflows.workflowManager,modules.workflows.processing.shared.methodDiscovery,function workflowStart,Yes
-gateway.modules.workflows.workflowManager,modules.workflows.processing.shared.methodDiscovery,function workflowStart,Yes
-gateway.modules.workflows.workflowManager,modules.workflows.processing.shared.placeholderFactory,function _checkIfHistoryAvailable,Yes
-gateway.modules.workflows.workflowManager,modules.workflows.processing.shared.stateTools,header,Yes
-gateway.modules.workflows.workflowManager,modules.workflows.processing.workflowProcessor,header,Yes
-gateway.modules.workflows.workflowManager,typing,header,Yes
-gateway.modules.workflows.workflowManager,uuid,header,Yes
-gateway.scripts.script_analyze_function_imports,collections,header,Yes
-gateway.scripts.script_analyze_function_imports,csv,header,Yes
-gateway.scripts.script_analyze_function_imports,pathlib,header,Yes
-gateway.scripts.script_analyze_function_imports,typing,header,Yes
-gateway.scripts.script_analyze_imports,ast,header,Yes
-gateway.scripts.script_analyze_imports,csv,header,Yes
-gateway.scripts.script_analyze_imports,os,header,Yes
-gateway.scripts.script_analyze_imports,pathlib,header,Yes
-gateway.scripts.script_analyze_imports,sys,header,Yes
-gateway.scripts.script_analyze_imports,typing,header,Yes
-gateway.scripts.script_db_adapt_to_models,argparse,header,Yes
-gateway.scripts.script_db_adapt_to_models,ast,function _parsePydanticModels,Yes
-gateway.scripts.script_db_adapt_to_models,ast,function _extractType,Yes
-gateway.scripts.script_db_adapt_to_models,json,header,Yes
-gateway.scripts.script_db_adapt_to_models,logging,header,Yes
-gateway.scripts.script_db_adapt_to_models,modules.shared.configuration,header,Yes
-gateway.scripts.script_db_adapt_to_models,os,header,Yes
-gateway.scripts.script_db_adapt_to_models,pathlib,header,Yes
-gateway.scripts.script_db_adapt_to_models,psycopg2,header,Yes
-gateway.scripts.script_db_adapt_to_models,psycopg2.extras,header,Yes
-gateway.scripts.script_db_adapt_to_models,sys,header,Yes
-gateway.scripts.script_db_adapt_to_models,typing,header,Yes
-gateway.scripts.script_db_cleanup_duplicate_roles,argparse,header,Yes
-gateway.scripts.script_db_cleanup_duplicate_roles,dotenv,header,Yes
-gateway.scripts.script_db_cleanup_duplicate_roles,logging,header,Yes
-gateway.scripts.script_db_cleanup_duplicate_roles,modules.datamodels.datamodelRbac,header,Yes
-gateway.scripts.script_db_cleanup_duplicate_roles,modules.security.rootAccess,header,Yes
-gateway.scripts.script_db_cleanup_duplicate_roles,os,header,Yes
-gateway.scripts.script_db_cleanup_duplicate_roles,sys,header,Yes
-gateway.scripts.script_db_export_migration,argparse,header,Yes
-gateway.scripts.script_db_export_migration,datetime,header,Yes
-gateway.scripts.script_db_export_migration,json,header,Yes
-gateway.scripts.script_db_export_migration,logging,header,Yes
-gateway.scripts.script_db_export_migration,modules.shared.configuration,header,Yes
-gateway.scripts.script_db_export_migration,os,header,Yes
-gateway.scripts.script_db_export_migration,pathlib,header,Yes
-gateway.scripts.script_db_export_migration,psycopg2,header,Yes
-gateway.scripts.script_db_export_migration,psycopg2.extras,header,Yes
-gateway.scripts.script_db_export_migration,sys,header,Yes
-gateway.scripts.script_db_export_migration,typing,header,Yes
-gateway.scripts.script_generate_container_diagram,collections,header,Yes
-gateway.scripts.script_generate_container_diagram,csv,header,Yes
-gateway.scripts.script_generate_container_diagram,html,header,Yes
-gateway.scripts.script_generate_container_diagram,math,header,Yes
-gateway.scripts.script_generate_container_diagram,pathlib,header,Yes
-gateway.scripts.script_generate_container_diagram,typing,header,Yes
-gateway.scripts.script_generate_import_diagram,collections,header,Yes
-gateway.scripts.script_generate_import_diagram,csv,header,Yes
-gateway.scripts.script_generate_import_diagram,html,header,Yes
-gateway.scripts.script_generate_import_diagram,pathlib,header,Yes
-gateway.scripts.script_generate_import_diagram,typing,header,Yes
-gateway.scripts.script_generate_import_diagram,xml.etree.ElementTree,header,Yes
-gateway.scripts.script_remove_redundant_imports,ast,header,Yes
-gateway.scripts.script_remove_redundant_imports,collections,header,Yes
-gateway.scripts.script_remove_redundant_imports,csv,header,Yes
-gateway.scripts.script_remove_redundant_imports,pathlib,header,Yes
-gateway.scripts.script_remove_redundant_imports,re,header,Yes
-gateway.scripts.script_remove_redundant_imports,typing,header,Yes
-gateway.scripts.script_security_encrypt_all_env_files,argparse,header,Yes
-gateway.scripts.script_security_encrypt_all_env_files,datetime,header,Yes
-gateway.scripts.script_security_encrypt_all_env_files,modules.shared.configuration,header,Yes
-gateway.scripts.script_security_encrypt_all_env_files,pathlib,header,Yes
-gateway.scripts.script_security_encrypt_all_env_files,shutil,header,Yes
-gateway.scripts.script_security_encrypt_all_env_files,sys,header,Yes
-gateway.scripts.script_security_encrypt_all_env_files,typing,header,Yes
-gateway.scripts.script_security_encrypt_config_value,argparse,header,Yes
-gateway.scripts.script_security_encrypt_config_value,datetime,header,Yes
-gateway.scripts.script_security_encrypt_config_value,json,header,Yes
-gateway.scripts.script_security_encrypt_config_value,modules.shared.configuration,header,Yes
-gateway.scripts.script_security_encrypt_config_value,modules.shared.configuration,function main,Yes
-gateway.scripts.script_security_encrypt_config_value,os,header,Yes
-gateway.scripts.script_security_encrypt_config_value,pathlib,header,Yes
-gateway.scripts.script_security_encrypt_config_value,shutil,header,Yes
-gateway.scripts.script_security_encrypt_config_value,sys,header,Yes
-gateway.scripts.script_security_generate_master_keys,argparse,header,Yes
-gateway.scripts.script_security_generate_master_keys,base64,header,Yes
-gateway.scripts.script_security_generate_master_keys,os,header,Yes
-gateway.scripts.script_security_generate_master_keys,pathlib,header,Yes
-gateway.scripts.script_security_generate_master_keys,secrets,header,Yes
-gateway.scripts.script_security_generate_master_keys,sys,header,Yes
-gateway.scripts.script_stats_durations_from_log,argparse,header,Yes
-gateway.scripts.script_stats_durations_from_log,csv,header,Yes
-gateway.scripts.script_stats_durations_from_log,datetime,header,Yes
-gateway.scripts.script_stats_durations_from_log,re,header,Yes
-gateway.scripts.script_stats_durations_from_log,typing,header,Yes
-gateway.scripts.script_stats_get_codelines,argparse,header,Yes
-gateway.scripts.script_stats_get_codelines,os,header,Yes
-gateway.scripts.script_stats_get_codelines,pathlib,header,Yes
-gateway.scripts.script_stats_get_codelines,typing,header,Yes
-gateway.scripts.script_stats_showUnusedFunctions,ast,header,Yes
-gateway.scripts.script_stats_showUnusedFunctions,logging,header,Yes
-gateway.scripts.script_stats_showUnusedFunctions,os,header,Yes
-gateway.scripts.script_stats_showUnusedFunctions,pathlib,header,Yes
-gateway.scripts.script_stats_showUnusedFunctions,re,header,Yes
-gateway.scripts.script_stats_showUnusedFunctions,typing,header,Yes
-gateway.tests.conftest,os,header,Yes
-gateway.tests.conftest,pathlib,header,Yes
-gateway.tests.conftest,sys,header,Yes
-gateway.tests.functional.test01_ai_model_selection,asyncio,header,Yes
-gateway.tests.functional.test01_ai_model_selection,base64,header,Yes
-gateway.tests.functional.test01_ai_model_selection,modules.aicore.aicoreModelRegistry,header,Yes
-gateway.tests.functional.test01_ai_model_selection,modules.aicore.aicoreModelSelector,header,Yes
-gateway.tests.functional.test01_ai_model_selection,modules.datamodels.datamodelAi,header,Yes
-gateway.tests.functional.test01_ai_model_selection,modules.datamodels.datamodelUam,header,Yes
-gateway.tests.functional.test01_ai_model_selection,modules.interfaces.interfaceAiObjects,function initialize,Yes
-gateway.tests.functional.test01_ai_model_selection,modules.services,header,Yes
-gateway.tests.functional.test01_ai_model_selection,modules.services.serviceAi.mainServiceAi,function initialize,Yes
-gateway.tests.functional.test01_ai_model_selection,os,header,Yes
-gateway.tests.functional.test01_ai_model_selection,sys,header,Yes
-gateway.tests.functional.test02_ai_models,asyncio,header,Yes
-gateway.tests.functional.test02_ai_models,base64,header,Yes
-gateway.tests.functional.test02_ai_models,base64,function _createTestImage,Yes
-gateway.tests.functional.test02_ai_models,base64,function _saveImageResponse,Yes
-gateway.tests.functional.test02_ai_models,base64,function testModelOperation,Yes
-gateway.tests.functional.test02_ai_models,collections,function printTestSummary,Yes
-gateway.tests.functional.test02_ai_models,datetime,header,Yes
-gateway.tests.functional.test02_ai_models,json,header,Yes
-gateway.tests.functional.test02_ai_models,json,function testModelOperation,Yes
-gateway.tests.functional.test02_ai_models,json,function testModelOperation,Yes
-gateway.tests.functional.test02_ai_models,logging,function initialize,Yes
-gateway.tests.functional.test02_ai_models,modules.aicore.aicoreModelRegistry,function initialize,Yes
-gateway.tests.functional.test02_ai_models,modules.aicore.aicoreModelRegistry,function testModel,Yes
-gateway.tests.functional.test02_ai_models,modules.aicore.aicoreModelRegistry,function getAllAvailableModels,Yes
-gateway.tests.functional.test02_ai_models,modules.aicore.aicorePluginPerplexity,function initialize,Yes
-gateway.tests.functional.test02_ai_models,modules.aicore.aicorePluginTavily,function initialize,Yes
-gateway.tests.functional.test02_ai_models,modules.datamodels.datamodelAi,header,Yes
-gateway.tests.functional.test02_ai_models,modules.datamodels.datamodelAi,function _getTestPromptForOperation,Yes
-gateway.tests.functional.test02_ai_models,modules.datamodels.datamodelAi,function getAllAvailableModels,Yes
-gateway.tests.functional.test02_ai_models,modules.datamodels.datamodelAi,function testModelOperation,Yes
-gateway.tests.functional.test02_ai_models,modules.datamodels.datamodelAi,function testModelOperation,Yes
-gateway.tests.functional.test02_ai_models,modules.datamodels.datamodelChat,function initialize,Yes
-gateway.tests.functional.test02_ai_models,modules.datamodels.datamodelUam,header,Yes
-gateway.tests.functional.test02_ai_models,modules.services,header,Yes
-gateway.tests.functional.test02_ai_models,modules.services.serviceAi.mainServiceAi,function initialize,Yes
-gateway.tests.functional.test02_ai_models,modules.services.serviceExtraction.mainServiceExtraction,function initialize,Yes
-gateway.tests.functional.test02_ai_models,modules.shared.configuration,function _testTavilyDirect,Yes
-gateway.tests.functional.test02_ai_models,os,header,Yes
-gateway.tests.functional.test02_ai_models,sys,header,Yes
-gateway.tests.functional.test02_ai_models,tavily,function _testTavilyDirect,Yes
-gateway.tests.functional.test02_ai_models,typing,header,Yes
-gateway.tests.functional.test02_ai_models,uuid,function initialize,Yes
-gateway.tests.functional.test03_ai_operations,asyncio,header,Yes
-gateway.tests.functional.test03_ai_operations,datetime,header,Yes
-gateway.tests.functional.test03_ai_operations,json,function printSummary,Yes
-gateway.tests.functional.test03_ai_operations,json,function testOperation,Yes
-gateway.tests.functional.test03_ai_operations,logging,function initialize,Yes
-gateway.tests.functional.test03_ai_operations,modules.datamodels.datamodelAi,header,Yes
-gateway.tests.functional.test03_ai_operations,modules.datamodels.datamodelChat,header,Yes
-gateway.tests.functional.test03_ai_operations,modules.datamodels.datamodelChat,function _prepareTestImageDocument,Yes
-gateway.tests.functional.test03_ai_operations,modules.datamodels.datamodelChat,function _prepareTestImageDocument,Yes
-gateway.tests.functional.test03_ai_operations,modules.datamodels.datamodelUam,header,Yes
-gateway.tests.functional.test03_ai_operations,modules.interfaces.interfaceDbApp,function __init__,Yes
-gateway.tests.functional.test03_ai_operations,modules.interfaces.interfaceDbChat,function initialize,Yes
-gateway.tests.functional.test03_ai_operations,modules.interfaces.interfaceDbChat,function _prepareTestImageDocument,Yes
-gateway.tests.functional.test03_ai_operations,modules.interfaces.interfaceDbChat,function testOperation,Yes
-gateway.tests.functional.test03_ai_operations,modules.services,function initialize,Yes
-gateway.tests.functional.test03_ai_operations,modules.workflows.methods.methodAi,function initialize,Yes
-gateway.tests.functional.test03_ai_operations,os,header,Yes
-gateway.tests.functional.test03_ai_operations,sys,header,Yes
-gateway.tests.functional.test03_ai_operations,time,function initialize,Yes
-gateway.tests.functional.test03_ai_operations,time,function _prepareTestImageDocument,Yes
-gateway.tests.functional.test03_ai_operations,time,function testOperation,Yes
-gateway.tests.functional.test03_ai_operations,typing,header,Yes
-gateway.tests.functional.test03_ai_operations,uuid,function initialize,Yes
-gateway.tests.functional.test03_ai_operations,uuid,function _prepareTestImageDocument,Yes
-gateway.tests.functional.test03_ai_operations,uuid,function testOperation,Yes
-gateway.tests.functional.test04_ai_behavior,asyncio,header,Yes
-gateway.tests.functional.test04_ai_behavior,glob,function _getLatestDebugResponse,Yes
-gateway.tests.functional.test04_ai_behavior,json,header,Yes
-gateway.tests.functional.test04_ai_behavior,logging,function initialize,Yes
-gateway.tests.functional.test04_ai_behavior,modules.datamodels.datamodelAi,header,Yes
-gateway.tests.functional.test04_ai_behavior,modules.datamodels.datamodelChat,function initialize,Yes
-gateway.tests.functional.test04_ai_behavior,modules.datamodels.datamodelUam,header,Yes
-gateway.tests.functional.test04_ai_behavior,modules.datamodels.datamodelWorkflow,header,Yes
-gateway.tests.functional.test04_ai_behavior,modules.interfaces.interfaceDbApp,function __init__,Yes
-gateway.tests.functional.test04_ai_behavior,modules.interfaces.interfaceDbChat,function initialize,Yes
-gateway.tests.functional.test04_ai_behavior,modules.services,header,Yes
-gateway.tests.functional.test04_ai_behavior,os,header,Yes
-gateway.tests.functional.test04_ai_behavior,sys,header,Yes
-gateway.tests.functional.test04_ai_behavior,time,function initialize,Yes
-gateway.tests.functional.test04_ai_behavior,traceback,function testPromptBehavior,Yes
-gateway.tests.functional.test04_ai_behavior,typing,header,Yes
-gateway.tests.functional.test04_ai_behavior,uuid,function initialize,Yes
-gateway.tests.functional.test05_workflow_with_documents,asyncio,header,Yes
-gateway.tests.functional.test05_workflow_with_documents,json,header,Yes
-gateway.tests.functional.test05_workflow_with_documents,logging,function initialize,Yes
-gateway.tests.functional.test05_workflow_with_documents,modules.datamodels.datamodelChat,header,Yes
-gateway.tests.functional.test05_workflow_with_documents,modules.datamodels.datamodelUam,header,Yes
-gateway.tests.functional.test05_workflow_with_documents,modules.interfaces.interfaceDbApp,function __init__,Yes
-gateway.tests.functional.test05_workflow_with_documents,modules.interfaces.interfaceDbChat,header,Yes
-gateway.tests.functional.test05_workflow_with_documents,modules.services,header,Yes
-gateway.tests.functional.test05_workflow_with_documents,modules.workflows.automation,header,Yes
-gateway.tests.functional.test05_workflow_with_documents,os,header,Yes
-gateway.tests.functional.test05_workflow_with_documents,sys,header,Yes
-gateway.tests.functional.test05_workflow_with_documents,time,header,Yes
-gateway.tests.functional.test05_workflow_with_documents,traceback,function runTest,Yes
-gateway.tests.functional.test05_workflow_with_documents,typing,header,Yes
-gateway.tests.functional.test06_workflow_prompt_variations,asyncio,header,Yes
-gateway.tests.functional.test06_workflow_prompt_variations,json,header,Yes
-gateway.tests.functional.test06_workflow_prompt_variations,logging,function initialize,Yes
-gateway.tests.functional.test06_workflow_prompt_variations,modules.datamodels.datamodelChat,header,Yes
-gateway.tests.functional.test06_workflow_prompt_variations,modules.datamodels.datamodelUam,header,Yes
-gateway.tests.functional.test06_workflow_prompt_variations,modules.interfaces.interfaceDbApp,function __init__,Yes
-gateway.tests.functional.test06_workflow_prompt_variations,modules.interfaces.interfaceDbChat,header,Yes
-gateway.tests.functional.test06_workflow_prompt_variations,modules.services,header,Yes
-gateway.tests.functional.test06_workflow_prompt_variations,modules.workflows.automation,header,Yes
-gateway.tests.functional.test06_workflow_prompt_variations,os,header,Yes
-gateway.tests.functional.test06_workflow_prompt_variations,sys,header,Yes
-gateway.tests.functional.test06_workflow_prompt_variations,time,header,Yes
-gateway.tests.functional.test06_workflow_prompt_variations,traceback,function testSimplePrompt,Yes
-gateway.tests.functional.test06_workflow_prompt_variations,traceback,function testMergeDocumentsToWord,Yes
-gateway.tests.functional.test06_workflow_prompt_variations,traceback,function testStructuredDataToExcel,Yes
-gateway.tests.functional.test06_workflow_prompt_variations,traceback,function runAllTests,Yes
-gateway.tests.functional.test06_workflow_prompt_variations,typing,header,Yes
-gateway.tests.functional.test07_json_merge,json,header,Yes
-gateway.tests.functional.test07_json_merge,modules.services.serviceAi.subJsonResponseHandling,header,Yes
-gateway.tests.functional.test07_json_merge,modules.shared.jsonUtils,header,Yes
-gateway.tests.functional.test07_json_merge,os,header,Yes
-gateway.tests.functional.test07_json_merge,sys,header,Yes
-gateway.tests.functional.test07_json_merge,traceback,header,Yes
-gateway.tests.functional.test08_json_finalization,json,header,Yes
-gateway.tests.functional.test08_json_finalization,modules.services.serviceAi.subJsonResponseHandling,header,Yes
-gateway.tests.functional.test08_json_finalization,modules.shared.jsonUtils,header,Yes
-gateway.tests.functional.test08_json_finalization,os,header,Yes
-gateway.tests.functional.test08_json_finalization,sys,header,Yes
-gateway.tests.functional.test08_json_finalization,traceback,function testEndToEndFinalizationWithCorruption,Yes
-gateway.tests.functional.test08_json_finalization,traceback,header,Yes
-gateway.tests.functional.test09_document_generation_formats,asyncio,header,Yes
-gateway.tests.functional.test09_document_generation_formats,base64,header,Yes
-gateway.tests.functional.test09_document_generation_formats,json,header,Yes
-gateway.tests.functional.test09_document_generation_formats,logging,function initialize,Yes
-gateway.tests.functional.test09_document_generation_formats,modules.datamodels.datamodelChat,header,Yes
-gateway.tests.functional.test09_document_generation_formats,modules.datamodels.datamodelUam,header,Yes
-gateway.tests.functional.test09_document_generation_formats,modules.interfaces.interfaceDbApp,function __init__,Yes
-gateway.tests.functional.test09_document_generation_formats,modules.interfaces.interfaceDbChat,header,Yes
-gateway.tests.functional.test09_document_generation_formats,modules.services,header,Yes
-gateway.tests.functional.test09_document_generation_formats,modules.shared.configuration,function initialize,Yes
-gateway.tests.functional.test09_document_generation_formats,modules.workflows.automation,header,Yes
-gateway.tests.functional.test09_document_generation_formats,os,header,Yes
-gateway.tests.functional.test09_document_generation_formats,sys,header,Yes
-gateway.tests.functional.test09_document_generation_formats,time,header,Yes
-gateway.tests.functional.test09_document_generation_formats,traceback,function uploadPdfFile,Yes
-gateway.tests.functional.test09_document_generation_formats,traceback,function runTest,Yes
-gateway.tests.functional.test09_document_generation_formats,traceback,function testRefactoringFeatures,Yes
-gateway.tests.functional.test09_document_generation_formats,traceback,function testAllFormats,Yes
-gateway.tests.functional.test09_document_generation_formats,typing,header,Yes
-gateway.tests.functional.test10_document_generation_formats,asyncio,header,Yes
-gateway.tests.functional.test10_document_generation_formats,base64,header,Yes
-gateway.tests.functional.test10_document_generation_formats,json,header,Yes
-gateway.tests.functional.test10_document_generation_formats,logging,function initialize,Yes
-gateway.tests.functional.test10_document_generation_formats,modules.datamodels.datamodelChat,header,Yes
-gateway.tests.functional.test10_document_generation_formats,modules.datamodels.datamodelUam,header,Yes
-gateway.tests.functional.test10_document_generation_formats,modules.interfaces.interfaceDbApp,function __init__,Yes
-gateway.tests.functional.test10_document_generation_formats,modules.interfaces.interfaceDbChat,header,Yes
-gateway.tests.functional.test10_document_generation_formats,modules.services,header,Yes
-gateway.tests.functional.test10_document_generation_formats,modules.shared.configuration,function initialize,Yes
-gateway.tests.functional.test10_document_generation_formats,modules.workflows.automation,header,Yes
-gateway.tests.functional.test10_document_generation_formats,os,header,Yes
-gateway.tests.functional.test10_document_generation_formats,sys,header,Yes
-gateway.tests.functional.test10_document_generation_formats,time,header,Yes
-gateway.tests.functional.test10_document_generation_formats,traceback,function uploadPdfFile,Yes
-gateway.tests.functional.test10_document_generation_formats,traceback,function runTest,Yes
-gateway.tests.functional.test10_document_generation_formats,traceback,function testAllFormats,Yes
-gateway.tests.functional.test10_document_generation_formats,typing,header,Yes
-gateway.tests.functional.test11_code_generation_formats,asyncio,header,Yes
-gateway.tests.functional.test11_code_generation_formats,csv,header,Yes
-gateway.tests.functional.test11_code_generation_formats,io,header,Yes
-gateway.tests.functional.test11_code_generation_formats,json,header,Yes
-gateway.tests.functional.test11_code_generation_formats,logging,function initialize,Yes
-gateway.tests.functional.test11_code_generation_formats,modules.datamodels.datamodelChat,header,Yes
-gateway.tests.functional.test11_code_generation_formats,modules.datamodels.datamodelUam,header,Yes
-gateway.tests.functional.test11_code_generation_formats,modules.interfaces.interfaceDbApp,function __init__,Yes
-gateway.tests.functional.test11_code_generation_formats,modules.interfaces.interfaceDbChat,header,Yes
-gateway.tests.functional.test11_code_generation_formats,modules.services,header,Yes
-gateway.tests.functional.test11_code_generation_formats,modules.shared.configuration,function initialize,Yes
-gateway.tests.functional.test11_code_generation_formats,modules.workflows.automation,header,Yes
-gateway.tests.functional.test11_code_generation_formats,os,header,Yes
-gateway.tests.functional.test11_code_generation_formats,sys,header,Yes
-gateway.tests.functional.test11_code_generation_formats,time,header,Yes
-gateway.tests.functional.test11_code_generation_formats,traceback,function runTest,Yes
-gateway.tests.functional.test11_code_generation_formats,traceback,function testAllFormats,Yes
-gateway.tests.functional.test11_code_generation_formats,typing,header,Yes
-gateway.tests.functional.test11_code_generation_formats,xml.etree.ElementTree,header,Yes
-gateway.tests.functional.test12_json_split_merge,asyncio,header,Yes
-gateway.tests.functional.test12_json_split_merge,json,header,Yes
-gateway.tests.functional.test12_json_split_merge,modules.services.serviceAi.subJsonMerger,header,Yes
-gateway.tests.functional.test12_json_split_merge,modules.shared.jsonContinuation,header,Yes
-gateway.tests.functional.test12_json_split_merge,modules.shared.jsonUtils,function _loadTableJsonExample,Yes
-gateway.tests.functional.test12_json_split_merge,modules.shared.jsonUtils,function testJsonSplitMerge,Yes
-gateway.tests.functional.test12_json_split_merge,modules.shared.jsonUtils,function normalizeJson,Yes
-gateway.tests.functional.test12_json_split_merge,os,header,Yes
-gateway.tests.functional.test12_json_split_merge,random,header,Yes
-gateway.tests.functional.test12_json_split_merge,random,function testJsonSplitMerge,Yes
-gateway.tests.functional.test12_json_split_merge,sys,header,Yes
-gateway.tests.functional.test12_json_split_merge,time,header,Yes
-gateway.tests.functional.test12_json_split_merge,traceback,function runTest,Yes
-gateway.tests.functional.test12_json_split_merge,traceback,function testAllJsonFiles,Yes
-gateway.tests.functional.test12_json_split_merge,typing,header,Yes
-gateway.tests.functional.test13_json_completion_cuts,asyncio,header,Yes
-gateway.tests.functional.test13_json_completion_cuts,json,header,Yes
-gateway.tests.functional.test13_json_completion_cuts,modules.shared.jsonContinuation,header,Yes
-gateway.tests.functional.test13_json_completion_cuts,os,header,Yes
-gateway.tests.functional.test13_json_completion_cuts,sys,header,Yes
-gateway.tests.functional.test13_json_completion_cuts,traceback,function runTest,Yes
-gateway.tests.functional.test13_json_completion_cuts,typing,header,Yes
-gateway.tests.functional.test14_json_continuation_context,asyncio,header,Yes
-gateway.tests.functional.test14_json_continuation_context,json,header,Yes
-gateway.tests.functional.test14_json_continuation_context,modules.shared.jsonContinuation,header,Yes
-gateway.tests.functional.test14_json_continuation_context,os,header,Yes
-gateway.tests.functional.test14_json_continuation_context,sys,header,Yes
-gateway.tests.functional.test14_json_continuation_context,traceback,function testSpecificCutJson,Yes
-gateway.tests.functional.test14_json_continuation_context,traceback,function runTest,Yes
-gateway.tests.functional.test14_json_continuation_context,typing,header,Yes
-gateway.tests.functional.test_kpi_full,json,header,Yes
-gateway.tests.functional.test_kpi_full,modules.datamodels.datamodelAi,header,Yes
-gateway.tests.functional.test_kpi_full,modules.services.serviceAi.subJsonResponseHandling,header,Yes
-gateway.tests.functional.test_kpi_full,modules.shared.jsonUtils,header,Yes
-gateway.tests.functional.test_kpi_full,os,header,Yes
-gateway.tests.functional.test_kpi_full,pytest,header,Yes
-gateway.tests.functional.test_kpi_full,sys,header,Yes
-gateway.tests.functional.test_kpi_incomplete,json,header,Yes
-gateway.tests.functional.test_kpi_incomplete,modules.datamodels.datamodelAi,header,Yes
-gateway.tests.functional.test_kpi_incomplete,modules.services.serviceAi.subJsonResponseHandling,header,Yes
-gateway.tests.functional.test_kpi_incomplete,modules.shared.jsonUtils,header,Yes
-gateway.tests.functional.test_kpi_incomplete,os,header,Yes
-gateway.tests.functional.test_kpi_incomplete,pytest,header,Yes
-gateway.tests.functional.test_kpi_incomplete,sys,header,Yes
-gateway.tests.functional.test_kpi_incomplete,traceback,header,Yes
-gateway.tests.functional.test_kpi_path,json,header,Yes
-gateway.tests.functional.test_kpi_path,modules.services.serviceAi.subJsonResponseHandling,header,Yes
-gateway.tests.functional.test_kpi_path,os,header,Yes
-gateway.tests.functional.test_kpi_path,sys,header,Yes
-gateway.tests.functional.test_kpi_path,traceback,header,Yes
-gateway.tests.integration.rbac.test_rbac_database,modules.connectors.connectorDbPostgre,header,Yes
-gateway.tests.integration.rbac.test_rbac_database,modules.datamodels.datamodelUam,header,Yes
-gateway.tests.integration.rbac.test_rbac_database,modules.datamodels.datamodelUam,function testBuildRbacWhereClauseUserConnectionTable,Yes
-gateway.tests.integration.rbac.test_rbac_database,modules.shared.configuration,header,Yes
-gateway.tests.integration.rbac.test_rbac_database,pytest,header,Yes
-gateway.tests.integration.workflows.test_workflow_execution,modules.datamodels.datamodelChat,header,Yes
-gateway.tests.integration.workflows.test_workflow_execution,modules.datamodels.datamodelDocref,header,Yes
-gateway.tests.integration.workflows.test_workflow_execution,modules.datamodels.datamodelWorkflow,header,Yes
-gateway.tests.integration.workflows.test_workflow_execution,modules.datamodels.datamodelWorkflow,function test_extractContentParameters_structure,Yes
-gateway.tests.integration.workflows.test_workflow_execution,modules.shared.jsonUtils,function test_parseJsonWithModel_with_code_fences,Yes
-gateway.tests.integration.workflows.test_workflow_execution,modules.shared.jsonUtils,function test_parseJsonWithModel_with_extra_text,Yes
-gateway.tests.integration.workflows.test_workflow_execution,pytest,header,Yes
-gateway.tests.integration.workflows.test_workflow_execution,unittest.mock,header,Yes
-gateway.tests.integration.workflows.test_workflow_execution,uuid,header,Yes
-gateway.tests.unit.datamodels.test_docref,modules.datamodels.datamodelDocref,header,Yes
-gateway.tests.unit.datamodels.test_docref,pytest,header,Yes
-gateway.tests.unit.datamodels.test_workflow_models,json,header,Yes
-gateway.tests.unit.datamodels.test_workflow_models,modules.datamodels.datamodelAi,header,Yes
-gateway.tests.unit.datamodels.test_workflow_models,modules.datamodels.datamodelDocref,header,Yes
-gateway.tests.unit.datamodels.test_workflow_models,modules.datamodels.datamodelExtraction,header,Yes
-gateway.tests.unit.datamodels.test_workflow_models,modules.datamodels.datamodelWorkflow,header,Yes
-gateway.tests.unit.datamodels.test_workflow_models,pytest,header,Yes
-gateway.tests.unit.datamodels.test_workflow_models,typing,header,Yes
-gateway.tests.unit.rbac.test_rbac_bootstrap,modules.datamodels.datamodelRbac,header,Yes
-gateway.tests.unit.rbac.test_rbac_bootstrap,modules.datamodels.datamodelUam,header,Yes
-gateway.tests.unit.rbac.test_rbac_bootstrap,modules.datamodels.datamodelUam,header,Yes
-gateway.tests.unit.rbac.test_rbac_bootstrap,modules.interfaces.interfaceBootstrap,header,Yes
-gateway.tests.unit.rbac.test_rbac_bootstrap,pytest,header,Yes
-gateway.tests.unit.rbac.test_rbac_bootstrap,unittest.mock,header,Yes
-gateway.tests.unit.rbac.test_rbac_permissions,modules.connectors.connectorDbPostgre,header,Yes
-gateway.tests.unit.rbac.test_rbac_permissions,modules.datamodels.datamodelRbac,header,Yes
-gateway.tests.unit.rbac.test_rbac_permissions,modules.datamodels.datamodelUam,header,Yes
-gateway.tests.unit.rbac.test_rbac_permissions,modules.security.rbac,header,Yes
-gateway.tests.unit.rbac.test_rbac_permissions,pytest,header,Yes
-gateway.tests.unit.rbac.test_rbac_permissions,unittest.mock,header,Yes
-gateway.tests.unit.services.test_json_extraction_merging,json,header,Yes
-gateway.tests.unit.services.test_json_extraction_merging,modules.datamodels.datamodelExtraction,header,Yes
-gateway.tests.unit.services.test_json_extraction_merging,modules.services.serviceExtraction.mainServiceExtraction,header,Yes
-gateway.tests.unit.services.test_json_extraction_merging,os,header,Yes
-gateway.tests.unit.services.test_json_extraction_merging,sys,header,Yes
-gateway.tests.unit.services.test_json_extraction_merging,traceback,function main,Yes
-gateway.tests.unit.utils.test_json_utils,json,header,Yes
-gateway.tests.unit.utils.test_json_utils,modules.datamodels.datamodelWorkflow,header,Yes
-gateway.tests.unit.utils.test_json_utils,modules.shared.jsonUtils,header,Yes
-gateway.tests.unit.utils.test_json_utils,pytest,header,Yes
-gateway.tests.unit.workflows.test_state_management,modules.datamodels.datamodelChat,header,Yes
-gateway.tests.unit.workflows.test_state_management,modules.datamodels.datamodelWorkflow,header,Yes
-gateway.tests.unit.workflows.test_state_management,pytest,header,Yes
-gateway.tests.unit.workflows.test_state_management,uuid,header,Yes
-gateway.tests.validation.test_architecture_validation,modules.datamodels.datamodelChat,header,Yes
-gateway.tests.validation.test_architecture_validation,modules.datamodels.datamodelDocref,header,Yes
-gateway.tests.validation.test_architecture_validation,modules.datamodels.datamodelWorkflow,header,Yes
-gateway.tests.validation.test_architecture_validation,modules.shared.jsonUtils,header,Yes
-gateway.tests.validation.test_architecture_validation,os,header,Yes
-gateway.tests.validation.test_architecture_validation,pytest,header,Yes
-gateway.tests.validation.test_architecture_validation,sys,header,Yes
diff --git a/scripts/migrate_async_to_sync.py b/scripts/migrate_async_to_sync.py
index 4e18b3ea..8b5626df 100644
--- a/scripts/migrate_async_to_sync.py
+++ b/scripts/migrate_async_to_sync.py
@@ -39,13 +39,6 @@ AUTH_DIR = GATEWAY_DIR / "modules" / "auth"
# Value: set of function names that must remain async def
_MUST_STAY_ASYNC: Dict[str, Set[str]] = {
# --- routes/ ---
- "modules/routes/routeAdminAutomationEvents.py": {
- "sync_all_automation_events", # await syncAutomationEvents(...)
- },
- "modules/routes/routeAdminRbacExport.py": {
- "import_global_rbac", # await file.read()
- "import_mandate_rbac", # await file.read()
- },
"modules/routes/routeDataConnections.py": {
"get_connections", # await token_refresh_service.refresh_expired_tokens(...)
},
@@ -68,9 +61,6 @@ _MUST_STAY_ASYNC: Dict[str, Set[str]] = {
"refresh_token", # await request.json()
},
# --- features/ ---
- "modules/features/automation/routeFeatureAutomation.py": {
- "execute_automation_route", # await executeAutomation(...)
- },
"modules/features/chatbot/routeFeatureChatbot.py": {
"stream_chatbot_start", # await chatProcess(...), contains async event_stream generator
"event_stream", # await request.is_disconnected(), await asyncio.wait_for(...)
diff --git a/scripts/script_db_adapt_to_models.py b/scripts/script_db_adapt_to_models.py
index 163c4cb8..6e5ca7a3 100644
--- a/scripts/script_db_adapt_to_models.py
+++ b/scripts/script_db_adapt_to_models.py
@@ -41,7 +41,7 @@ from modules.shared.configuration import APP_CONFIG
DATABASE_CONFIG = {
"poweron_app": ("DB_APP", ["datamodelUam", "datamodelRbac", "datamodelSecurity"]),
"poweron_chat": ("DB_CHAT", ["datamodelChat"]),
- "poweron_management": ("DB_MANAGEMENT", ["datamodelWorkflow", "datamodelFiles"]),
+ "poweron_management": ("DB_MANAGEMENT", ["datamodelWorkflow", "datamodelFiles", "datamodelUiLanguage"]),
}
# Python-Typ → PostgreSQL-Typ Mapping
diff --git a/scripts/script_db_export_migration.py b/scripts/script_db_export_migration.py
index 73c13b25..f286327a 100644
--- a/scripts/script_db_export_migration.py
+++ b/scripts/script_db_export_migration.py
@@ -102,8 +102,6 @@ except Exception as e:
# Alle PowerOn Datenbanken (für Export / Migration-Skripte)
ALL_DATABASES = [
"poweron_app",
- "poweron_automation",
- "poweron_automation2",
"poweron_billing",
"poweron_chat",
"poweron_chatbot",
@@ -128,8 +126,6 @@ DATABASE_CONFIG = {
"poweron_management": "DB_MANAGEMENT",
"poweron_realestate": "DB_REALESTATE",
"poweron_trustee": "DB_TRUSTEE",
- "poweron_automation": "DB",
- "poweron_automation2": "DB",
"poweron_billing": "DB",
"poweron_commcoach": "DB",
"poweron_knowledge": "DB",
diff --git a/tests/demo/README.md b/tests/demo/README.md
new file mode 100644
index 00000000..6887f94a
--- /dev/null
+++ b/tests/demo/README.md
@@ -0,0 +1,35 @@
+# Demo Test Suite
+
+Automated tests for the investor demo configuration.
+
+## Prerequisites
+
+1. Gateway DB must be running and accessible
+2. Demo config must be loaded first: Admin UI → `/admin/demo-config` → Load "Investor Demo April 2026"
+3. RMA credentials must be set in `gateway/config.ini`
+
+## Run
+
+```bash
+cd gateway/
+
+# All demo tests (structural, no AI calls):
+pytest tests/demo/ -v
+
+# Only bootstrap tests:
+pytest tests/demo/test_demo_bootstrap.py -v
+
+# Only UC1 trustee:
+pytest tests/demo/test_demo_uc1_trustee.py -v
+```
+
+## Test files
+
+| File | What it tests |
+|------|--------------|
+| `test_demo_bootstrap.py` | Idempotent load/remove, mandates, user, features, RMA, neutralization |
+| `test_demo_uc1_trustee.py` | Trustee instances, RMA config, system workflow templates |
+| `test_demo_uc2_realestate.py` | Workspace instances for agent demo |
+| `test_demo_uc3_chatbot.py` | Chatbot instance, knowledge-base files |
+| `test_demo_uc4_i18n.py` | i18n readiness, Spanish not pre-installed |
+| `test_demo_neutralization.py` | Neutralization config enabled, test PDF exists |
diff --git a/tests/demo/__init__.py b/tests/demo/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/demo/conftest.py b/tests/demo/conftest.py
new file mode 100644
index 00000000..79bf452b
--- /dev/null
+++ b/tests/demo/conftest.py
@@ -0,0 +1,64 @@
+"""
+Demo test fixtures.
+
+Provides a live DB connector and helpers for the demo test suite.
+All tests assume the gateway is configured and the DB is reachable.
+"""
+
+import pytest
+from modules.security.rootAccess import getRootDbAppConnector
+from modules.datamodels.datamodelUam import Mandate, UserInDB
+from modules.datamodels.datamodelFeatures import FeatureInstance
+from modules.datamodels.datamodelMembership import UserMandate
+
+
+@pytest.fixture(scope="session")
+def db():
+ """Root DB connector (session-scoped, reused across all tests)."""
+ return getRootDbAppConnector()
+
+
+@pytest.fixture(scope="session")
+def demoConfig():
+ """The investor demo config instance."""
+ from modules.demoConfigs import _getDemoConfigByCode
+ cfg = _getDemoConfigByCode("investor-demo-2026")
+ assert cfg is not None, "Demo config 'investor-demo-2026' not found — check modules/demoConfigs/"
+ return cfg
+
+
+# ---------------------------------------------------------------------------
+# Mandate helpers — function-scoped so they always reflect current DB state
+# (test_removeAndReload recreates mandates with new IDs mid-session)
+# ---------------------------------------------------------------------------
+
+@pytest.fixture
+def mandateHappylife(db):
+ """HappyLife AG mandate (must exist after bootstrap load)."""
+ records = db.getRecordset(Mandate, recordFilter={"name": "happylife"})
+ assert records, "Mandate 'happylife' not found — run demo config load first"
+ return records[0]
+
+
+@pytest.fixture
+def mandateAlpina(db):
+ """Alpina Treuhand AG mandate (must exist after bootstrap load)."""
+ records = db.getRecordset(Mandate, recordFilter={"name": "alpina-treuhand"})
+ assert records, "Mandate 'alpina-treuhand' not found — run demo config load first"
+ return records[0]
+
+
+@pytest.fixture
+def demoUser(db):
+ """Patrick Helvetia user (must exist after bootstrap load)."""
+ records = db.getRecordset(UserInDB, recordFilter={"username": "patrick.helvetia"})
+ assert records, "User 'patrick.helvetia' not found — run demo config load first"
+ return records[0]
+
+
+def _getFeatureInstances(db, mandateId: str, featureCode: str):
+ """Helper: get feature instances for a mandate + code."""
+ return db.getRecordset(FeatureInstance, recordFilter={
+ "mandateId": mandateId,
+ "featureCode": featureCode,
+ })
diff --git a/tests/demo/test_demo_api.py b/tests/demo/test_demo_api.py
new file mode 100644
index 00000000..edb31086
--- /dev/null
+++ b/tests/demo/test_demo_api.py
@@ -0,0 +1,66 @@
+"""
+T-API: Demo Config API endpoint verification.
+
+Tests the admin API endpoints for listing, loading, and removing demo configs.
+Uses FastAPI TestClient (no running server needed).
+
+Note: Login requires CSRF + form-data + httpOnly cookies, so we test
+unauthenticated rejection and the discovery module directly.
+"""
+
+import pytest
+
+
+class TestDemoConfigDiscovery:
+ """Test the auto-discovery module (no HTTP needed)."""
+
+ def test_discoveryFindsInvestorConfig(self):
+ from modules.demoConfigs import _getAvailableDemoConfigs
+ configs = _getAvailableDemoConfigs()
+ assert "investor-demo-2026" in configs, f"Available configs: {list(configs.keys())}"
+
+ def test_getByCodeReturnsInstance(self):
+ from modules.demoConfigs import _getDemoConfigByCode
+ cfg = _getDemoConfigByCode("investor-demo-2026")
+ assert cfg is not None
+ assert cfg.code == "investor-demo-2026"
+ assert cfg.label == "Investor Demo April 2026"
+
+ def test_getByCodeReturnsNoneForUnknown(self):
+ from modules.demoConfigs import _getDemoConfigByCode
+ cfg = _getDemoConfigByCode("nonexistent-config")
+ assert cfg is None
+
+ def test_toDictHasRequiredFields(self):
+ from modules.demoConfigs import _getDemoConfigByCode
+ cfg = _getDemoConfigByCode("investor-demo-2026")
+ d = cfg.toDict()
+ assert "code" in d
+ assert "label" in d
+ assert "description" in d
+ assert d["code"] == "investor-demo-2026"
+
+
+class TestDemoConfigApiEndpoints:
+ """Test API endpoints via TestClient."""
+
+ @pytest.fixture(scope="class")
+ def client(self):
+ try:
+ from app import app
+ from fastapi.testclient import TestClient
+ return TestClient(app)
+ except Exception as e:
+ pytest.skip(f"Cannot create TestClient: {e}")
+
+ def test_listEndpointRejectsUnauthenticated(self, client):
+ response = client.get("/api/admin/demo-config")
+ assert response.status_code in (401, 403)
+
+ def test_loadEndpointRejectsUnauthenticated(self, client):
+ response = client.post("/api/admin/demo-config/investor-demo-2026/load")
+ assert response.status_code in (401, 403)
+
+ def test_removeEndpointRejectsUnauthenticated(self, client):
+ response = client.post("/api/admin/demo-config/investor-demo-2026/remove")
+ assert response.status_code in (401, 403)
diff --git a/tests/demo/test_demo_bootstrap.py b/tests/demo/test_demo_bootstrap.py
new file mode 100644
index 00000000..1d725442
--- /dev/null
+++ b/tests/demo/test_demo_bootstrap.py
@@ -0,0 +1,133 @@
+"""
+T-BOOT: Bootstrap idempotency and demo state verification.
+
+Tests that the demo config can be loaded twice without errors
+and that all expected objects exist afterwards.
+"""
+
+import pytest
+from modules.datamodels.datamodelUam import Mandate, UserInDB
+from modules.datamodels.datamodelFeatures import FeatureInstance
+from modules.datamodels.datamodelMembership import UserMandate
+from tests.demo.conftest import _getFeatureInstances
+
+
+class TestDemoBootstrap:
+
+ def test_loadIsIdempotent(self, db, demoConfig):
+ """Loading the demo config twice must not raise errors."""
+ summary1 = demoConfig.load(db)
+ assert "errors" not in summary1 or len(summary1.get("errors", [])) == 0, f"First load errors: {summary1['errors']}"
+
+ summary2 = demoConfig.load(db)
+ assert "errors" not in summary2 or len(summary2.get("errors", [])) == 0, f"Second load errors: {summary2['errors']}"
+
+ def test_mandateHappylifeExists(self, db):
+ records = db.getRecordset(Mandate, recordFilter={"name": "happylife"})
+ assert len(records) == 1
+ assert records[0].get("label") == "HappyLife AG"
+ assert records[0].get("enabled") is True
+
+ def test_mandateAlpinaExists(self, db):
+ records = db.getRecordset(Mandate, recordFilter={"name": "alpina-treuhand"})
+ assert len(records) == 1
+ assert records[0].get("label") == "Alpina Treuhand AG"
+
+ def test_userPatrickExists(self, db):
+ records = db.getRecordset(UserInDB, recordFilter={"username": "patrick.helvetia"})
+ assert len(records) == 1
+ user = records[0]
+ assert user.get("email") == "p.motsch@poweron.swiss"
+ assert user.get("isSysAdmin") is True
+ assert user.get("language") == "en"
+
+ def test_userMembershipBothMandates(self, db, demoUser, mandateHappylife, mandateAlpina):
+ userId = demoUser.get("id")
+ for mandate in [mandateHappylife, mandateAlpina]:
+ mid = mandate.get("id")
+ memberships = db.getRecordset(UserMandate, recordFilter={"userId": userId, "mandateId": mid})
+ assert len(memberships) >= 1, f"User not member of mandate {mandate.get('label')}"
+
+ @pytest.mark.parametrize("featureCode", ["workspace", "trustee", "graphicalEditor", "chatbot", "neutralization"])
+ def test_happylifeFeaturesExist(self, db, mandateHappylife, featureCode):
+ mid = mandateHappylife.get("id")
+ instances = _getFeatureInstances(db, mid, featureCode)
+ assert len(instances) >= 1, f"Feature '{featureCode}' missing in HappyLife AG"
+
+ @pytest.mark.parametrize("featureCode", ["workspace", "trustee", "graphicalEditor", "neutralization"])
+ def test_alpinaFeaturesExist(self, db, mandateAlpina, featureCode):
+ mid = mandateAlpina.get("id")
+ instances = _getFeatureInstances(db, mid, featureCode)
+ assert len(instances) >= 1, f"Feature '{featureCode}' missing in Alpina Treuhand AG"
+
+ def test_alpinaNoChatbot(self, db, mandateAlpina):
+ """Alpina should NOT have a chatbot instance."""
+ mid = mandateAlpina.get("id")
+ instances = _getFeatureInstances(db, mid, "chatbot")
+ assert len(instances) == 0, "Alpina Treuhand should not have chatbot"
+
+
+class TestDemoBootstrapRma:
+
+ def test_trusteeRmaConfigHappylife(self, db, mandateHappylife):
+ from modules.features.trustee.datamodelFeatureTrustee import TrusteeAccountingConfig
+ mid = mandateHappylife.get("id")
+ instances = _getFeatureInstances(db, mid, "trustee")
+ assert instances, "No trustee instance in HappyLife"
+ iid = instances[0].get("id")
+ configs = db.getRecordset(TrusteeAccountingConfig, recordFilter={"featureInstanceId": iid})
+ assert len(configs) >= 1, "No RMA config for HappyLife trustee"
+ assert configs[0].get("connectorType") == "rma"
+ assert configs[0].get("isActive") is True
+
+ def test_trusteeRmaConfigAlpina(self, db, mandateAlpina):
+ from modules.features.trustee.datamodelFeatureTrustee import TrusteeAccountingConfig
+ mid = mandateAlpina.get("id")
+ instances = _getFeatureInstances(db, mid, "trustee")
+ assert instances, "No trustee instance in Alpina"
+ iid = instances[0].get("id")
+ configs = db.getRecordset(TrusteeAccountingConfig, recordFilter={"featureInstanceId": iid})
+ assert len(configs) >= 1, "No RMA config for Alpina trustee"
+ assert configs[0].get("connectorType") == "rma"
+
+
+class TestDemoBootstrapNeutralization:
+
+ def test_neutralizationConfigHappylife(self, db, mandateHappylife):
+ from modules.features.neutralization.datamodelFeatureNeutralizer import DataNeutraliserConfig
+ mid = mandateHappylife.get("id")
+ instances = _getFeatureInstances(db, mid, "neutralization")
+ assert instances
+ iid = instances[0].get("id")
+ configs = db.getRecordset(DataNeutraliserConfig, recordFilter={"featureInstanceId": iid})
+ assert len(configs) >= 1, "No neutralization config for HappyLife"
+ assert configs[0].get("enabled") is True
+
+ def test_neutralizationConfigAlpina(self, db, mandateAlpina):
+ from modules.features.neutralization.datamodelFeatureNeutralizer import DataNeutraliserConfig
+ mid = mandateAlpina.get("id")
+ instances = _getFeatureInstances(db, mid, "neutralization")
+ assert instances
+ iid = instances[0].get("id")
+ configs = db.getRecordset(DataNeutraliserConfig, recordFilter={"featureInstanceId": iid})
+ assert len(configs) >= 1, "No neutralization config for Alpina"
+
+
+class TestDemoRemoveAndReload:
+
+ def test_removeAndReload(self, db, demoConfig):
+ """Remove all demo data, verify gone, then reload."""
+ removeSummary = demoConfig.remove(db)
+ assert len(removeSummary.get("errors", [])) == 0, f"Remove errors: {removeSummary['errors']}"
+
+ mandates = db.getRecordset(Mandate, recordFilter={"name": "happylife"})
+ assert len(mandates) == 0, "HappyLife mandate should be gone after remove"
+
+ users = db.getRecordset(UserInDB, recordFilter={"username": "patrick.helvetia"})
+ assert len(users) == 0, "User should be gone after remove"
+
+ loadSummary = demoConfig.load(db)
+ assert len(loadSummary.get("errors", [])) == 0, f"Reload errors: {loadSummary['errors']}"
+
+ mandates = db.getRecordset(Mandate, recordFilter={"name": "happylife"})
+ assert len(mandates) == 1, "HappyLife mandate should exist after reload"
diff --git a/tests/demo/test_demo_data_files.py b/tests/demo/test_demo_data_files.py
new file mode 100644
index 00000000..4e7a3d40
--- /dev/null
+++ b/tests/demo/test_demo_data_files.py
@@ -0,0 +1,44 @@
+"""
+T-DATA: Demo data files verification.
+
+Ensures all expected demo data files exist in gateway/demoData/.
+"""
+
+from pathlib import Path
+
+_DEMO_DATA_ROOT = Path(__file__).resolve().parent.parent.parent / "demoData"
+
+
+class TestDemoDataStructure:
+
+ def test_rootExists(self):
+ assert _DEMO_DATA_ROOT.exists(), f"demoData root not found: {_DEMO_DATA_ROOT}"
+
+ def test_invoicesNotEmpty(self):
+ d = _DEMO_DATA_ROOT / "invoices"
+ assert d.exists(), "invoices/ dir missing"
+ files = [f for f in d.iterdir() if not f.name.startswith(".")]
+ assert len(files) >= 1, f"invoices/ is empty: {list(d.iterdir())}"
+
+ def test_expensesNotEmpty(self):
+ d = _DEMO_DATA_ROOT / "expenses"
+ assert d.exists(), "expenses/ dir missing"
+ files = [f for f in d.iterdir() if not f.name.startswith(".")]
+ assert len(files) >= 1, f"expenses/ is empty: {list(d.iterdir())}"
+
+ def test_knowledgeBaseNotEmpty(self):
+ d = _DEMO_DATA_ROOT / "knowledge-base"
+ assert d.exists(), "knowledge-base/ dir missing"
+ files = [f for f in d.iterdir() if not f.name.startswith(".")]
+ assert len(files) >= 3, f"knowledge-base/ should have >=3 docs, found {len(files)}"
+
+ def test_neutralizerHasDossier(self):
+ pdf = _DEMO_DATA_ROOT / "neutralizer" / "tenant-dossier.pdf"
+ assert pdf.exists(), "tenant-dossier.pdf missing"
+ assert pdf.stat().st_size > 500, "tenant-dossier.pdf too small"
+
+ def test_trusteeNotEmpty(self):
+ d = _DEMO_DATA_ROOT / "trustee"
+ assert d.exists(), "trustee/ dir missing"
+ files = [f for f in d.iterdir() if not f.name.startswith(".")]
+        assert len(files) >= 1, f"trustee/ is empty: {list(d.iterdir())}"
diff --git a/tests/demo/test_demo_neutralization.py b/tests/demo/test_demo_neutralization.py
new file mode 100644
index 00000000..aca54491
--- /dev/null
+++ b/tests/demo/test_demo_neutralization.py
@@ -0,0 +1,36 @@
+"""
+T-NEU: Neutralization config verification.
+
+Verifies that neutralization is configured and enabled
+for both demo mandates.
+"""
+
+import pytest
+from tests.demo.conftest import _getFeatureInstances
+
+
+class TestNeutralizationConfig:
+
+ @pytest.mark.parametrize("mandateFixture", ["mandateHappylife", "mandateAlpina"])
+ def test_neutralizationEnabled(self, db, mandateFixture, request):
+ """Neutralization must be enabled for both mandates."""
+ mandate = request.getfixturevalue(mandateFixture)
+ mid = mandate.get("id")
+ instances = _getFeatureInstances(db, mid, "neutralization")
+ assert instances, f"No neutralization instance in {mandate.get('label')}"
+
+ from modules.features.neutralization.datamodelFeatureNeutralizer import DataNeutraliserConfig
+ iid = instances[0].get("id")
+ configs = db.getRecordset(DataNeutraliserConfig, recordFilter={"featureInstanceId": iid})
+ assert configs, f"No neutralization config in {mandate.get('label')}"
+ assert configs[0].get("enabled") is True, f"Neutralization not enabled in {mandate.get('label')}"
+
+
+class TestNeutralizationTestData:
+
+ def test_tenantDossierExists(self):
+ """The tenant-dossier.pdf must exist in demoData."""
+ from pathlib import Path
+ dossier = Path(__file__).resolve().parent.parent.parent / "demoData" / "neutralizer" / "tenant-dossier.pdf"
+ assert dossier.exists(), f"tenant-dossier.pdf not found at {dossier}"
+ assert dossier.stat().st_size > 500, "tenant-dossier.pdf seems too small"
diff --git a/tests/demo/test_demo_uc1_trustee.py b/tests/demo/test_demo_uc1_trustee.py
new file mode 100644
index 00000000..54d2ac70
--- /dev/null
+++ b/tests/demo/test_demo_uc1_trustee.py
@@ -0,0 +1,60 @@
+"""
+T-UC1: Trustee — Spesenverarbeitung.
+
+Verifies that the trustee feature instances are correctly configured
+with RMA accounting and that system workflow templates exist.
+"""
+
+import pytest
+from tests.demo.conftest import _getFeatureInstances
+
+
+class TestTrusteeSetup:
+
+ def test_trusteeInstancesExist(self, db, mandateHappylife, mandateAlpina):
+ """Both mandates must have a trustee instance."""
+ for mandate in [mandateHappylife, mandateAlpina]:
+ mid = mandate.get("id")
+ instances = _getFeatureInstances(db, mid, "trustee")
+ assert len(instances) >= 1, f"No trustee in {mandate.get('label')}"
+
+ def test_rmaCredentialsEncrypted(self, db, mandateHappylife):
+ """RMA config must have non-empty encrypted credentials."""
+ from modules.features.trustee.datamodelFeatureTrustee import TrusteeAccountingConfig
+ mid = mandateHappylife.get("id")
+ instances = _getFeatureInstances(db, mid, "trustee")
+ iid = instances[0].get("id")
+ configs = db.getRecordset(TrusteeAccountingConfig, recordFilter={"featureInstanceId": iid})
+ assert configs
+ enc = configs[0].get("encryptedConfig", "")
+ assert enc and len(enc) > 10, "encryptedConfig should be a non-trivial encrypted blob"
+
+ def test_rmaCredentialsDecryptable(self, db, mandateHappylife):
+ """Encrypted RMA config must be decryptable and contain expected keys."""
+ import json
+ from modules.features.trustee.datamodelFeatureTrustee import TrusteeAccountingConfig
+ from modules.shared.configuration import decryptValue
+ mid = mandateHappylife.get("id")
+ instances = _getFeatureInstances(db, mid, "trustee")
+ iid = instances[0].get("id")
+ configs = db.getRecordset(TrusteeAccountingConfig, recordFilter={"featureInstanceId": iid})
+ enc = configs[0].get("encryptedConfig", "")
+ plain = json.loads(decryptValue(enc, userId="system", keyName="accountingConfig"))
+ assert "apiBaseUrl" in plain
+ assert "clientName" in plain
+ assert "apiKey" in plain
+ assert plain["apiKey"], "apiKey should not be empty"
+
+
+class TestSystemWorkflowTemplates:
+
+ def test_systemTemplatesExist(self, db):
+ """System workflow templates should exist (created by system bootstrap, not demo config)."""
+ from modules.features.graphicalEditor.datamodelFeatureGraphicalEditor import AutoWorkflow
+ try:
+ templates = db.getRecordset(AutoWorkflow, recordFilter={"isTemplate": True, "templateScope": "system"})
+ except Exception:
+ pytest.skip("AutoWorkflow table not accessible from app DB")
+ return
+ if len(templates) == 0:
+ pytest.skip("No system workflow templates — run full system bootstrap first")
diff --git a/tests/demo/test_demo_uc2_realestate.py b/tests/demo/test_demo_uc2_realestate.py
new file mode 100644
index 00000000..0d91122e
--- /dev/null
+++ b/tests/demo/test_demo_uc2_realestate.py
@@ -0,0 +1,24 @@
+"""
+T-UC2: Immobilien — Machbarkeitsstudie.
+
+Verifies that the workspace feature is available for the agent-based
+real estate demo (UC2 runs via workspace, not a dedicated realestate instance).
+"""
+
+import pytest
+from tests.demo.conftest import _getFeatureInstances
+
+
+class TestRealEstateReadiness:
+
+ def test_workspaceInstanceHappylife(self, db, mandateHappylife):
+ """HappyLife must have a workspace instance for the agent demo."""
+ mid = mandateHappylife.get("id")
+ instances = _getFeatureInstances(db, mid, "workspace")
+ assert len(instances) >= 1, "No workspace instance in HappyLife for UC2"
+
+ def test_workspaceInstanceAlpina(self, db, mandateAlpina):
+ """Alpina must have a workspace instance."""
+ mid = mandateAlpina.get("id")
+ instances = _getFeatureInstances(db, mid, "workspace")
+ assert len(instances) >= 1, "No workspace instance in Alpina"
diff --git a/tests/demo/test_demo_uc3_chatbot.py b/tests/demo/test_demo_uc3_chatbot.py
new file mode 100644
index 00000000..89c8d7ba
--- /dev/null
+++ b/tests/demo/test_demo_uc3_chatbot.py
@@ -0,0 +1,37 @@
+"""
+T-UC3: Knowledge Chatbot.
+
+Verifies that the chatbot feature instance exists in HappyLife AG
+and that knowledge-base documents are available for upload.
+Note: The actual RAG demo runs via workspace, not the chatbot's own index.
+"""
+
+import pytest
+from pathlib import Path
+from tests.demo.conftest import _getFeatureInstances
+
+
+class TestChatbotSetup:
+
+ def test_chatbotInstanceHappylife(self, db, mandateHappylife):
+ """HappyLife must have a chatbot instance."""
+ mid = mandateHappylife.get("id")
+ instances = _getFeatureInstances(db, mid, "chatbot")
+ assert len(instances) >= 1, "No chatbot instance in HappyLife"
+
+ def test_chatbotNotInAlpina(self, db, mandateAlpina):
+ """Alpina should NOT have a chatbot instance."""
+ mid = mandateAlpina.get("id")
+ instances = _getFeatureInstances(db, mid, "chatbot")
+ assert len(instances) == 0, "Alpina should not have chatbot"
+
+
+class TestKnowledgeBaseFiles:
+
+ def test_knowledgeBaseFilesExist(self):
+ """Knowledge-base documents must exist in demoData."""
+ kbDir = Path(__file__).resolve().parent.parent.parent / "demoData" / "knowledge-base"
+ assert kbDir.exists(), f"knowledge-base dir not found at {kbDir}"
+ files = list(kbDir.iterdir())
+ docs = [f for f in files if f.suffix in (".md", ".html", ".pdf", ".docx", ".txt")]
+ assert len(docs) >= 3, f"Expected at least 3 knowledge-base docs, found {len(docs)}: {[f.name for f in docs]}"
diff --git a/tests/demo/test_demo_uc4_i18n.py b/tests/demo/test_demo_uc4_i18n.py
new file mode 100644
index 00000000..04eba4b9
--- /dev/null
+++ b/tests/demo/test_demo_uc4_i18n.py
@@ -0,0 +1,51 @@
+"""
+T-UC4: Sprach-Deployment — Spanish (es).
+
+Verifies that the i18n system is ready for the live demo:
+Verifies that the i18n system is ready for the live demo:
+- xx base (meta) set exists and has entries
+- Spanish (es) is NOT pre-installed — it will be created live during the demo
+- German (de) and English (en) sets already exist
+
+import pytest
+
+
+class TestI18nReadiness:
+
+ def test_xxBaseSetExists(self, db):
+ """The xx (meta/base) language set must exist with entries."""
+ try:
+ from modules.datamodels.datamodelUiLanguage import UiLanguageSet
+ sets = db.getRecordset(UiLanguageSet, recordFilter={"id": "xx"})
+ assert sets, "xx base set not found — run i18n sync first"
+ entries = sets[0].get("entries") or []
+ assert len(entries) > 50, f"xx set has only {len(entries)} entries — expected 50+"
+ except Exception as e:
+ pytest.skip(f"i18n table not accessible: {e}")
+
+ def test_spanishNotPreInstalled(self, db):
+ """Spanish (es) must NOT be pre-installed — it will be created live."""
+ try:
+ from modules.datamodels.datamodelUiLanguage import UiLanguageSet
+ sets = db.getRecordset(UiLanguageSet, recordFilter={"id": "es"})
+ assert len(sets) == 0, "Spanish (es) is already installed — remove it before demo!"
+ except Exception as e:
+ pytest.skip(f"i18n table not accessible: {e}")
+
+ def test_germanSetExists(self, db):
+ """German (de) set must exist and be complete."""
+ try:
+ from modules.datamodels.datamodelUiLanguage import UiLanguageSet
+ sets = db.getRecordset(UiLanguageSet, recordFilter={"id": "de"})
+ assert sets, "German (de) set not found"
+ except Exception as e:
+ pytest.skip(f"i18n table not accessible: {e}")
+
+ def test_englishSetExists(self, db):
+ """English (en) set must exist."""
+ try:
+ from modules.datamodels.datamodelUiLanguage import UiLanguageSet
+ sets = db.getRecordset(UiLanguageSet, recordFilter={"id": "en"})
+ assert sets, "English (en) set not found"
+ except Exception as e:
+ pytest.skip(f"i18n table not accessible: {e}")
diff --git a/tests/functional/test05_workflow_with_documents.py b/tests/functional/test05_workflow_with_documents.py
deleted file mode 100644
index 5ca9be17..00000000
--- a/tests/functional/test05_workflow_with_documents.py
+++ /dev/null
@@ -1,369 +0,0 @@
-#!/usr/bin/env python3
-# Copyright (c) 2025 Patrick Motsch
-# All rights reserved.
-"""
-Workflow Test with Documents - Tests chat workflow execution with uploaded documents
-Simulates the UI route flow: upload files, start workflow with prompt and documents
-"""
-
-import asyncio
-import json
-import sys
-import os
-import time
-from typing import Dict, Any, List, Optional
-
-# Add the gateway to path (go up 2 levels from tests/functional/)
-_gateway_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", ".."))
-if _gateway_path not in sys.path:
- sys.path.insert(0, _gateway_path)
-
-# Import the service initialization
-from modules.serviceHub import getInterface as getServices
-from modules.datamodels.datamodelChat import UserInputRequest, WorkflowModeEnum
-from modules.datamodels.datamodelUam import User
-from modules.workflows.automation import chatStart
-import modules.interfaces.interfaceDbChat as interfaceFeatureAiChat
-
-
-class WorkflowWithDocumentsTester:
- def __init__(self):
- # Use root user for testing (has full access to everything)
- from modules.interfaces.interfaceDbApp import getRootInterface
- from modules.datamodels.datamodelUam import Mandate
- rootInterface = getRootInterface()
- self.testUser = rootInterface.currentUser
- # Get initial mandate ID for testing (User has no mandateId - use initial mandate)
- self.testMandateId = rootInterface.getInitialId(Mandate)
-
- # Initialize services using the existing system
- self.services = getServices(self.testUser, None) # Test user, no workflow
- self.workflow = None
- self.testResults = {}
-
- async def initialize(self):
- """Initialize the test environment."""
- # Set logging level to INFO to see workflow progress
- import logging
- logging.getLogger().setLevel(logging.INFO)
-
- print(f"Initialized test with user: {self.testUser.id}")
- print(f"Test Mandate ID: {self.testMandateId}")
-
- def createCsvTemplate(self) -> str:
- """Create a CSV template file for prime numbers."""
- csvContent = """Primzahl,Index
-2,1
-3,2
-5,3
-7,4
-11,5
-13,6
-17,7
-19,8
-23,9
-29,10
-"""
- return csvContent
-
- def createSecondDocument(self) -> str:
- """Create a second text document with instructions."""
- docContent = """Anweisungen zur Primzahlgenerierung:
-
-1. Generiere Primzahlen
-2. Formatiere sie in einer Tabelle mit 10 Spalten pro Zeile
-3. Verwende das bereitgestellte CSV-Vorlagenformat
-4. Stelle sicher, dass alle Zahlen korrekt formatiert sind
-5. Füge eine Index-Spalte hinzu, die bei 1 beginnt
-
-
-"""
- return docContent
-
- async def uploadFiles(self) -> List[str]:
- """Upload test files to the filesystem and return their file IDs."""
- print("\n" + "="*60)
- print("UPLOADING TEST FILES")
- print("="*60)
-
- fileIds = []
-
- # Create CSV template file
- csvContent = self.createCsvTemplate()
- csvFileName = "prime_numbers_template.csv"
-
- print(f"Creating CSV template: {csvFileName}")
- print(f"Content length: {len(csvContent)} bytes")
-
- # Create file in component storage
- csvFileItem = self.services.interfaceDbComponent.createFile(
- name=csvFileName,
- mimeType="text/csv",
- content=csvContent.encode('utf-8')
- )
- # Persist file data
- self.services.interfaceDbComponent.createFileData(csvFileItem.id, csvContent.encode('utf-8'))
-
- fileIds.append(csvFileItem.id)
- print(f"✅ Created CSV file with ID: {csvFileItem.id}")
- print(f" File name: {csvFileItem.fileName}")
- print(f" MIME type: {csvFileItem.mimeType}")
-
- # Create second text document
- docContent = self.createSecondDocument()
- docFileName = "prime_numbers_instructions.txt"
-
- print(f"\nCreating instruction document: {docFileName}")
- print(f"Content length: {len(docContent)} bytes")
-
- # Create file in component storage
- docFileItem = self.services.interfaceDbComponent.createFile(
- name=docFileName,
- mimeType="text/plain",
- content=docContent.encode('utf-8')
- )
- # Persist file data
- self.services.interfaceDbComponent.createFileData(docFileItem.id, docContent.encode('utf-8'))
-
- fileIds.append(docFileItem.id)
- print(f"✅ Created instruction file with ID: {docFileItem.id}")
- print(f" File name: {docFileItem.fileName}")
- print(f" MIME type: {docFileItem.mimeType}")
-
- return fileIds
-
- async def startWorkflow(self, prompt: str, fileIds: List[str]) -> None:
- """Start a chat workflow with prompt and documents."""
- print("\n" + "="*60)
- print("STARTING WORKFLOW")
- print("="*60)
-
- print(f"Prompt: {prompt}")
- print(f"Number of files: {len(fileIds)}")
- print(f"File IDs: {fileIds}")
-
- # Create UserInputRequest
- userInput = UserInputRequest(
- prompt=prompt,
- listFileId=fileIds,
- userLanguage="en"
- )
-
- # Start workflow (this is async and returns immediately)
- print("\nCalling chatStart...")
- self.workflow = await chatStart(
- currentUser=self.testUser,
- userInput=userInput,
- workflowMode=WorkflowModeEnum.WORKFLOW_DYNAMIC,
- workflowId=None
- )
-
- print(f"✅ Workflow started with ID: {self.workflow.id}")
- print(f" Status: {self.workflow.status}")
- print(f" Mode: {self.workflow.workflowMode}")
- print(f" Current Round: {self.workflow.currentRound}")
-
- async def waitForWorkflowCompletion(self, maxWaitTime: Optional[int] = None) -> bool:
- """Wait for workflow to complete, checking status periodically.
-
- Args:
- maxWaitTime: Maximum wait time in seconds. If None, wait indefinitely.
- """
- print("\n" + "="*60)
- print("WAITING FOR WORKFLOW COMPLETION")
- if maxWaitTime:
- print(f"Maximum wait time: {maxWaitTime} seconds")
- else:
- print("Waiting indefinitely (no timeout)")
- print("="*60)
-
- if not self.workflow:
- print("❌ No workflow to wait for")
- return False
-
- startTime = time.time()
- checkInterval = 2 # Check every 2 seconds
- lastStatus = None
-
- while True:
- # Check timeout if maxWaitTime is set
- if maxWaitTime is not None:
- elapsed = time.time() - startTime
- if elapsed >= maxWaitTime:
- print(f"\n⚠️ Workflow did not complete within {maxWaitTime} seconds")
- print(f" Final status: {self.workflow.status}")
- return False
-
- # Get current workflow status
- interfaceDbChat = interfaceDbChat.getInterface(self.testUser)
- currentWorkflow = interfaceDbChat.getWorkflow(self.workflow.id)
-
- if not currentWorkflow:
- print("❌ Workflow not found in database")
- return False
-
- currentStatus = currentWorkflow.status
- elapsed = int(time.time() - startTime)
-
- # Print status if it changed
- if currentStatus != lastStatus:
- print(f"Workflow status: {currentStatus} (elapsed: {elapsed}s)")
- lastStatus = currentStatus
-
- # Check if workflow is complete
- if currentStatus in ["completed", "stopped", "failed"]:
- self.workflow = currentWorkflow
- print(f"\n✅ Workflow finished with status: {currentStatus} (elapsed: {elapsed}s)")
- return currentStatus == "completed"
-
- # Wait before next check
- await asyncio.sleep(checkInterval)
-
- def analyzeWorkflowResults(self) -> Dict[str, Any]:
- """Analyze workflow results and extract information."""
- print("\n" + "="*60)
- print("ANALYZING WORKFLOW RESULTS")
- print("="*60)
-
- if not self.workflow:
- return {"error": "No workflow to analyze"}
-
- interfaceDbChat = interfaceDbChat.getInterface(self.testUser)
- workflow = interfaceDbChat.getWorkflow(self.workflow.id)
-
- if not workflow:
- return {"error": "Workflow not found"}
-
- # Get unified chat data
- chatData = interfaceDbChat.getUnifiedChatData(workflow.id, None)
-
- # Count messages
- messages = chatData.get("messages", [])
- userMessages = [m for m in messages if m.get("role") == "user"]
- assistantMessages = [m for m in messages if m.get("role") == "assistant"]
-
- # Count documents
- documents = chatData.get("documents", [])
-
- # Get logs
- logs = chatData.get("logs", [])
-
- # Get stats
- stats = chatData.get("stats", [])
-
- results = {
- "workflowId": workflow.id,
- "status": workflow.status,
- "workflowMode": str(workflow.workflowMode) if hasattr(workflow, 'workflowMode') else None,
- "currentRound": workflow.currentRound,
- "totalTasks": workflow.totalTasks,
- "totalActions": workflow.totalActions,
- "messageCount": len(messages),
- "userMessageCount": len(userMessages),
- "assistantMessageCount": len(assistantMessages),
- "documentCount": len(documents),
- "logCount": len(logs),
- "statCount": len(stats),
- "messages": messages,
- "documents": documents,
- "logs": logs,
- "stats": stats
- }
-
- print(f"Workflow ID: {results['workflowId']}")
- print(f"Status: {results['status']}")
- print(f"Mode: {results['workflowMode']}")
- print(f"Round: {results['currentRound']}")
- print(f"Tasks: {results['totalTasks']}")
- print(f"Actions: {results['totalActions']}")
- print(f"Messages: {results['messageCount']} (User: {results['userMessageCount']}, Assistant: {results['assistantMessageCount']})")
- print(f"Documents: {results['documentCount']}")
- print(f"Logs: {results['logCount']}")
- print(f"Stats: {results['statCount']}")
-
- # Print first user message
- if userMessages:
- print(f"\nFirst user message:")
- print(f" {userMessages[0].get('message', '')[:200]}...")
-
- # Print last assistant message
- if assistantMessages:
- print(f"\nLast assistant message:")
- lastMsg = assistantMessages[-1]
- print(f" {lastMsg.get('message', '')[:200]}...")
- if lastMsg.get('documents'):
- print(f" Documents attached: {len(lastMsg['documents'])}")
-
- # Print document names
- if documents:
- print(f"\nGenerated documents:")
- for doc in documents:
- print(f" - {doc.get('fileName', 'unknown')} ({doc.get('fileSize', 0)} bytes)")
-
- return results
-
- async def runTest(self):
- """Run the complete test."""
- print("\n" + "="*80)
- print("WORKFLOW TEST WITH DOCUMENTS")
- print("="*80)
-
- try:
- # Initialize
- await self.initialize()
-
- # Upload files
- fileIds = await self.uploadFiles()
-
- # Start workflow with prompt and files
- prompt = "Generiere die ersten 4000 Primzahlen in einer Tabelle mit 10 Spalten pro Zeile."
- await self.startWorkflow(prompt, fileIds)
-
- # Wait for completion (no timeout - wait indefinitely)
- completed = await self.waitForWorkflowCompletion()
-
- # Analyze results
- results = self.analyzeWorkflowResults()
-
- self.testResults = {
- "completed": completed,
- "results": results
- }
-
- print("\n" + "="*80)
- print("TEST SUMMARY")
- print("="*80)
- print(f"Workflow completed: {'✅' if completed else '❌'}")
- print(f"Status: {results.get('status', 'unknown')}")
- print(f"Messages: {results.get('messageCount', 0)}")
- print(f"Documents: {results.get('documentCount', 0)}")
-
- return self.testResults
-
- except Exception as e:
- import traceback
- print(f"\n❌ Test failed with error: {type(e).__name__}: {str(e)}")
- print(f"Traceback:\n{traceback.format_exc()}")
- self.testResults = {
- "completed": False,
- "error": str(e),
- "traceback": traceback.format_exc()
- }
- return self.testResults
-
-
-async def main():
- """Run workflow test with documents."""
- tester = WorkflowWithDocumentsTester()
- results = await tester.runTest()
-
- # Print final results as JSON for easy parsing
- print("\n" + "="*80)
- print("FINAL RESULTS (JSON)")
- print("="*80)
- print(json.dumps(results, indent=2, default=str))
-
-
-if __name__ == "__main__":
- asyncio.run(main())
-
diff --git a/tests/functional/test06_workflow_prompt_variations.py b/tests/functional/test06_workflow_prompt_variations.py
deleted file mode 100644
index 91f1cccc..00000000
--- a/tests/functional/test06_workflow_prompt_variations.py
+++ /dev/null
@@ -1,471 +0,0 @@
-#!/usr/bin/env python3
-# Copyright (c) 2025 Patrick Motsch
-# All rights reserved.
-"""
-Workflow Test with Prompt Variations - Tests different workflow scenarios:
-1. Simple prompt for short answer (no documents)
-2. Merge 2 documents and output as Word document
-3. Structured data output as Excel file
-"""
-
-import asyncio
-import json
-import sys
-import os
-import time
-from typing import Dict, Any, List, Optional
-
-# Add the gateway to path (go up 2 levels from tests/functional/)
-_gateway_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", ".."))
-if _gateway_path not in sys.path:
- sys.path.insert(0, _gateway_path)
-
-# Import the service initialization
-from modules.serviceHub import getInterface as getServices
-from modules.datamodels.datamodelChat import UserInputRequest, WorkflowModeEnum
-from modules.datamodels.datamodelUam import User
-from modules.workflows.automation import chatStart
-import modules.interfaces.interfaceDbChat as interfaceFeatureAiChat
-
-
-class WorkflowPromptVariationsTester:
- def __init__(self):
- # Use root user for testing (has full access to everything)
- from modules.interfaces.interfaceDbApp import getRootInterface
- from modules.datamodels.datamodelUam import Mandate
- rootInterface = getRootInterface()
- self.testUser = rootInterface.currentUser
- # Get initial mandate ID for testing (User has no mandateId - use initial mandate)
- self.testMandateId = rootInterface.getInitialId(Mandate)
-
- # Initialize services using the existing system
- self.services = getServices(self.testUser, None) # Test user, no workflow
- self.testResults = {}
-
- async def initialize(self):
- """Initialize the test environment."""
- # Set logging level to INFO to see workflow progress
- import logging
- logging.getLogger().setLevel(logging.INFO)
-
- print(f"Initialized test with user: {self.testUser.id}")
- print(f"Test Mandate ID: {self.testMandateId}")
-
- def _createFile(self, fileName: str, mimeType: str, content: str) -> str:
- """Helper method to create a file and return its ID."""
- fileItem = self.services.interfaceDbComponent.createFile(
- name=fileName,
- mimeType=mimeType,
- content=content.encode('utf-8')
- )
- self.services.interfaceDbComponent.createFileData(fileItem.id, content.encode('utf-8'))
- return fileItem.id
-
- async def _startWorkflow(self, prompt: str, fileIds: List[str] = None) -> Any:
- """Start a chat workflow with prompt and optional documents."""
- if fileIds is None:
- fileIds = []
-
- print(f"\nPrompt: {prompt}")
- print(f"Number of files: {len(fileIds)}")
- if fileIds:
- print(f"File IDs: {fileIds}")
-
- # Create UserInputRequest
- userInput = UserInputRequest(
- prompt=prompt,
- listFileId=fileIds,
- userLanguage="en"
- )
-
- # Start workflow (this is async and returns immediately)
- workflow = await chatStart(
- currentUser=self.testUser,
- userInput=userInput,
- workflowMode=WorkflowModeEnum.WORKFLOW_DYNAMIC,
- workflowId=None
- )
-
- print(f"✅ Workflow started with ID: {workflow.id}")
- print(f" Status: {workflow.status}")
- print(f" Mode: {workflow.workflowMode}")
-
- return workflow
-
- async def _waitForWorkflowCompletion(self, workflow: Any, maxWaitTime: Optional[int] = None) -> bool:
- """Wait for workflow to complete, checking status periodically.
-
- Args:
- workflow: The workflow object to wait for
- maxWaitTime: Maximum wait time in seconds. If None, wait indefinitely.
- """
- if maxWaitTime:
- print(f"Maximum wait time: {maxWaitTime} seconds")
- else:
- print("Waiting indefinitely (no timeout)")
-
- startTime = time.time()
- checkInterval = 2 # Check every 2 seconds
- lastStatus = None
-
- while True:
- # Check timeout if maxWaitTime is set
- if maxWaitTime is not None:
- elapsed = time.time() - startTime
- if elapsed >= maxWaitTime:
- print(f"\n⚠️ Workflow did not complete within {maxWaitTime} seconds")
- print(f" Final status: {workflow.status}")
- return False
-
- # Get current workflow status
- interfaceDbChat = interfaceDbChat.getInterface(self.testUser)
- currentWorkflow = interfaceDbChat.getWorkflow(workflow.id)
-
- if not currentWorkflow:
- print("❌ Workflow not found in database")
- return False
-
- currentStatus = currentWorkflow.status
- elapsed = int(time.time() - startTime)
-
- # Print status if it changed
- if currentStatus != lastStatus:
- print(f"Workflow status: {currentStatus} (elapsed: {elapsed}s)")
- lastStatus = currentStatus
-
- # Check if workflow is complete
- if currentStatus in ["completed", "stopped", "failed"]:
- print(f"\n✅ Workflow finished with status: {currentStatus} (elapsed: {elapsed}s)")
- return currentStatus == "completed"
-
- # Wait before next check
- await asyncio.sleep(checkInterval)
-
- def _analyzeWorkflowResults(self, workflow: Any) -> Dict[str, Any]:
- """Analyze workflow results and extract information."""
- interfaceDbChat = interfaceDbChat.getInterface(self.testUser)
- workflow = interfaceDbChat.getWorkflow(workflow.id)
-
- if not workflow:
- return {"error": "Workflow not found"}
-
- # Get unified chat data
- chatData = interfaceDbChat.getUnifiedChatData(workflow.id, None)
-
- # Extract messages and documents from items
- items = chatData.get("items", [])
- messages = []
- allDocuments = []
-
- for item in items:
- if item.get("type") == "message":
- message = item.get("item")
- if message:
- # Convert ChatMessage to dict if needed
- if hasattr(message, 'dict'):
- msgDict = message.dict()
- elif hasattr(message, '__dict__'):
- msgDict = message.__dict__
- else:
- msgDict = message if isinstance(message, dict) else {}
-
- messages.append(msgDict)
-
- # Extract documents from message
- msgDocuments = msgDict.get("documents", [])
- if msgDocuments:
- for doc in msgDocuments:
- # Convert ChatDocument to dict if needed
- if hasattr(doc, 'dict'):
- docDict = doc.dict()
- elif hasattr(doc, '__dict__'):
- docDict = doc.__dict__
- else:
- docDict = doc if isinstance(doc, dict) else {}
-
- # Only add if not already in list (avoid duplicates)
- docId = docDict.get("id") or docDict.get("fileId")
- if docId and not any(d.get("id") == docId or d.get("fileId") == docId for d in allDocuments):
- allDocuments.append(docDict)
-
- userMessages = [m for m in messages if m.get("role") == "user"]
- assistantMessages = [m for m in messages if m.get("role") == "assistant"]
-
- results = {
- "workflowId": workflow.id,
- "status": workflow.status,
- "workflowMode": str(workflow.workflowMode) if hasattr(workflow, 'workflowMode') else None,
- "currentRound": workflow.currentRound,
- "totalTasks": workflow.totalTasks,
- "totalActions": workflow.totalActions,
- "messageCount": len(messages),
- "userMessageCount": len(userMessages),
- "assistantMessageCount": len(assistantMessages),
- "documentCount": len(allDocuments),
- "documents": allDocuments
- }
-
- print(f" Workflow ID: {results['workflowId']}")
- print(f" Status: {results['status']}")
- print(f" Messages: {results['messageCount']} (User: {results['userMessageCount']}, Assistant: {results['assistantMessageCount']})")
- print(f" Documents: {results['documentCount']}")
-
- # Print document names
- if allDocuments:
- print(f" Generated documents:")
- for doc in allDocuments:
- fileName = doc.get("fileName") or doc.get("documentName") or "unknown"
- fileSize = doc.get("fileSize") or doc.get("size") or 0
- print(f" - {fileName} ({fileSize} bytes)")
-
- return results
-
- async def testSimplePrompt(self) -> Dict[str, Any]:
- """Test 1: Simple prompt for a short answer (no documents)."""
- print("\n" + "="*80)
- print("TEST 1: SIMPLE PROMPT FOR SHORT ANSWER")
- print("="*80)
-
- try:
- prompt = "What is the capital of France? Answer in one sentence."
-
- workflow = await self._startWorkflow(prompt, [])
- completed = await self._waitForWorkflowCompletion(workflow, maxWaitTime=120)
- results = self._analyzeWorkflowResults(workflow)
-
- return {
- "testName": "Simple Prompt",
- "completed": completed,
- "results": results
- }
- except Exception as e:
- import traceback
- print(f"❌ Test failed: {type(e).__name__}: {str(e)}")
- return {
- "testName": "Simple Prompt",
- "completed": False,
- "error": str(e),
- "traceback": traceback.format_exc()
- }
-
- async def testMergeDocumentsToWord(self) -> Dict[str, Any]:
- """Test 2: Merge 2 documents and output as Word document."""
- print("\n" + "="*80)
- print("TEST 2: MERGE 2 DOCUMENTS AND OUTPUT AS WORD")
- print("="*80)
-
- try:
- # Create first document
- doc1Content = """Project Overview
-
-This document outlines the key objectives for our new software project.
-The project aims to develop a modern web application with the following features:
-- User authentication and authorization
-- Real-time data synchronization
-- Responsive design for mobile and desktop
-- Integration with third-party APIs
-
-Timeline: 6 months
-Budget: $500,000
-"""
-
- # Create second document
- doc2Content = """Technical Specifications
-
-Architecture:
-- Frontend: React with TypeScript
-- Backend: Python with FastAPI
-- Database: PostgreSQL
-- Deployment: Docker containers on AWS
-
-Key Requirements:
-- Support for 10,000 concurrent users
-- 99.9% uptime SLA
-- End-to-end encryption for sensitive data
-- Comprehensive logging and monitoring
-
-Team Size: 8 developers, 2 designers, 1 project manager
-"""
-
- print("\nCreating documents to merge...")
- doc1Id = self._createFile("project_overview.txt", "text/plain", doc1Content)
- print(f"✅ Created document 1 with ID: {doc1Id}")
-
- doc2Id = self._createFile("technical_specs.txt", "text/plain", doc2Content)
- print(f"✅ Created document 2 with ID: {doc2Id}")
-
- prompt = "Merge these two documents into a single comprehensive Word document. Include both the project overview and technical specifications in a well-formatted document with proper headings and sections."
-
- workflow = await self._startWorkflow(prompt, [doc1Id, doc2Id])
- completed = await self._waitForWorkflowCompletion(workflow, maxWaitTime=300)
- results = self._analyzeWorkflowResults(workflow)
-
- # Check if Word document was created
- wordDocFound = False
- if results.get("documents"):
- for doc in results["documents"]:
- fileName = doc.get("fileName", "").lower()
- if fileName.endswith(".docx") or fileName.endswith(".doc"):
- wordDocFound = True
- print(f" ✅ Word document found: {doc.get('fileName')}")
-
- if not wordDocFound:
- print(" ⚠️ Warning: No Word document (.docx or .doc) found in results")
-
- return {
- "testName": "Merge Documents to Word",
- "completed": completed,
- "wordDocumentFound": wordDocFound,
- "results": results
- }
- except Exception as e:
- import traceback
- print(f"❌ Test failed: {type(e).__name__}: {str(e)}")
- return {
- "testName": "Merge Documents to Word",
- "completed": False,
- "error": str(e),
- "traceback": traceback.format_exc()
- }
-
- async def testStructuredDataToExcel(self) -> Dict[str, Any]:
- """Test 3: Structured data output as Excel file."""
- print("\n" + "="*80)
- print("TEST 3: STRUCTURED DATA OUTPUT AS EXCEL")
- print("="*80)
-
- try:
- # Create structured data as JSON
- structuredData = {
- "employees": [
- {"id": 1, "name": "John Doe", "department": "Engineering", "salary": 95000, "startDate": "2020-01-15"},
- {"id": 2, "name": "Jane Smith", "department": "Marketing", "salary": 85000, "startDate": "2019-03-20"},
- {"id": 3, "name": "Bob Johnson", "department": "Engineering", "salary": 100000, "startDate": "2018-06-10"},
- {"id": 4, "name": "Alice Williams", "department": "HR", "salary": 75000, "startDate": "2021-09-05"},
- {"id": 5, "name": "Charlie Brown", "department": "Sales", "salary": 80000, "startDate": "2020-11-12"},
- {"id": 6, "name": "Diana Prince", "department": "Engineering", "salary": 110000, "startDate": "2017-04-22"},
- {"id": 7, "name": "Edward Norton", "department": "Marketing", "salary": 90000, "startDate": "2019-08-30"},
- {"id": 8, "name": "Fiona Green", "department": "HR", "salary": 78000, "startDate": "2022-01-18"}
- ],
- "departments": [
- {"name": "Engineering", "budget": 500000, "headCount": 3},
- {"name": "Marketing", "budget": 300000, "headCount": 2},
- {"name": "HR", "budget": 200000, "headCount": 2},
- {"name": "Sales", "budget": 250000, "headCount": 1}
- ]
- }
-
- jsonContent = json.dumps(structuredData, indent=2)
-
- print("\nCreating structured data file...")
- dataFileId = self._createFile("employee_data.json", "application/json", jsonContent)
- print(f"✅ Created data file with ID: {dataFileId}")
-
- prompt = "Create an Excel file from this structured data. Include two sheets: one for employees with all their details, and one for departments with summary information. Format the data nicely with proper column headers and make it easy to read."
-
- workflow = await self._startWorkflow(prompt, [dataFileId])
- completed = await self._waitForWorkflowCompletion(workflow, maxWaitTime=300)
- results = self._analyzeWorkflowResults(workflow)
-
- # Check if Excel document was created
- excelDocFound = False
- if results.get("documents"):
- for doc in results["documents"]:
- fileName = doc.get("fileName", "").lower()
- if fileName.endswith(".xlsx") or fileName.endswith(".xls"):
- excelDocFound = True
- print(f" ✅ Excel document found: {doc.get('fileName')}")
-
- if not excelDocFound:
- print(" ⚠️ Warning: No Excel document (.xlsx or .xls) found in results")
-
- return {
- "testName": "Structured Data to Excel",
- "completed": completed,
- "excelDocumentFound": excelDocFound,
- "results": results
- }
- except Exception as e:
- import traceback
- print(f"❌ Test failed: {type(e).__name__}: {str(e)}")
- return {
- "testName": "Structured Data to Excel",
- "completed": False,
- "error": str(e),
- "traceback": traceback.format_exc()
- }
-
- async def runAllTests(self):
- """Run all three test cases."""
- print("\n" + "="*80)
- print("WORKFLOW PROMPT VARIATIONS TEST SUITE")
- print("="*80)
-
- try:
- # Initialize
- await self.initialize()
-
- # Run all tests
- test1Results = await self.testSimplePrompt()
- test2Results = await self.testMergeDocumentsToWord()
- test3Results = await self.testStructuredDataToExcel()
-
- self.testResults = {
- "test1": test1Results,
- "test2": test2Results,
- "test3": test3Results,
- "summary": {
- "totalTests": 3,
- "passedTests": sum([
- 1 if test1Results.get("completed") else 0,
- 1 if test2Results.get("completed") else 0,
- 1 if test3Results.get("completed") else 0
- ]),
- "failedTests": sum([
- 1 if not test1Results.get("completed") else 0,
- 1 if not test2Results.get("completed") else 0,
- 1 if not test3Results.get("completed") else 0
- ])
- }
- }
-
- print("\n" + "="*80)
- print("TEST SUITE SUMMARY")
- print("="*80)
- print(f"Test 1 - Simple Prompt: {'✅ PASSED' if test1Results.get('completed') else '❌ FAILED'}")
- print(f"Test 2 - Merge to Word: {'✅ PASSED' if test2Results.get('completed') else '❌ FAILED'}")
- if test2Results.get('wordDocumentFound'):
- print(f" Word document created: ✅")
- print(f"Test 3 - Data to Excel: {'✅ PASSED' if test3Results.get('completed') else '❌ FAILED'}")
- if test3Results.get('excelDocumentFound'):
- print(f" Excel document created: ✅")
- print(f"\nTotal: {self.testResults['summary']['passedTests']}/{self.testResults['summary']['totalTests']} tests passed")
-
- return self.testResults
-
- except Exception as e:
- import traceback
- print(f"\n❌ Test suite failed with error: {type(e).__name__}: {str(e)}")
- print(f"Traceback:\n{traceback.format_exc()}")
- self.testResults = {
- "error": str(e),
- "traceback": traceback.format_exc()
- }
- return self.testResults
-
-
-async def main():
- """Run workflow prompt variations test suite."""
- tester = WorkflowPromptVariationsTester()
- results = await tester.runAllTests()
-
- # Print final results as JSON for easy parsing
- print("\n" + "="*80)
- print("FINAL RESULTS (JSON)")
- print("="*80)
- print(json.dumps(results, indent=2, default=str))
-
-
-if __name__ == "__main__":
- asyncio.run(main())
-
diff --git a/tests/functional/test09_document_generation_formats.py b/tests/functional/test09_document_generation_formats.py
deleted file mode 100644
index 844863a2..00000000
--- a/tests/functional/test09_document_generation_formats.py
+++ /dev/null
@@ -1,735 +0,0 @@
-#!/usr/bin/env python3
-# Copyright (c) 2025 Patrick Motsch
-# All rights reserved.
-"""
-Document Generation Formats Test - Tests document generation in all supported formats
-Tests HTML, PDF, DOCX, XLSX, and PPTX generation with images and various content types.
-"""
-
-import asyncio
-import json
-import sys
-import os
-import time
-import base64
-from typing import Dict, Any, List, Optional
-
-# Add the gateway to path (go up 2 levels from tests/functional/)
-_gateway_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", ".."))
-if _gateway_path not in sys.path:
- sys.path.insert(0, _gateway_path)
-
-# Import the service initialization
-from modules.serviceHub import getInterface as getServices
-from modules.datamodels.datamodelChat import UserInputRequest, WorkflowModeEnum
-from modules.datamodels.datamodelUam import User
-from modules.workflows.automation import chatStart
-import modules.interfaces.interfaceDbChat as interfaceFeatureAiChat
-
-
-class DocumentGenerationFormatsTester:
- def __init__(self):
- # Use root user for testing (has full access to everything)
- from modules.interfaces.interfaceDbApp import getRootInterface
- from modules.datamodels.datamodelUam import Mandate
- rootInterface = getRootInterface()
- self.testUser = rootInterface.currentUser
- # Get initial mandate ID for testing (User has no mandateId - use initial mandate)
- self.testMandateId = rootInterface.getInitialId(Mandate)
-
- # Initialize services using the existing system
- self.services = getServices(self.testUser, None) # Test user, no workflow
- self.workflow = None
- self.testResults = {}
- self.generatedDocuments = {}
- self.pdfFileId = None # Store PDF file ID for reuse
-
- async def initialize(self):
- """Initialize the test environment."""
- # Enable debug file logging for tests
- from modules.shared.configuration import APP_CONFIG
- APP_CONFIG.set("APP_DEBUG_CHAT_WORKFLOW_ENABLED", True)
-
- # Set logging level to INFO to see workflow progress
- import logging
- logging.getLogger().setLevel(logging.INFO)
-
- print(f"Initialized test with user: {self.testUser.id}")
- print(f"Test Mandate ID: {self.testMandateId}")
- print(f"Debug logging enabled: {APP_CONFIG.get('APP_DEBUG_CHAT_WORKFLOW_ENABLED', False)}")
-
- # Upload PDF file for testing
- await self.uploadPdfFile()
-
- async def uploadPdfFile(self):
- """Upload the PDF file and store its file ID."""
- pdfPath = os.path.join(os.path.dirname(__file__), "..", "..", "..", "local", "temp", "B2025-02c.pdf")
- pdfPath = os.path.abspath(pdfPath)
-
- if not os.path.exists(pdfPath):
- print(f"⚠️ Warning: PDF file not found at {pdfPath}")
- print(" Test will continue without PDF attachment")
- return
-
- try:
- # Read PDF file
- with open(pdfPath, "rb") as f:
- pdfContent = f.read()
-
- # Create file using services.interfaceDbComponent
- if not hasattr(self.services, 'interfaceDbComponent') or not self.services.interfaceDbComponent:
- print("⚠️ Warning: interfaceDbComponent not available in services")
- print(" Test will continue without PDF attachment")
- return
-
- interfaceDbComponent = self.services.interfaceDbComponent
-
- fileItem = interfaceDbComponent.createFile(
- name="B2025-02c.pdf",
- mimeType="application/pdf",
- content=pdfContent
- )
-
- # Store file data
- interfaceDbComponent.createFileData(fileItem.id, pdfContent)
-
- self.pdfFileId = fileItem.id
- print(f"✅ Uploaded PDF file: {fileItem.fileName} (ID: {self.pdfFileId}, Size: {len(pdfContent)} bytes)")
-
- except Exception as e:
- import traceback
- print(f"⚠️ Warning: Failed to upload PDF file: {str(e)}")
- print(f" Traceback: {traceback.format_exc()}")
- print(" Test will continue without PDF attachment")
-
- def createTestPrompt(self, format: str) -> str:
- """Create a unified test prompt for document generation in the specified format.
-
- The prompt requests:
- - Extraction of images from the attached PDF
- - Generation of a new image
- - Document creation with both images
- """
- basePrompt = (
- "Create a professional document about 'Fuel Station Receipt Analysis' with the following content:\n"
- "1) A main title\n"
- "2) An introduction paragraph explaining the receipt analysis\n"
- "3) Extract and include the image from the attached PDF document (B2025-02c.pdf)\n"
- "4) A section analyzing the receipt data with bullet points\n"
- "5) Generate a new image showing a visual representation of fuel consumption trends\n"
- "6) A conclusion paragraph with recommendations\n\n"
- "Make sure to include both: the image extracted from the PDF and the newly generated image.\n"
- f"Format the output as {format.upper()}."
- )
- return basePrompt
-
- def createRefactoringTestPrompt(self, testType: str, format: str = "html") -> str:
- """Create test prompts for specific refactoring features.
-
- Args:
- testType: Type of refactoring test:
- - "intent_analysis": Test DocumentIntent analysis
- - "conditional_extraction": Test conditional extraction (extract vs render)
- - "image_render": Test image rendering as asset
- - "multi_document": Test multi-document rendering
- - "metadata_preservation": Test metadata preservation
- format: Output format (default: html)
- """
- prompts = {
- "intent_analysis": (
- "Create a document with the following requirements:\n"
- "1) Extract text content from the attached PDF\n"
- "2) Include images from the PDF as visual elements (render them, don't extract text from them)\n"
- "3) Generate a summary document\n\n"
- "This tests that the system correctly identifies which documents need extraction vs rendering."
- ),
- "conditional_extraction": (
- "Create a document that:\n"
- "1) Extracts and uses text from the attached PDF\n"
- "2) Renders images from the PDF as visual assets (not as extracted text)\n"
- "3) Generates new content based on the extracted text\n\n"
- "This tests conditional extraction - only extract what needs extraction, render what needs rendering."
- ),
- "image_render": (
- "Create a document that includes images from the attached PDF.\n"
- "The images should be rendered as visual elements in the document, not extracted as text.\n"
- "Include a title and description for each image.\n\n"
- "This tests the image asset pipeline with render intent."
- ),
- "multi_document": (
- "Create multiple separate documents:\n"
- "1) Document 1: Summary of the PDF content\n"
- "2) Document 2: Analysis of the PDF content\n"
- "3) Document 3: Recommendations based on the PDF content\n\n"
- "Each document should be separate and complete.\n"
- "This tests multi-document generation and rendering."
- ),
- "metadata_preservation": (
- "Create a document that extracts content from the attached PDF.\n"
- "The document should clearly show which content came from which source document.\n"
- "Include source references in the generated content.\n\n"
- "This tests that metadata (documentId, mimeType) is preserved in the generation prompt."
- )
- }
-
- prompt = prompts.get(testType, self.createTestPrompt(format))
- return f"{prompt}\n\nFormat the output as {format.upper()}."
-
- async def generateDocumentInFormat(self, format: str) -> Dict[str, Any]:
- """Generate a document in the specified format using workflow."""
- print("\n" + "="*80)
- print(f"GENERATING DOCUMENT IN {format.upper()} FORMAT")
- print("="*80)
-
- prompt = self.createTestPrompt(format)
- print(f"Prompt: {prompt[:200]}...")
-
- # Create user input request with PDF file attachment
- listFileId = []
- if self.pdfFileId:
- listFileId = [self.pdfFileId]
- print(f"Attaching PDF file (ID: {self.pdfFileId})")
- else:
- print("⚠️ No PDF file attached (file upload may have failed)")
-
- # Create user input request
- userInput = UserInputRequest(
- prompt=prompt,
- listFileId=listFileId,
- userLanguage="en"
- )
-
- # Start workflow
- print(f"\nStarting workflow for {format.upper()} generation...")
- workflow = await chatStart(
- currentUser=self.testUser,
- userInput=userInput,
- workflowMode=WorkflowModeEnum.WORKFLOW_DYNAMIC,
- workflowId=None
- )
-
- if not workflow:
- return {
- "success": False,
- "error": "Failed to start workflow"
- }
-
- self.workflow = workflow
- print(f"Workflow started: {workflow.id}")
-
- # Wait for workflow completion (no timeout - wait indefinitely)
- print(f"Waiting for workflow completion...")
- completed = await self.waitForWorkflowCompletion(timeout=None)
-
- if not completed:
- return {
- "success": False,
- "error": "Workflow did not complete",
- "workflowId": workflow.id,
- "status": workflow.status if workflow else "unknown"
- }
-
- # Analyze results
- results = self.analyzeWorkflowResults()
-
- # Extract documents for this format
- documents = results.get("documents", [])
- formatDocuments = [d for d in documents if d.get("fileName", "").endswith(f".{format.lower()}")]
-
- return {
- "success": True,
- "format": format,
- "workflowId": workflow.id,
- "status": results.get("status"),
- "documentCount": len(formatDocuments),
- "documents": formatDocuments,
- "results": results
- }
-
- async def waitForWorkflowCompletion(self, timeout: Optional[int] = None, checkInterval: int = 2) -> bool:
- """Wait for workflow to complete."""
- if not self.workflow:
- return False
-
- startTime = time.time()
- lastStatus = None
-
- interfaceDbChat = interfaceDbChat.getInterface(self.testUser)
-
- if timeout is None:
- print("Waiting indefinitely (no timeout)")
-
- while True:
- # Check timeout only if specified
- if timeout is not None and time.time() - startTime > timeout:
- print(f"\n⏱️ Timeout after {timeout} seconds")
- return False
-
- # Get current workflow status
- try:
- currentWorkflow = interfaceDbChat.getWorkflow(self.workflow.id)
- if not currentWorkflow:
- print("\n❌ Workflow not found")
- return False
-
- currentStatus = currentWorkflow.status
- elapsed = int(time.time() - startTime)
-
- # Print status if it changed
- if currentStatus != lastStatus:
- print(f"Workflow status: {currentStatus} (elapsed: {elapsed}s)")
- lastStatus = currentStatus
-
- # Check if workflow is complete
- if currentStatus in ["completed", "stopped", "failed"]:
- self.workflow = currentWorkflow
- statusIcon = "✅" if currentStatus == "completed" else "❌"
- print(f"\n{statusIcon} Workflow finished with status: {currentStatus} (elapsed: {elapsed}s)")
- return currentStatus == "completed"
-
- # Wait before next check
- await asyncio.sleep(checkInterval)
-
- except Exception as e:
- print(f"\n⚠️ Error checking workflow status: {str(e)}")
- await asyncio.sleep(checkInterval)
-
- def analyzeWorkflowResults(self) -> Dict[str, Any]:
- """Analyze workflow results and extract information."""
- if not self.workflow:
- return {"error": "No workflow to analyze"}
-
- interfaceDbChat = interfaceDbChat.getInterface(self.testUser)
- workflow = interfaceDbChat.getWorkflow(self.workflow.id)
-
- if not workflow:
- return {"error": "Workflow not found"}
-
- # Get unified chat data
- chatData = interfaceDbChat.getUnifiedChatData(workflow.id, None)
-
- # Count messages
- messages = chatData.get("messages", [])
- userMessages = [m for m in messages if m.get("role") == "user"]
- assistantMessages = [m for m in messages if m.get("role") == "assistant"]
-
- # Count documents
- documents = chatData.get("documents", [])
-
- # Get logs
- logs = chatData.get("logs", [])
-
- results = {
- "workflowId": workflow.id,
- "status": workflow.status,
- "workflowMode": str(workflow.workflowMode) if hasattr(workflow, 'workflowMode') else None,
- "currentRound": workflow.currentRound,
- "totalTasks": workflow.totalTasks,
- "totalActions": workflow.totalActions,
- "messageCount": len(messages),
- "userMessageCount": len(userMessages),
- "assistantMessageCount": len(assistantMessages),
- "documentCount": len(documents),
- "logCount": len(logs),
- "documents": documents,
- "logs": logs
- }
-
- print(f"\nWorkflow Results:")
- print(f" Status: {results['status']}")
- print(f" Tasks: {results['totalTasks']}")
- print(f" Actions: {results['totalActions']}")
- print(f" Messages: {results['messageCount']}")
- print(f" Documents: {results['documentCount']}")
-
- # Print document details
- if documents:
- print(f"\nGenerated Documents:")
- for doc in documents:
- fileName = doc.get("fileName", "unknown")
- fileSize = doc.get("fileSize", 0)
- mimeType = doc.get("mimeType", "unknown")
- print(f" - {fileName} ({fileSize} bytes, {mimeType})")
-
- return results
-
- def verifyDocumentFormat(self, document: Dict[str, Any], expectedFormat: str) -> Dict[str, Any]:
- """Verify that a document matches the expected format."""
- fileName = document.get("fileName", "")
- mimeType = document.get("mimeType", "")
- fileSize = document.get("fileSize", 0)
-
- # Expected MIME types
- expectedMimeTypes = {
- "html": ["text/html", "application/xhtml+xml"],
- "pdf": ["application/pdf"],
- "docx": ["application/vnd.openxmlformats-officedocument.wordprocessingml.document"],
- "xlsx": ["application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"],
- "pptx": ["application/vnd.openxmlformats-officedocument.presentationml.presentation"]
- }
-
- # Expected file extensions
- expectedExtensions = {
- "html": [".html", ".htm"],
- "pdf": [".pdf"],
- "docx": [".docx"],
- "xlsx": [".xlsx"],
- "pptx": [".pptx"]
- }
-
- formatLower = expectedFormat.lower()
- expectedMimes = expectedMimeTypes.get(formatLower, [])
- expectedExts = expectedExtensions.get(formatLower, [])
-
- # Check file extension
- hasCorrectExtension = any(fileName.lower().endswith(ext) for ext in expectedExts)
-
- # Check MIME type
- hasCorrectMimeType = any(mimeType.lower() == mime.lower() for mime in expectedMimes)
-
- # Check file size (should be > 0)
- hasValidSize = fileSize > 0
-
- verification = {
- "format": expectedFormat,
- "fileName": fileName,
- "mimeType": mimeType,
- "fileSize": fileSize,
- "hasCorrectExtension": hasCorrectExtension,
- "hasCorrectMimeType": hasCorrectMimeType,
- "hasValidSize": hasValidSize,
- "isValid": hasCorrectExtension and hasValidSize
- }
-
- return verification
-
- async def testRefactoringFeatures(self) -> Dict[str, Any]:
- """Test specific refactoring features."""
- print("\n" + "="*80)
- print("TESTING REFACTORING FEATURES")
- print("="*80)
-
- refactoringTests = [
- ("intent_analysis", "html"),
- ("conditional_extraction", "html"),
- ("image_render", "html"),
- ("multi_document", "html"),
- ("metadata_preservation", "html")
- ]
-
- results = {}
-
- for testType, format in refactoringTests:
- try:
- print(f"\n{'='*80}")
- print(f"Testing Refactoring Feature: {testType}")
- print(f"{'='*80}")
-
- prompt = self.createRefactoringTestPrompt(testType, format)
- print(f"Prompt: {prompt[:200]}...")
-
- # Create user input request with PDF file attachment
- listFileId = []
- if self.pdfFileId:
- listFileId = [self.pdfFileId]
- print(f"Attaching PDF file (ID: {self.pdfFileId})")
- else:
- print("⚠️ No PDF file attached (file upload may have failed)")
-
- userInput = UserInputRequest(
- prompt=prompt,
- listFileId=listFileId,
- userLanguage="en"
- )
-
- # Start workflow
- print(f"\nStarting workflow for {testType} test...")
- workflow = await chatStart(
- currentUser=self.testUser,
- userInput=userInput,
- workflowMode=WorkflowModeEnum.WORKFLOW_DYNAMIC,
- workflowId=None
- )
-
- if not workflow:
- results[testType] = {
- "success": False,
- "error": "Failed to start workflow"
- }
- continue
-
- self.workflow = workflow
- print(f"Workflow started: {workflow.id}")
-
- # Wait for workflow completion (no timeout - wait indefinitely)
- completed = await self.waitForWorkflowCompletion(timeout=None)
-
- if not completed:
- results[testType] = {
- "success": False,
- "error": "Workflow did not complete",
- "workflowId": workflow.id
- }
- continue
-
- # Analyze results
- workflowResults = self.analyzeWorkflowResults()
-
- # Check for specific refactoring features
- verification = self.verifyRefactoringFeature(testType, workflowResults)
-
- results[testType] = {
- "success": True,
- "workflowId": workflow.id,
- "verification": verification,
- "workflowResults": workflowResults
- }
-
- print(f"\n✅ {testType} test completed!")
- print(f" Verification: {'✅ PASS' if verification.get('passed', False) else '❌ FAIL'}")
- if verification.get("details"):
- for detail in verification["details"]:
- print(f" - {detail}")
-
- await asyncio.sleep(2)
-
- except Exception as e:
- import traceback
- print(f"\n❌ Error testing {testType}: {str(e)}")
- print(traceback.format_exc())
- results[testType] = {
- "success": False,
- "error": str(e),
- "traceback": traceback.format_exc()
- }
-
- return results
-
- def verifyRefactoringFeature(self, testType: str, workflowResults: Dict[str, Any]) -> Dict[str, Any]:
- """Verify that a refactoring feature works correctly."""
- documents = workflowResults.get("documents", [])
- logs = workflowResults.get("logs", [])
-
- verification = {
- "testType": testType,
- "passed": False,
- "details": []
- }
-
- if testType == "intent_analysis":
- # Check that intent analysis was performed
- intentLogs = [log for log in logs if "intent" in str(log).lower() or "analyzing document intent" in str(log).lower()]
- if intentLogs:
- verification["details"].append("Intent analysis logs found")
- verification["passed"] = True
- else:
- verification["details"].append("No intent analysis logs found")
-
- elif testType == "conditional_extraction":
- # Check that extraction and rendering both occurred
- extractionLogs = [log for log in logs if "extract" in str(log).lower()]
- renderLogs = [log for log in logs if "render" in str(log).lower() or "image" in str(log).lower()]
- if extractionLogs and renderLogs:
- verification["details"].append("Both extraction and rendering occurred")
- verification["passed"] = True
- else:
- verification["details"].append(f"Missing logs: extraction={len(extractionLogs)}, render={len(renderLogs)}")
-
- elif testType == "image_render":
- # Check that images were rendered (not extracted as text)
- imageLogs = [log for log in logs if "image" in str(log).lower()]
- if imageLogs:
- verification["details"].append("Image rendering logs found")
- verification["passed"] = True
- else:
- verification["details"].append("No image rendering logs found")
-
- elif testType == "multi_document":
- # Check that multiple documents were generated
- if len(documents) >= 2:
- verification["details"].append(f"Multiple documents generated: {len(documents)}")
- verification["passed"] = True
- else:
- verification["details"].append(f"Expected multiple documents, got {len(documents)}")
-
- elif testType == "metadata_preservation":
- # Check that metadata was preserved (check logs for documentId references)
- metadataLogs = [log for log in logs if "documentId" in str(log) or "SOURCE:" in str(log)]
- if metadataLogs:
- verification["details"].append("Metadata preservation logs found")
- verification["passed"] = True
- else:
- verification["details"].append("No metadata preservation logs found")
-
- return verification
-
- async def testAllFormats(self) -> Dict[str, Any]:
- """Test document generation in all formats."""
- print("\n" + "="*80)
- print("TESTING DOCUMENT GENERATION IN ALL FORMATS")
- print("="*80)
-
- formats = ["html", "pdf", "docx", "xlsx", "pptx"]
- results = {}
-
- for format in formats:
- try:
- print(f"\n{'='*80}")
- print(f"Testing {format.upper()} format...")
- print(f"{'='*80}")
-
- result = await self.generateDocumentInFormat(format)
- results[format] = result
-
- if result.get("success"):
- documents = result.get("documents", [])
- if documents:
- # Verify first document
- verification = self.verifyDocumentFormat(documents[0], format)
- result["verification"] = verification
-
- print(f"\n✅ {format.upper()} generation successful!")
- print(f" Documents: {len(documents)}")
- print(f" Verification: {'✅ PASS' if verification['isValid'] else '❌ FAIL'}")
- if verification.get("fileName"):
- print(f" File: {verification['fileName']}")
- print(f" Size: {verification['fileSize']} bytes")
- print(f" MIME: {verification['mimeType']}")
- else:
- print(f"\n⚠️ {format.upper()} generation completed but no documents found")
- else:
- error = result.get("error", "Unknown error")
- print(f"\n❌ {format.upper()} generation failed: {error}")
-
- # Small delay between tests
- await asyncio.sleep(2)
-
- except Exception as e:
- import traceback
- print(f"\n❌ Error testing {format.upper()}: {str(e)}")
- print(traceback.format_exc())
- results[format] = {
- "success": False,
- "error": str(e),
- "traceback": traceback.format_exc()
- }
-
- return results
-
- async def runTest(self, includeRefactoringTests: bool = True):
- """Run the complete test.
-
- Args:
- includeRefactoringTests: If True, also run refactoring feature tests
- """
- print("\n" + "="*80)
- print("DOCUMENT GENERATION FORMATS TEST")
- print("="*80)
-
- try:
- # Initialize
- await self.initialize()
-
- # Test refactoring features first (if enabled)
- refactoringResults = {}
- if includeRefactoringTests:
- refactoringResults = await self.testRefactoringFeatures()
-
- # Test all formats
- formatResults = await self.testAllFormats()
-
- # Summary
- print("\n" + "="*80)
- print("TEST SUMMARY")
- print("="*80)
-
- # Refactoring tests summary
- refactoringSuccessCount = 0
- refactoringFailCount = 0
- if includeRefactoringTests and refactoringResults:
- print("\nRefactoring Features:")
- for testType, result in refactoringResults.items():
- if result.get("success"):
- refactoringSuccessCount += 1
- verification = result.get("verification", {})
- passed = verification.get("passed", False)
- statusIcon = "✅" if passed else "⚠️"
- print(f"{statusIcon} {testType:25s}: {'PASS' if passed else 'FAIL'}")
- else:
- refactoringFailCount += 1
- error = result.get("error", "Unknown error")
- print(f"❌ {testType:25s}: FAIL - {error}")
- print(f"Refactoring Tests: {refactoringSuccessCount} passed, {refactoringFailCount} failed out of {len(refactoringResults)} tests")
-
- # Format tests summary
- print("\nFormat Tests:")
- successCount = 0
- failCount = 0
-
- for format, result in formatResults.items():
- if result.get("success"):
- successCount += 1
- status = "✅ PASS"
- docCount = result.get("documentCount", 0)
- verification = result.get("verification", {})
- isValid = verification.get("isValid", False)
- statusIcon = "✅" if isValid else "⚠️"
- print(f"{statusIcon} {format.upper():6s}: {status} - {docCount} document(s)")
- else:
- failCount += 1
- error = result.get("error", "Unknown error")
- print(f"❌ {format.upper():6s}: FAIL - {error}")
-
- print(f"\nFormat Tests: {successCount} passed, {failCount} failed out of {len(formatResults)} formats")
-
- # Calculate totals
- totalSuccess = successCount + refactoringSuccessCount if includeRefactoringTests else successCount
- totalFail = failCount + refactoringFailCount if includeRefactoringTests else failCount
-
- self.testResults = {
- "success": failCount == 0 and (not includeRefactoringTests or refactoringFailCount == 0),
- "formatTests": {
- "successCount": successCount,
- "failCount": failCount,
- "totalFormats": len(formatResults),
- "results": formatResults
- },
- "refactoringTests": {
- "successCount": refactoringSuccessCount if includeRefactoringTests else 0,
- "failCount": refactoringFailCount if includeRefactoringTests else 0,
- "totalTests": len(refactoringResults) if includeRefactoringTests else 0,
- "results": refactoringResults if includeRefactoringTests else {}
- },
- "totalSuccess": totalSuccess,
- "totalFail": totalFail
- }
-
- return self.testResults
-
- except Exception as e:
- import traceback
- print(f"\n❌ Test failed with error: {type(e).__name__}: {str(e)}")
- print(f"Traceback:\n{traceback.format_exc()}")
- self.testResults = {
- "success": False,
- "error": str(e),
- "traceback": traceback.format_exc()
- }
- return self.testResults
-
-
-async def main():
- """Run document generation formats test."""
- tester = DocumentGenerationFormatsTester()
- results = await tester.runTest()
-
- # Print final results as JSON for easy parsing
- print("\n" + "="*80)
- print("FINAL RESULTS (JSON)")
- print("="*80)
- print(json.dumps(results, indent=2, default=str))
-
-
-if __name__ == "__main__":
- asyncio.run(main())
-
diff --git a/tests/functional/test10_document_generation_formats.py b/tests/functional/test10_document_generation_formats.py
deleted file mode 100644
index 043f0690..00000000
--- a/tests/functional/test10_document_generation_formats.py
+++ /dev/null
@@ -1,558 +0,0 @@
-#!/usr/bin/env python3
-# Copyright (c) 2025 Patrick Motsch
-# All rights reserved.
-"""
-Document Generation Formats Test 10 - Tests document generation in DOCX, XLSX, PPTX, and PDF formats
-Tests professional document formats with various content types including tables, images, and structured data.
-"""
-
-import asyncio
-import json
-import sys
-import os
-import time
-import base64
-from typing import Dict, Any, List, Optional
-
-# Add the gateway to path (go up 2 levels from tests/functional/)
-_gateway_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", ".."))
-if _gateway_path not in sys.path:
- sys.path.insert(0, _gateway_path)
-
-# Import the service initialization
-from modules.serviceHub import getInterface as getServices
-from modules.datamodels.datamodelChat import UserInputRequest, WorkflowModeEnum
-from modules.datamodels.datamodelUam import User
-from modules.workflows.automation import chatStart
-import modules.interfaces.interfaceDbChat as interfaceFeatureAiChat
-
-
-class DocumentGenerationFormatsTester10:
- def __init__(self):
- # Use root user for testing (has full access to everything)
- from modules.interfaces.interfaceDbApp import getRootInterface
- from modules.datamodels.datamodelUam import Mandate
- rootInterface = getRootInterface()
- self.testUser = rootInterface.currentUser
- # Get initial mandate ID for testing (User has no mandateId - use initial mandate)
- self.testMandateId = rootInterface.getInitialId(Mandate)
-
- # Initialize services using the existing system
- self.services = getServices(self.testUser, None) # Test user, no workflow
- self.workflow = None
- self.testResults = {}
- self.generatedDocuments = {}
- self.pdfFileId = None # Store PDF file ID for reuse
-
- async def initialize(self):
- """Initialize the test environment."""
- # Enable debug file logging for tests
- from modules.shared.configuration import APP_CONFIG
- APP_CONFIG.set("APP_DEBUG_CHAT_WORKFLOW_ENABLED", True)
-
- # Set logging level to INFO to see workflow progress
- import logging
- logging.getLogger().setLevel(logging.INFO)
-
- print(f"Initialized test with user: {self.testUser.id}")
- print(f"Test Mandate ID: {self.testMandateId}")
- print(f"Debug logging enabled: {APP_CONFIG.get('APP_DEBUG_CHAT_WORKFLOW_ENABLED', False)}")
-
- # Upload PDF file for testing
- await self.uploadPdfFile()
-
- async def uploadPdfFile(self):
- """Upload the PDF file and store its file ID."""
- pdfPath = os.path.join(os.path.dirname(__file__), "..", "..", "..", "local", "temp", "B2025-02c.pdf")
- pdfPath = os.path.abspath(pdfPath)
-
- if not os.path.exists(pdfPath):
- print(f"⚠️ Warning: PDF file not found at {pdfPath}")
- print(" Test will continue without PDF attachment")
- return
-
- try:
- # Read PDF file
- with open(pdfPath, "rb") as f:
- pdfContent = f.read()
-
- # Create file using services.interfaceDbComponent
- if not hasattr(self.services, 'interfaceDbComponent') or not self.services.interfaceDbComponent:
- print("⚠️ Warning: interfaceDbComponent not available in services")
- print(" Test will continue without PDF attachment")
- return
-
- interfaceDbComponent = self.services.interfaceDbComponent
-
- fileItem = interfaceDbComponent.createFile(
- name="B2025-02c.pdf",
- mimeType="application/pdf",
- content=pdfContent
- )
-
- # Store file data
- interfaceDbComponent.createFileData(fileItem.id, pdfContent)
-
- self.pdfFileId = fileItem.id
- print(f"✅ Uploaded PDF file: {fileItem.fileName} (ID: {self.pdfFileId}, Size: {len(pdfContent)} bytes)")
-
- except Exception as e:
- import traceback
- print(f"⚠️ Warning: Failed to upload PDF file: {str(e)}")
- print(f" Traceback: {traceback.format_exc()}")
- print(" Test will continue without PDF attachment")
-
- def createTestPrompt(self, format: str) -> str:
- """Create a test prompt for document generation in the specified format.
-
- The prompt requests:
- - Professional document structure with title, sections, tables, and images
- - Extraction of content from attached PDF
- - Structured data presentation appropriate for the format
- """
- formatPrompts = {
- "docx": (
- "Create a professional Word document about 'Fuel Station Receipt Analysis' with:\n"
- "1) A main title\n"
- "2) An executive summary paragraph\n"
- "3) Extract and include the image from the attached PDF document (B2025-02c.pdf)\n"
- "4) A detailed analysis section with:\n"
- " - Bullet points of key findings\n"
- " - A table summarizing transaction details\n"
- "5) A conclusion section with recommendations\n\n"
- "Format as a professional DOCX document with proper headings and structure."
- ),
- "xlsx": (
- "Create an Excel spreadsheet analyzing the fuel station receipt from the attached PDF (B2025-02c.pdf).\n"
- "Include:\n"
- "1) A summary sheet with key metrics\n"
- "2) A detailed data sheet with:\n"
- " - Transaction details in rows\n"
- " - Columns for: Date, Item, Quantity, Price, Total\n"
- " - Proper formatting and headers\n"
- "3) A calculations sheet with:\n"
- " - VAT calculations\n"
- " - Net and gross totals\n\n"
- "Format as a professional XLSX spreadsheet with formulas and formatting."
- ),
- "pptx": (
- "Create a PowerPoint presentation about 'Fuel Station Receipt Analysis' with:\n"
- "1) Title slide with main title\n"
- "2) Overview slide explaining the receipt analysis\n"
- "3) Extract and include the image from the attached PDF document (B2025-02c.pdf)\n"
- "4) Analysis slides with:\n"
- " - Bullet points of key findings\n"
- " - Visual representation of data\n"
- "5) Conclusion slide with recommendations\n\n"
- "Format as a professional PPTX presentation with consistent styling."
- ),
- "pdf": (
- "Create a professional PDF document about 'Fuel Station Receipt Analysis' with:\n"
- "1) A main title\n"
- "2) An introduction paragraph explaining the receipt analysis\n"
- "3) Extract and include the image from the attached PDF document (B2025-02c.pdf)\n"
- "4) A section analyzing the receipt data with:\n"
- " - Bullet points of key findings\n"
- " - A table summarizing transaction details\n"
- "5) A conclusion paragraph with recommendations\n\n"
- "Format as a professional PDF document suitable for printing."
- ),
- "html": (
- "Create a professional HTML document about 'Fuel Station Receipt Analysis' with:\n"
- "1) A main title\n"
- "2) An introduction paragraph explaining the receipt analysis\n"
- "3) Extract and include the image from the attached PDF document (B2025-02c.pdf)\n"
- "4) A section analyzing the receipt data with:\n"
- " - Bullet points of key findings\n"
- " - A table summarizing transaction details\n"
- "5) A conclusion paragraph with recommendations\n\n"
- "Format as a professional HTML document with proper styling, responsive design, and embedded CSS."
- )
- }
-
- return formatPrompts.get(format.lower(), formatPrompts["docx"])
-
- async def generateDocumentInFormat(self, format: str) -> Dict[str, Any]:
- """Generate a document in the specified format using workflow."""
- print("\n" + "="*80)
- print(f"GENERATING DOCUMENT IN {format.upper()} FORMAT")
- print("="*80)
-
- prompt = self.createTestPrompt(format)
- print(f"Prompt: {prompt[:200]}...")
-
- # Create user input request with PDF file attachment
- listFileId = []
- if self.pdfFileId:
- listFileId = [self.pdfFileId]
- print(f"Attaching PDF file (ID: {self.pdfFileId})")
- else:
- print("⚠️ No PDF file attached (file upload may have failed)")
-
- # Create user input request
- userInput = UserInputRequest(
- prompt=prompt,
- listFileId=listFileId,
- userLanguage="en"
- )
-
- # Start workflow
- print(f"\nStarting workflow for {format.upper()} generation...")
- workflow = await chatStart(
- currentUser=self.testUser,
- userInput=userInput,
- workflowMode=WorkflowModeEnum.WORKFLOW_DYNAMIC,
- workflowId=None
- )
-
- if not workflow:
- return {
- "success": False,
- "error": "Failed to start workflow"
- }
-
- self.workflow = workflow
- print(f"Workflow started: {workflow.id}")
-
- # Wait for workflow completion (no timeout - wait indefinitely)
- print(f"Waiting for workflow completion...")
- completed = await self.waitForWorkflowCompletion(timeout=None)
-
- if not completed:
- return {
- "success": False,
- "error": "Workflow did not complete",
- "workflowId": workflow.id,
- "status": workflow.status if workflow else "unknown"
- }
-
- # Analyze results
- results = self.analyzeWorkflowResults()
-
- # Extract documents for this format
- documents = results.get("documents", [])
- formatDocuments = [d for d in documents if d.get("fileName", "").endswith(f".{format.lower()}")]
-
- return {
- "success": True,
- "format": format,
- "workflowId": workflow.id,
- "status": results.get("status"),
- "documentCount": len(formatDocuments),
- "documents": formatDocuments,
- "results": results
- }
-
- async def waitForWorkflowCompletion(self, timeout: Optional[int] = None, checkInterval: int = 2) -> bool:
- """Wait for workflow to complete."""
- if not self.workflow:
- return False
-
- startTime = time.time()
- lastStatus = None
-
- interfaceDbChat = interfaceDbChat.getInterface(self.testUser)
-
- if timeout is None:
- print("Waiting indefinitely (no timeout)")
-
- while True:
- # Check timeout only if specified
- if timeout is not None and time.time() - startTime > timeout:
- print(f"\n⏱️ Timeout after {timeout} seconds")
- return False
-
- # Get current workflow status
- try:
- currentWorkflow = interfaceDbChat.getWorkflow(self.workflow.id)
- if not currentWorkflow:
- print("\n❌ Workflow not found")
- return False
-
- currentStatus = currentWorkflow.status
- elapsed = int(time.time() - startTime)
-
- # Print status if it changed
- if currentStatus != lastStatus:
- print(f"Workflow status: {currentStatus} (elapsed: {elapsed}s)")
- lastStatus = currentStatus
-
- # Check if workflow is complete
- if currentStatus in ["completed", "stopped", "failed"]:
- self.workflow = currentWorkflow
- statusIcon = "✅" if currentStatus == "completed" else "❌"
- print(f"\n{statusIcon} Workflow finished with status: {currentStatus} (elapsed: {elapsed}s)")
- return currentStatus == "completed"
-
- # Wait before next check
- await asyncio.sleep(checkInterval)
-
- except Exception as e:
- print(f"\n⚠️ Error checking workflow status: {str(e)}")
- await asyncio.sleep(checkInterval)
-
- def analyzeWorkflowResults(self) -> Dict[str, Any]:
- """Analyze workflow results and extract information."""
- if not self.workflow:
- return {"error": "No workflow to analyze"}
-
- interfaceDbChat = interfaceDbChat.getInterface(self.testUser)
- workflow = interfaceDbChat.getWorkflow(self.workflow.id)
-
- if not workflow:
- return {"error": "Workflow not found"}
-
- # Get unified chat data
- chatData = interfaceDbChat.getUnifiedChatData(workflow.id, None)
-
- # Count messages
- messages = chatData.get("messages", [])
- userMessages = [m for m in messages if m.get("role") == "user"]
- assistantMessages = [m for m in messages if m.get("role") == "assistant"]
-
- # Count documents
- documents = chatData.get("documents", [])
-
- # Get logs
- logs = chatData.get("logs", [])
-
- results = {
- "workflowId": workflow.id,
- "status": workflow.status,
- "workflowMode": str(workflow.workflowMode) if hasattr(workflow, 'workflowMode') else None,
- "currentRound": workflow.currentRound,
- "totalTasks": workflow.totalTasks,
- "totalActions": workflow.totalActions,
- "messageCount": len(messages),
- "userMessageCount": len(userMessages),
- "assistantMessageCount": len(assistantMessages),
- "documentCount": len(documents),
- "logCount": len(logs),
- "documents": documents,
- "logs": logs
- }
-
- print(f"\nWorkflow Results:")
- print(f" Status: {results['status']}")
- print(f" Tasks: {results['totalTasks']}")
- print(f" Actions: {results['totalActions']}")
- print(f" Messages: {results['messageCount']}")
- print(f" Documents: {results['documentCount']}")
-
- # Print document details
- if documents:
- print(f"\nGenerated Documents:")
- for doc in documents:
- fileName = doc.get("fileName", "unknown")
- fileSize = doc.get("fileSize", 0)
- mimeType = doc.get("mimeType", "unknown")
- documentType = doc.get("documentType", "N/A")
- print(f" - {fileName} ({fileSize} bytes, {mimeType}, type: {documentType})")
-
- return results
-
- def verifyDocumentFormat(self, document: Dict[str, Any], expectedFormat: str) -> Dict[str, Any]:
- """Verify that a document matches the expected format and contains expected metadata."""
- fileName = document.get("fileName", "")
- mimeType = document.get("mimeType", "")
- fileSize = document.get("fileSize", 0)
- documentType = document.get("documentType")
- metadata = document.get("metadata")
-
- # Expected MIME types
- expectedMimeTypes = {
- "pdf": ["application/pdf"],
- "docx": ["application/vnd.openxmlformats-officedocument.wordprocessingml.document"],
- "xlsx": ["application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"],
- "pptx": ["application/vnd.openxmlformats-officedocument.presentationml.presentation"],
- "html": ["text/html", "application/xhtml+xml"]
- }
-
- # Expected file extensions
- expectedExtensions = {
- "pdf": [".pdf"],
- "docx": [".docx"],
- "xlsx": [".xlsx"],
- "pptx": [".pptx"],
- "html": [".html", ".htm"]
- }
-
- formatLower = expectedFormat.lower()
- expectedMimes = expectedMimeTypes.get(formatLower, [])
- expectedExts = expectedExtensions.get(formatLower, [])
-
- # Check file extension
- hasCorrectExtension = any(fileName.lower().endswith(ext) for ext in expectedExts)
-
- # Check MIME type
- hasCorrectMimeType = any(mimeType.lower() == mime.lower() for mime in expectedMimes)
-
- # Check file size (should be > 0)
- hasValidSize = fileSize > 0
-
- # Check document type (should be present)
- hasDocumentType = documentType is not None
-
- # Check metadata (should be present)
- hasMetadata = metadata is not None and isinstance(metadata, dict)
-
- verification = {
- "format": expectedFormat,
- "fileName": fileName,
- "mimeType": mimeType,
- "fileSize": fileSize,
- "documentType": documentType,
- "hasMetadata": hasMetadata,
- "hasCorrectExtension": hasCorrectExtension,
- "hasCorrectMimeType": hasCorrectMimeType,
- "hasValidSize": hasValidSize,
- "hasDocumentType": hasDocumentType,
- "isValid": hasCorrectExtension and hasValidSize and hasCorrectMimeType,
- "isComplete": hasCorrectExtension and hasValidSize and hasCorrectMimeType and hasDocumentType and hasMetadata
- }
-
- return verification
-
- async def testAllFormats(self) -> Dict[str, Any]:
- """Test document generation in DOCX, XLSX, PPTX, PDF, and HTML formats."""
- print("\n" + "="*80)
- print("TESTING DOCUMENT GENERATION IN ALL FORMATS")
- print("="*80)
-
- # Test all document formats
- formats = ["docx", "xlsx", "pptx", "pdf", "html"]
- results = {}
-
- for format in formats:
- try:
- print(f"\n{'='*80}")
- print(f"Testing {format.upper()} format...")
- print(f"{'='*80}")
-
- result = await self.generateDocumentInFormat(format)
- results[format] = result
-
- if result.get("success"):
- documents = result.get("documents", [])
- if documents:
- # Verify first document
- verification = self.verifyDocumentFormat(documents[0], format)
- result["verification"] = verification
-
- print(f"\n✅ {format.upper()} generation successful!")
- print(f" Documents: {len(documents)}")
- print(f" Verification: {'✅ PASS' if verification['isValid'] else '❌ FAIL'}")
- print(f" Complete (with metadata): {'✅ YES' if verification['isComplete'] else '❌ NO'}")
- if verification.get("fileName"):
- print(f" File: {verification['fileName']}")
- print(f" Size: {verification['fileSize']} bytes")
- print(f" MIME: {verification['mimeType']}")
- print(f" Document Type: {verification.get('documentType', 'N/A')}")
- print(f" Has Metadata: {'✅' if verification.get('hasMetadata') else '❌'}")
- else:
- print(f"\n⚠️ {format.upper()} generation completed but no documents found")
- else:
- error = result.get("error", "Unknown error")
- print(f"\n❌ {format.upper()} generation failed: {error}")
-
- # Small delay between tests
- await asyncio.sleep(2)
-
- except Exception as e:
- import traceback
- print(f"\n❌ Error testing {format.upper()}: {str(e)}")
- print(traceback.format_exc())
- results[format] = {
- "success": False,
- "error": str(e),
- "traceback": traceback.format_exc()
- }
-
- return results
-
- async def runTest(self):
- """Run the complete test."""
- print("\n" + "="*80)
- print("DOCUMENT GENERATION FORMATS TEST 10 - ALL FORMATS")
- print("="*80)
-
- try:
- # Initialize
- await self.initialize()
-
- # Test all formats
- formatResults = await self.testAllFormats()
-
- # Summary
- print("\n" + "="*80)
- print("TEST SUMMARY")
- print("="*80)
-
- # Format tests summary
- print("\nFormat Tests:")
- successCount = 0
- failCount = 0
- completeCount = 0 # Documents with metadata
-
- for format, result in formatResults.items():
- if result.get("success"):
- successCount += 1
- verification = result.get("verification", {})
- isValid = verification.get("isValid", False)
- isComplete = verification.get("isComplete", False)
- if isComplete:
- completeCount += 1
- statusIcon = "✅" if isValid else "⚠️"
- completeIcon = "✅" if isComplete else "❌"
- docCount = result.get("documentCount", 0)
- print(f"{statusIcon} {format.upper():6s}: {'PASS' if isValid else 'FAIL'} - {docCount} document(s) - Metadata: {completeIcon}")
- else:
- failCount += 1
- error = result.get("error", "Unknown error")
- print(f"❌ {format.upper():6s}: FAIL - {error}")
-
- print(f"\nFormat Tests: {successCount} passed, {failCount} failed out of {len(formatResults)} formats")
- print(f"Complete Documents (with metadata): {completeCount} out of {successCount} successful generations")
-
- self.testResults = {
- "success": failCount == 0,
- "formatTests": {
- "successCount": successCount,
- "failCount": failCount,
- "completeCount": completeCount,
- "totalFormats": len(formatResults),
- "results": formatResults
- },
- "totalSuccess": successCount,
- "totalFail": failCount
- }
-
- return self.testResults
-
- except Exception as e:
- import traceback
- print(f"\n❌ Test failed with error: {type(e).__name__}: {str(e)}")
- print(f"Traceback:\n{traceback.format_exc()}")
- self.testResults = {
- "success": False,
- "error": str(e),
- "traceback": traceback.format_exc()
- }
- return self.testResults
-
-
-async def main():
- """Run document generation formats test 10."""
- tester = DocumentGenerationFormatsTester10()
- results = await tester.runTest()
-
- # Print final results as JSON for easy parsing
- print("\n" + "="*80)
- print("FINAL RESULTS (JSON)")
- print("="*80)
- print(json.dumps(results, indent=2, default=str))
-
-
-if __name__ == "__main__":
- asyncio.run(main())
-
diff --git a/tests/functional/test11_code_generation_formats.py b/tests/functional/test11_code_generation_formats.py
deleted file mode 100644
index cf410e71..00000000
--- a/tests/functional/test11_code_generation_formats.py
+++ /dev/null
@@ -1,559 +0,0 @@
-#!/usr/bin/env python3
-# Copyright (c) 2025 Patrick Motsch
-# All rights reserved.
-"""
-Code Generation Formats Test 11 - Tests code generation in JSON, CSV, and XML formats
-Tests code generation with structured data formats including validation and formatting.
-"""
-
-import asyncio
-import json
-import sys
-import os
-import time
-import csv
-import io
-import xml.etree.ElementTree as ET
-from typing import Dict, Any, List, Optional
-
-# Add the gateway to path (go up 2 levels from tests/functional/)
-_gateway_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", ".."))
-if _gateway_path not in sys.path:
- sys.path.insert(0, _gateway_path)
-
-# Import the service initialization
-from modules.serviceHub import getInterface as getServices
-from modules.datamodels.datamodelChat import UserInputRequest, WorkflowModeEnum
-from modules.datamodels.datamodelUam import User
-from modules.workflows.automation import chatStart
-import modules.interfaces.interfaceDbChat as interfaceFeatureAiChat
-
-
-class CodeGenerationFormatsTester11:
- def __init__(self):
- # Use root user for testing (has full access to everything)
- from modules.interfaces.interfaceDbApp import getRootInterface
- from modules.datamodels.datamodelUam import Mandate
- rootInterface = getRootInterface()
- self.testUser = rootInterface.currentUser
- # Get initial mandate ID for testing (User has no mandateId - use initial mandate)
- self.testMandateId = rootInterface.getInitialId(Mandate)
-
- # Initialize services using the existing system
- self.services = getServices(self.testUser, None) # Test user, no workflow
- self.workflow = None
- self.testResults = {}
- self.generatedDocuments = {}
-
- async def initialize(self):
- """Initialize the test environment."""
- # Enable debug file logging for tests
- from modules.shared.configuration import APP_CONFIG
- APP_CONFIG.set("APP_DEBUG_CHAT_WORKFLOW_ENABLED", True)
-
- # Set logging level to INFO to see workflow progress
- import logging
- logging.getLogger().setLevel(logging.INFO)
-
- print(f"Initialized test with user: {self.testUser.id}")
- print(f"Test Mandate ID: {self.testMandateId}")
- print(f"Debug logging enabled: {APP_CONFIG.get('APP_DEBUG_CHAT_WORKFLOW_ENABLED', False)}")
-
- def createTestPrompt(self, format: str) -> str:
- """Create a test prompt for code generation in the specified format.
-
- The prompt requests 3 files for each format:
- - Structured data generation appropriate for the format
- - Proper formatting and validation
- """
- formatPrompts = {
- "json": (
- "Generate 3 JSON code files for a customer management system:\n"
- "1) Create a config.json file with:\n"
- " - Application name: 'Customer Manager'\n"
- " - Version: '1.0.0'\n"
- " - Database settings: host, port, name\n"
- " - API settings: baseUrl, timeout\n"
- "2) Create a customers.json file with an array of customer objects:\n"
- " - Each customer should have: id, name, email, phone, address\n"
- " - Include at least 3 sample customers\n"
- "3) Create a settings.json file with:\n"
- " - Theme settings: darkMode, fontSize, language\n"
- " - Notification settings: email, sms, push\n"
- " - Feature flags: enableAnalytics, enableReports\n\n"
- "Format all files as valid JSON with proper indentation."
- ),
- "csv": (
- "Generate 3 CSV code files for expense tracking:\n"
- "1) Create an expenses.csv file with:\n"
- " - Header row: Documentname, Datum, Händler, Kreditkartennummer, Gesamtbetrag, Währung, MWST-Satz\n"
- " - Data rows with at least 5 expense entries\n"
- " - Use consistent date format (DD.MM.YYYY)\n"
- " - Use CHF as currency\n"
- " - Use 7.7% as VAT rate\n"
- "2) Create a categories.csv file with:\n"
- " - Header row: CategoryID, CategoryName, Description, ParentCategory\n"
- " - Data rows with at least 8 categories\n"
- "3) Create a vendors.csv file with:\n"
- " - Header row: VendorID, VendorName, ContactPerson, Email, Phone, Address\n"
- " - Data rows with at least 6 vendors\n\n"
- "Format all files as valid CSV with proper header row and consistent column count."
- ),
- "xml": (
- "Generate 3 XML code files for a product catalog:\n"
- "1) Create a products.xml file with:\n"
- " - Root element: \n"
- " - Each product as element with:\n"
- " - , , , , \n"
- " - Include at least 4 products\n"
- "2) Create a categories.xml file with:\n"
- " - Root element: \n"
- " - Each category as element with:\n"
- " - , , , \n"
- " - Include at least 5 categories\n"
- "3) Create a suppliers.xml file with:\n"
- " - Root element: \n"
- " - Each supplier as element with:\n"
- " - , , , \n"
- " - Include at least 3 suppliers\n\n"
- "Format all files as valid XML with proper indentation and structure."
- )
- }
-
- return formatPrompts.get(format.lower(), formatPrompts["json"])
-
- async def generateCodeInFormat(self, format: str) -> Dict[str, Any]:
- """Generate code in the specified format using workflow."""
- print("\n" + "="*80)
- print(f"GENERATING CODE IN {format.upper()} FORMAT")
- print("="*80)
-
- prompt = self.createTestPrompt(format)
- print(f"Prompt: {prompt[:200]}...")
-
- # Create user input request
- userInput = UserInputRequest(
- prompt=prompt,
- listFileId=[],
- userLanguage="en"
- )
-
- # Start workflow
- print(f"\nStarting workflow for {format.upper()} code generation...")
- workflow = await chatStart(
- currentUser=self.testUser,
- userInput=userInput,
- workflowMode=WorkflowModeEnum.WORKFLOW_DYNAMIC,
- workflowId=None
- )
-
- if not workflow:
- return {
- "success": False,
- "error": "Failed to start workflow"
- }
-
- self.workflow = workflow
- print(f"Workflow started: {workflow.id}")
-
- # Wait for workflow completion (no timeout - wait indefinitely)
- print(f"Waiting for workflow completion...")
- completed = await self.waitForWorkflowCompletion(timeout=None)
-
- if not completed:
- return {
- "success": False,
- "error": "Workflow did not complete",
- "workflowId": workflow.id,
- "status": workflow.status if workflow else "unknown"
- }
-
- # Analyze results
- results = self.analyzeWorkflowResults()
-
- # Extract documents for this format
- documents = results.get("documents", [])
- formatDocuments = [d for d in documents if d.get("fileName", "").endswith(f".{format.lower()}")]
-
- return {
- "success": True,
- "format": format,
- "workflowId": workflow.id,
- "status": results.get("status"),
- "documentCount": len(formatDocuments),
- "documents": formatDocuments,
- "results": results
- }
-
- async def waitForWorkflowCompletion(self, timeout: Optional[int] = None, checkInterval: int = 2) -> bool:
- """Wait for workflow to complete."""
- if not self.workflow:
- return False
-
- startTime = time.time()
- lastStatus = None
-
- interfaceDbChat = interfaceDbChat.getInterface(self.testUser)
-
- if timeout is None:
- print("Waiting indefinitely (no timeout)")
-
- while True:
- # Check timeout only if specified
- if timeout is not None and time.time() - startTime > timeout:
- print(f"\n⏱️ Timeout after {timeout} seconds")
- return False
-
- # Get current workflow status
- try:
- currentWorkflow = interfaceDbChat.getWorkflow(self.workflow.id)
- if not currentWorkflow:
- print("\n❌ Workflow not found")
- return False
-
- currentStatus = currentWorkflow.status
- elapsed = int(time.time() - startTime)
-
- # Print status if it changed
- if currentStatus != lastStatus:
- print(f"Workflow status: {currentStatus} (elapsed: {elapsed}s)")
- lastStatus = currentStatus
-
- # Check if workflow is complete
- if currentStatus in ["completed", "stopped", "failed"]:
- self.workflow = currentWorkflow
- statusIcon = "✅" if currentStatus == "completed" else "❌"
- print(f"\n{statusIcon} Workflow finished with status: {currentStatus} (elapsed: {elapsed}s)")
- return currentStatus == "completed"
-
- # Wait before next check
- await asyncio.sleep(checkInterval)
-
- except Exception as e:
- print(f"\n⚠️ Error checking workflow status: {str(e)}")
- await asyncio.sleep(checkInterval)
-
- def analyzeWorkflowResults(self) -> Dict[str, Any]:
- """Analyze workflow results and extract information."""
- if not self.workflow:
- return {"error": "No workflow to analyze"}
-
- interfaceDbChat = interfaceDbChat.getInterface(self.testUser)
- workflow = interfaceDbChat.getWorkflow(self.workflow.id)
-
- if not workflow:
- return {"error": "Workflow not found"}
-
- # Get unified chat data
- chatData = interfaceDbChat.getUnifiedChatData(workflow.id, None)
-
- # Count messages
- messages = chatData.get("messages", [])
- userMessages = [m for m in messages if m.get("role") == "user"]
- assistantMessages = [m for m in messages if m.get("role") == "assistant"]
-
- # Count documents
- documents = chatData.get("documents", [])
-
- # Get logs
- logs = chatData.get("logs", [])
-
- results = {
- "workflowId": workflow.id,
- "status": workflow.status,
- "workflowMode": str(workflow.workflowMode) if hasattr(workflow, 'workflowMode') else None,
- "currentRound": workflow.currentRound,
- "totalTasks": workflow.totalTasks,
- "totalActions": workflow.totalActions,
- "messageCount": len(messages),
- "userMessageCount": len(userMessages),
- "assistantMessageCount": len(assistantMessages),
- "documentCount": len(documents),
- "logCount": len(logs),
- "documents": documents,
- "logs": logs
- }
-
- print(f"\nWorkflow Results:")
- print(f" Status: {results['status']}")
- print(f" Tasks: {results['totalTasks']}")
- print(f" Actions: {results['totalActions']}")
- print(f" Messages: {results['messageCount']}")
- print(f" Documents: {results['documentCount']}")
-
- # Print document details
- if documents:
- print(f"\nGenerated Documents:")
- for doc in documents:
- fileName = doc.get("fileName", "unknown")
- fileSize = doc.get("fileSize", 0)
- mimeType = doc.get("mimeType", "unknown")
- print(f" - {fileName} ({fileSize} bytes, {mimeType})")
-
- return results
-
-
- def verifyCodeFormat(self, document: Dict[str, Any], expectedFormat: str) -> Dict[str, Any]:
- """Verify that a code file matches the expected format and is valid."""
- fileName = document.get("fileName", "")
- mimeType = document.get("mimeType", "")
- fileSize = document.get("fileSize", 0)
-
- # Expected MIME types
- expectedMimeTypes = {
- "json": ["application/json"],
- "csv": ["text/csv"],
- "xml": ["application/xml", "text/xml"]
- }
-
- # Expected file extensions
- expectedExtensions = {
- "json": [".json"],
- "csv": [".csv"],
- "xml": [".xml"]
- }
-
- formatLower = expectedFormat.lower()
- expectedMimes = expectedMimeTypes.get(formatLower, [])
- expectedExts = expectedExtensions.get(formatLower, [])
-
- # Check file extension
- hasCorrectExtension = any(fileName.lower().endswith(ext) for ext in expectedExts)
-
- # Check MIME type
- hasCorrectMimeType = any(mimeType.lower() == mime.lower() for mime in expectedMimes)
-
- # Check file size (should be > 0)
- hasValidSize = fileSize > 0
-
- # Try to read and validate content
- isValidContent = False
- validationError = None
-
- try:
- # Get file content from fileId
- fileId = document.get("fileId")
- if fileId and hasattr(self.services, 'interfaceDbComponent'):
- fileData = self.services.interfaceDbComponent.getFileData(fileId)
- if fileData:
- content = fileData.decode('utf-8') if isinstance(fileData, bytes) else fileData
-
- # Validate format-specific syntax
- if formatLower == "json":
- try:
- json.loads(content)
- isValidContent = True
- except json.JSONDecodeError as e:
- validationError = f"Invalid JSON: {str(e)}"
-
- elif formatLower == "csv":
- try:
- reader = csv.reader(io.StringIO(content))
- rows = list(reader)
- if len(rows) > 0:
- # Check header row exists
- headerCount = len(rows[0])
- # Check all rows have same column count
- allRowsValid = all(len(row) == headerCount for row in rows)
- isValidContent = allRowsValid
- if not allRowsValid:
- validationError = "CSV rows have inconsistent column counts"
- else:
- validationError = "CSV file is empty"
- except Exception as e:
- validationError = f"CSV parsing error: {str(e)}"
-
- elif formatLower == "xml":
- try:
- ET.fromstring(content)
- isValidContent = True
- except ET.ParseError as e:
- validationError = f"Invalid XML: {str(e)}"
- else:
- validationError = "Could not read file data"
- else:
- validationError = "No fileId available"
-
- except Exception as e:
- validationError = f"Error reading/validating file: {str(e)}"
-
- verification = {
- "format": expectedFormat,
- "fileName": fileName,
- "mimeType": mimeType,
- "fileSize": fileSize,
- "hasCorrectExtension": hasCorrectExtension,
- "hasCorrectMimeType": hasCorrectMimeType,
- "hasValidSize": hasValidSize,
- "isValidContent": isValidContent,
- "validationError": validationError,
- "isValid": hasCorrectExtension and hasValidSize and hasCorrectMimeType,
- "isComplete": hasCorrectExtension and hasValidSize and hasCorrectMimeType and isValidContent
- }
-
- return verification
-
- async def testAllFormats(self) -> Dict[str, Any]:
- """Test code generation in JSON, CSV, and XML formats."""
- print("\n" + "="*80)
- print("TESTING CODE GENERATION IN ALL FORMATS")
- print("="*80)
-
- # Test all code formats
- formats = ["json", "csv", "xml"]
- results = {}
-
- for format in formats:
- try:
- print(f"\n{'='*80}")
- print(f"Testing {format.upper()} format...")
- print(f"{'='*80}")
-
- result = await self.generateCodeInFormat(format)
- results[format] = result
-
- if result.get("success"):
- documents = result.get("documents", [])
- if documents:
- # Verify all documents (expecting 3 files per format)
- verifications = []
- for doc in documents:
- verification = self.verifyCodeFormat(doc, format)
- verifications.append(verification)
-
- result["verifications"] = verifications
-
- # Count valid documents
- validCount = sum(1 for v in verifications if v.get("isValid"))
- contentValidCount = sum(1 for v in verifications if v.get("isValidContent"))
-
- print(f"\n✅ {format.upper()} generation successful!")
- print(f" Documents: {len(documents)} (expected: 3)")
- print(f" Valid Format: {validCount}/{len(documents)}")
- print(f" Valid Content: {contentValidCount}/{len(documents)}")
-
- # Print details for each file
- for i, verification in enumerate(verifications, 1):
- statusIcon = "✅" if verification.get("isValid") else "❌"
- contentIcon = "✅" if verification.get("isValidContent") else "❌"
- print(f" File {i}: {statusIcon} Format, {contentIcon} Content - {verification.get('fileName', 'unknown')}")
- if verification.get("validationError"):
- print(f" Error: {verification['validationError']}")
- else:
- print(f"\n⚠️ {format.upper()} generation completed but no documents found")
- else:
- error = result.get("error", "Unknown error")
- print(f"\n❌ {format.upper()} generation failed: {error}")
-
- # Small delay between tests
- await asyncio.sleep(2)
-
- except Exception as e:
- import traceback
- print(f"\n❌ Error testing {format.upper()}: {str(e)}")
- print(traceback.format_exc())
- results[format] = {
- "success": False,
- "error": str(e),
- "traceback": traceback.format_exc()
- }
-
- return results
-
- async def runTest(self):
- """Run the complete test."""
- print("\n" + "="*80)
- print("CODE GENERATION FORMATS TEST 11 - JSON, CSV, XML")
- print("="*80)
-
- try:
- # Initialize
- await self.initialize()
-
- # Test all formats
- formatResults = await self.testAllFormats()
-
- # Summary
- print("\n" + "="*80)
- print("TEST SUMMARY")
- print("="*80)
-
- # Format tests summary
- print("\nFormat Tests:")
- successCount = 0
- failCount = 0
- completeCount = 0 # Files with valid content
-
- for format, result in formatResults.items():
- if result.get("success"):
- successCount += 1
- verifications = result.get("verifications", [])
- docCount = result.get("documentCount", 0)
-
- # Count valid files
- validCount = sum(1 for v in verifications if v.get("isValid"))
- contentValidCount = sum(1 for v in verifications if v.get("isValidContent"))
- completeCount += contentValidCount
-
- # Overall status (all files valid)
- allValid = len(verifications) > 0 and all(v.get("isValid") for v in verifications)
- allContentValid = len(verifications) > 0 and all(v.get("isValidContent") for v in verifications)
-
- statusIcon = "✅" if allValid else "⚠️"
- contentIcon = "✅" if allContentValid else "❌"
-
- print(f"{statusIcon} {format.upper():6s}: {'PASS' if allValid else 'PARTIAL'} - {docCount} file(s) ({validCount} valid format, {contentValidCount} valid content)")
-
- # Print errors if any
- for v in verifications:
- if v.get("validationError"):
- print(f" {v.get('fileName', 'unknown')}: {v['validationError']}")
- else:
- failCount += 1
- error = result.get("error", "Unknown error")
- print(f"❌ {format.upper():6s}: FAIL - {error}")
-
- print(f"\nFormat Tests: {successCount} passed, {failCount} failed out of {len(formatResults)} formats")
- print(f"Valid Content Files: {completeCount} total files with valid content")
-
- self.testResults = {
- "success": failCount == 0,
- "formatTests": {
- "successCount": successCount,
- "failCount": failCount,
- "completeCount": completeCount,
- "totalFormats": len(formatResults),
- "results": formatResults
- },
- "totalSuccess": successCount,
- "totalFail": failCount
- }
-
- return self.testResults
-
- except Exception as e:
- import traceback
- print(f"\n❌ Test failed with error: {type(e).__name__}: {str(e)}")
- print(f"Traceback:\n{traceback.format_exc()}")
- self.testResults = {
- "success": False,
- "error": str(e),
- "traceback": traceback.format_exc()
- }
- return self.testResults
-
-
-async def main():
- """Run code generation formats test 11."""
- tester = CodeGenerationFormatsTester11()
- results = await tester.runTest()
-
- # Print final results as JSON for easy parsing
- print("\n" + "="*80)
- print("FINAL RESULTS (JSON)")
- print("="*80)
- print(json.dumps(results, indent=2, default=str))
-
-
-if __name__ == "__main__":
- asyncio.run(main())
diff --git a/tests/test_phase123_basic.py b/tests/test_phase123_basic.py
index d13c4271..49e52abb 100644
--- a/tests/test_phase123_basic.py
+++ b/tests/test_phase123_basic.py
@@ -38,10 +38,13 @@ except Exception as e:
try:
from modules.datamodels.datamodelSubscription import SubscriptionStatusEnum, BUILTIN_PLANS, SubscriptionPlan
_check("PENDING status exists", hasattr(SubscriptionStatusEnum, "PENDING"))
- _check("BUILTIN_PLANS has TRIAL_7D", "TRIAL_7D" in BUILTIN_PLANS)
- trial = BUILTIN_PLANS["TRIAL_7D"]
- _check("TRIAL_7D has maxDataVolumeMB", hasattr(trial, "maxDataVolumeMB"))
- _check("TRIAL_7D maxDataVolumeMB=500", trial.maxDataVolumeMB == 500)
+ _check("BUILTIN_PLANS has TRIAL_14D", "TRIAL_14D" in BUILTIN_PLANS)
+ trial = BUILTIN_PLANS["TRIAL_14D"]
+ _check("TRIAL_14D has maxDataVolumeMB", hasattr(trial, "maxDataVolumeMB"))
+ _check("TRIAL_14D maxDataVolumeMB=1024", trial.maxDataVolumeMB == 1024)
+ _check("TRIAL_14D has includedModules", hasattr(trial, "includedModules"))
+ _check("TRIAL_14D includedModules=2", trial.includedModules == 2)
+ _check("TRIAL_14D trialDays=14", trial.trialDays == 14)
except Exception as e:
errors.append(f"Phase 1 Subscription: {e}")
print(f" [FAIL] Phase 1 Subscription: {e}")
diff --git a/tests/unit/datamodels/test_workflow_models.py b/tests/unit/datamodels/test_workflow_models.py
index ab73f10f..59e3736d 100644
--- a/tests/unit/datamodels/test_workflow_models.py
+++ b/tests/unit/datamodels/test_workflow_models.py
@@ -19,7 +19,7 @@ from modules.datamodels.datamodelWorkflow import (
RequestContext,
UnderstandingResult,
TaskDefinition,
- TaskResult
+ WorkflowTaskResult
)
from modules.datamodels.datamodelDocref import DocumentReferenceList, DocumentListReference
from modules.datamodels.datamodelAi import OperationTypeEnum