From cc1fdb13e5cac700915a019f7e8ea5fd722aaf89 Mon Sep 17 00:00:00 2001 From: ValueOn AG Date: Wed, 8 Apr 2026 20:28:34 +0200 Subject: [PATCH] cleaned sql and ui language sets --- app.py | 6 + modules/connectors/connectorDbPostgre.py | 17 +- modules/datamodels/datamodelDocument.py | 13 +- modules/datamodels/datamodelUiLanguage.py | 85 + modules/features/chatbot/bridges/ai.py | 4 - .../features/chatbot/routeFeatureChatbot.py | 9 +- modules/features/chatbot/service.py | 10 +- .../datamodelFeatureGraphicalEditor.py | 12 + .../interfaceFeatureGraphicalEditor.py | 5 +- .../graphicalEditor/mainGraphicalEditor.py | 12 +- .../nodeDefinitions/__init__.py | 2 + .../graphicalEditor/nodeDefinitions/ai.py | 52 +- .../nodeDefinitions/clickup.py | 245 +-- .../graphicalEditor/nodeDefinitions/data.py | 54 + .../graphicalEditor/nodeDefinitions/email.py | 80 +- .../graphicalEditor/nodeDefinitions/file.py | 59 +- .../graphicalEditor/nodeDefinitions/flow.py | 61 +- .../graphicalEditor/nodeDefinitions/input.py | 76 +- .../nodeDefinitions/sharepoint.py | 74 +- .../nodeDefinitions/triggers.py | 8 + .../nodeDefinitions/trustee.py | 74 +- .../features/graphicalEditor/nodeRegistry.py | 21 +- modules/features/graphicalEditor/portTypes.py | 504 +++++ .../features/trustee/routeFeatureTrustee.py | 16 + .../workspace/routeFeatureWorkspace.py | 4 - modules/interfaces/interfaceDbApp.py | 40 + modules/interfaces/interfaceDbBilling.py | 349 +++ modules/interfaces/interfaceDbChat.py | 4 +- modules/interfaces/interfaceDbManagement.py | 39 + .../migration/seedData/ui_language_seed.json | 1915 +++++++++++++++++ modules/routes/routeBilling.py | 519 ++--- modules/routes/routeDataUsers.py | 49 +- modules/routes/routeI18n.py | 711 ++++++ modules/routes/routeSubscription.py | 44 +- modules/routes/routeSystem.py | 25 +- modules/routes/routeWorkflowDashboard.py | 267 +++ .../serviceAgent/coreTools/_documentTools.py | 33 +- .../coreTools/_featureSubAgentTools.py | 13 + 
.../services/serviceAgent/featureDataAgent.py | 47 +- .../serviceAgent/featureDataProvider.py | 30 + .../services/serviceAgent/mainServiceAgent.py | 8 +- .../serviceAi/subContentExtraction.py | 2 +- .../services/serviceAi/subDocumentIntents.py | 2 +- .../mainServiceGeneration.py | 4 - .../subStructureGenerator.py | 4 +- modules/shared/frontendTypes.py | 60 +- modules/system/mainSystem.py | 160 +- .../workflows/automation2/executionEngine.py | 83 +- .../automation2/executors/__init__.py | 2 + .../executors/actionNodeExecutor.py | 920 ++------ .../automation2/executors/dataExecutor.py | 214 ++ .../automation2/executors/flowExecutor.py | 69 +- modules/workflows/automation2/graphUtils.py | 62 +- .../methodTrustee/actions/extractFromFiles.py | 70 +- .../workflows/processing/modes/modeDynamic.py | 2 +- .../processing/shared/placeholderFactory.py | 2 - scripts/build_ui_language_seed_json.py | 100 + scripts/i18n_rekey_plaintext_keys.py | 136 ++ scripts/script_db_adapt_to_models.py | 2 +- 59 files changed, 5864 insertions(+), 1626 deletions(-) create mode 100644 modules/datamodels/datamodelUiLanguage.py create mode 100644 modules/features/graphicalEditor/nodeDefinitions/data.py create mode 100644 modules/features/graphicalEditor/portTypes.py create mode 100644 modules/migration/seedData/ui_language_seed.json create mode 100644 modules/routes/routeI18n.py create mode 100644 modules/routes/routeWorkflowDashboard.py create mode 100644 modules/workflows/automation2/executors/dataExecutor.py create mode 100644 scripts/build_ui_language_seed_json.py create mode 100644 scripts/i18n_rekey_plaintext_keys.py diff --git a/app.py b/app.py index a2b6f338..4b08dbff 100644 --- a/app.py +++ b/app.py @@ -578,6 +578,9 @@ app.include_router(invitationsRouter) from modules.routes.routeNotifications import router as notificationsRouter app.include_router(notificationsRouter) +from modules.routes.routeI18n import router as i18nRouter +app.include_router(i18nRouter) + from 
modules.routes.routeAdminRbacExport import router as rbacAdminExportRouter app.include_router(rbacAdminExportRouter) @@ -600,6 +603,9 @@ from modules.routes.routeSystem import router as systemRouter, navigationRouter app.include_router(systemRouter) app.include_router(navigationRouter) +from modules.routes.routeWorkflowDashboard import router as workflowDashboardRouter +app.include_router(workflowDashboardRouter) + # ============================================================================ # PLUG&PLAY FEATURE ROUTERS # Dynamically load routers from feature containers in modules/features/ diff --git a/modules/connectors/connectorDbPostgre.py b/modules/connectors/connectorDbPostgre.py index 2c7eeab3..2f62756a 100644 --- a/modules/connectors/connectorDbPostgre.py +++ b/modules/connectors/connectorDbPostgre.py @@ -946,13 +946,14 @@ class DatabaseConnector: if recordFilter: for field, value in recordFilter.items(): if value is None: - # Use IS NULL for None values (= NULL is always false in SQL) where_conditions.append(f'"{field}" IS NULL') + elif isinstance(value, list): + where_conditions.append(f'"{field}" = ANY(%s)') + where_values.append(value) else: where_conditions.append(f'"{field}" = %s') where_values.append(value) - # Build the query if where_conditions: where_clause = " WHERE " + " AND ".join(where_conditions) else: @@ -1113,13 +1114,15 @@ class DatabaseConnector: orderParts: List[str] = [] if pagination and pagination.sort: for sf in pagination.sort: - if sf.field in validColumns: - direction = "DESC" if sf.direction.lower() == "desc" else "ASC" - colType = fields.get(sf.field, "TEXT") + sfField = sf.get("field") if isinstance(sf, dict) else getattr(sf, "field", None) + sfDir = sf.get("direction", "asc") if isinstance(sf, dict) else getattr(sf, "direction", "asc") + if sfField and sfField in validColumns: + direction = "DESC" if str(sfDir).lower() == "desc" else "ASC" + colType = fields.get(sfField, "TEXT") if colType == "BOOLEAN": - 
orderParts.append(f'COALESCE("{sf.field}", FALSE) {direction}') + orderParts.append(f'COALESCE("{sfField}", FALSE) {direction}') else: - orderParts.append(f'"{sf.field}" {direction} NULLS LAST') + orderParts.append(f'"{sfField}" {direction} NULLS LAST') if not orderParts: orderParts.append('"id"') order_clause = " ORDER BY " + ", ".join(orderParts) diff --git a/modules/datamodels/datamodelDocument.py b/modules/datamodels/datamodelDocument.py index a5cd6b0c..e34c82ff 100644 --- a/modules/datamodels/datamodelDocument.py +++ b/modules/datamodels/datamodelDocument.py @@ -1,7 +1,7 @@ # Copyright (c) 2025 Patrick Motsch # All rights reserved. from typing import Any, Dict, List, Optional, Literal, Union -from pydantic import BaseModel, Field +from pydantic import BaseModel, Field, field_serializer from datetime import datetime @@ -116,11 +116,12 @@ class RenderedDocument(BaseModel): filename: str = Field(description="Filename for the document (e.g., 'report.html', 'image.png')") documentType: Optional[str] = Field(default=None, description="Type of document (e.g., 'report', 'invoice', 'analysis')") metadata: Optional[Dict[str, Any]] = Field(default=None, description="Document metadata (title, author, etc.)") - - class Config: - json_encoders = { - bytes: lambda v: v.decode('utf-8', errors='replace') if isinstance(v, bytes) else v - } + + @field_serializer("documentData") + def _serializeDocumentData(self, v: bytes) -> str: + if isinstance(v, bytes): + return v.decode("utf-8", errors="replace") + return str(v) # Update forward references diff --git a/modules/datamodels/datamodelUiLanguage.py b/modules/datamodels/datamodelUiLanguage.py new file mode 100644 index 00000000..e69a8aed --- /dev/null +++ b/modules/datamodels/datamodelUiLanguage.py @@ -0,0 +1,85 @@ +# Copyright (c) 2025 Patrick Motsch +# All rights reserved. 
+"""UI language sets: global i18n strings (German key -> translated value).""" + +from typing import Dict, Optional, Literal + +from pydantic import Field + +from modules.datamodels.datamodelBase import PowerOnModel +from modules.shared.attributeUtils import registerModelLabels + + +UiLanguageStatus = Literal["complete", "incomplete", "generating"] + + +class UiLanguageSet(PowerOnModel): + """ + One row per ISO 639-1 UI language. id equals code (e.g. de, en). + keys: flat map German plaintext key -> translation for this language. + For language de, values equal keys. + """ + + id: str = Field( + ..., + description="ISO 639-1 language code (primary key), e.g. de, en, fr", + json_schema_extra={ + "frontend_type": "text", + "frontend_readonly": False, + "frontend_required": True, + }, + ) + label: str = Field( + ..., + description="Human-readable language name", + json_schema_extra={ + "frontend_type": "text", + "frontend_readonly": False, + "frontend_required": True, + }, + ) + keys: Dict[str, str] = Field( + default_factory=dict, + description="German plaintext key -> translated label", + json_schema_extra={ + "frontend_type": "textarea", + "frontend_readonly": False, + "frontend_required": False, + }, + ) + status: UiLanguageStatus = Field( + default="complete", + description="complete | incomplete | generating", + json_schema_extra={ + "frontend_type": "select", + "frontend_readonly": False, + "frontend_required": True, + "frontend_options": [ + {"value": "complete", "label": {"de": "Vollständig", "en": "Complete"}}, + {"value": "incomplete", "label": {"de": "Unvollständig", "en": "Incomplete"}}, + {"value": "generating", "label": {"de": "Wird erzeugt", "en": "Generating"}}, + ], + }, + ) + isDefault: bool = Field( + default=False, + description="Exactly one set should be default (de)", + json_schema_extra={ + "frontend_type": "boolean", + "frontend_readonly": False, + "frontend_required": False, + }, + ) + + +registerModelLabels( + "UiLanguageSet", + {"en": "UI 
Language Set", "de": "UI-Sprachset"}, + { + "id": {"en": "Code", "de": "Code"}, + "label": {"en": "Label", "de": "Bezeichnung"}, + "keys": {"en": "Keys", "de": "Schlüssel"}, + "status": {"en": "Status", "de": "Status"}, + "isDefault": {"en": "Default", "de": "Standard"}, + }, +) diff --git a/modules/features/chatbot/bridges/ai.py b/modules/features/chatbot/bridges/ai.py index a06668c8..1962fa8f 100644 --- a/modules/features/chatbot/bridges/ai.py +++ b/modules/features/chatbot/bridges/ai.py @@ -462,11 +462,7 @@ class AICenterChatModel(BaseChatModel): elif isinstance(args_schema, BaseModel): # It's a Pydantic model instance if hasattr(args_schema, "model_dump"): - # Pydantic v2 parameters = args_schema.model_dump() - elif hasattr(args_schema, "dict"): - # Pydantic v1 - parameters = args_schema.dict() elif hasattr(args_schema, "schema"): # Has schema method (might be a class) try: diff --git a/modules/features/chatbot/routeFeatureChatbot.py b/modules/features/chatbot/routeFeatureChatbot.py index 1775a253..821e7ae9 100644 --- a/modules/features/chatbot/routeFeatureChatbot.py +++ b/modules/features/chatbot/routeFeatureChatbot.py @@ -150,8 +150,6 @@ def get_chatbot_threads( if hasattr(workflow, 'model_dump'): workflow_dict = workflow.model_dump() - elif hasattr(workflow, 'dict'): - workflow_dict = workflow.dict() elif isinstance(workflow, dict): workflow_dict = dict(workflow) else: @@ -317,11 +315,11 @@ async def stream_chatbot_start( # Emit filtered items for item in filtered_items: - # Convert Pydantic models to dicts for JSON serialization + _inner = item.get("item") serializable_item = { "type": item.get("type"), "createdAt": item.get("createdAt"), - "item": item.get("item").model_dump() if hasattr(item.get("item"), "model_dump") else (item.get("item").dict() if hasattr(item.get("item"), "dict") else item.get("item")) + "item": _inner.model_dump() if _inner is not None and hasattr(_inner, "model_dump") else _inner, } # Emit item directly in exact chatData format: 
{type, createdAt, item} yield f"data: {json.dumps(serializable_item)}\n\n" @@ -399,9 +397,6 @@ async def stream_chatbot_start( if hasattr(item_obj, "model_dump"): chatdata_item = chatdata_item.copy() chatdata_item["item"] = item_obj.model_dump() - elif hasattr(item_obj, "dict"): - chatdata_item = chatdata_item.copy() - chatdata_item["item"] = item_obj.dict() yield f"data: {json.dumps(chatdata_item)}\n\n" # Handle completion/stopped events to close stream diff --git a/modules/features/chatbot/service.py b/modules/features/chatbot/service.py index a98150b5..5d60cdd7 100644 --- a/modules/features/chatbot/service.py +++ b/modules/features/chatbot/service.py @@ -278,7 +278,7 @@ async def _update_conversation_name_async( # Emit stat event so frontend can refresh thread list/title workflow = interfaceDbChat.getWorkflow(workflowId) if workflow: - wf_dict = workflow.model_dump() if hasattr(workflow, "model_dump") else workflow.dict() + wf_dict = workflow.model_dump() await event_manager.emit_event( context_id=workflowId, event_type="chatdata", @@ -966,7 +966,7 @@ async def _bridge_chatbot_events( data={ "type": "message", "createdAt": message_timestamp, - "item": last_message.dict() + "item": last_message.model_dump() }, event_category="chat" ) @@ -1005,7 +1005,7 @@ async def _bridge_chatbot_events( data={ "type": "message", "createdAt": message_timestamp, - "item": assistant_msg.dict() + "item": assistant_msg.model_dump() }, event_category="chat" ) @@ -1089,7 +1089,7 @@ async def _bridge_chatbot_events( data={ "type": "message", "createdAt": message_timestamp, - "item": error_msg.dict() + "item": error_msg.model_dump() }, event_category="chat" ) @@ -1490,7 +1490,7 @@ async def _processChatbotMessageLangGraph( data={ "type": "message", "createdAt": message_timestamp, - "item": errorMessage.dict() + "item": errorMessage.model_dump() }, event_category="chat" ) diff --git a/modules/features/graphicalEditor/datamodelFeatureGraphicalEditor.py 
b/modules/features/graphicalEditor/datamodelFeatureGraphicalEditor.py index bf94c12a..d8aeef05 100644 --- a/modules/features/graphicalEditor/datamodelFeatureGraphicalEditor.py +++ b/modules/features/graphicalEditor/datamodelFeatureGraphicalEditor.py @@ -234,6 +234,16 @@ class AutoRun(PowerOnModel): description="Workflow ID", json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": True}, ) + mandateId: Optional[str] = Field( + default=None, + description="Mandate ID for cross-feature querying", + json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False}, + ) + ownerId: Optional[str] = Field( + default=None, + description="User ID who triggered this run", + json_schema_extra={"frontend_type": "text", "frontend_readonly": True, "frontend_required": False}, + ) versionId: Optional[str] = Field( default=None, description="AutoVersion ID used for this run", @@ -297,6 +307,8 @@ registerModelLabels( { "id": {"en": "ID", "de": "ID", "fr": "ID"}, "workflowId": {"en": "Workflow ID", "de": "Workflow-ID", "fr": "ID workflow"}, + "mandateId": {"en": "Mandate ID", "de": "Mandanten-ID", "fr": "ID du mandat"}, + "ownerId": {"en": "Owner", "de": "Auslöser", "fr": "Propriétaire"}, "versionId": {"en": "Version ID", "de": "Versions-ID", "fr": "ID version"}, "status": {"en": "Status", "de": "Status", "fr": "Statut"}, "trigger": {"en": "Trigger", "de": "Auslöser", "fr": "Déclencheur"}, diff --git a/modules/features/graphicalEditor/interfaceFeatureGraphicalEditor.py b/modules/features/graphicalEditor/interfaceFeatureGraphicalEditor.py index 4dd679bb..436f2d34 100644 --- a/modules/features/graphicalEditor/interfaceFeatureGraphicalEditor.py +++ b/modules/features/graphicalEditor/interfaceFeatureGraphicalEditor.py @@ -267,13 +267,16 @@ class GraphicalEditorObjects: def createRun(self, workflowId: str, nodeOutputs: Dict = None, context: Dict = None) -> Dict[str, Any]: """Create a new workflow run.""" + ctx = context 
or {} data = { "id": str(uuid.uuid4()), "workflowId": workflowId, "status": "running", "nodeOutputs": _make_json_serializable(nodeOutputs or {}), "currentNodeId": None, - "context": context or {}, + "context": ctx, + "mandateId": ctx.get("mandateId") or self.mandateId, + "ownerId": ctx.get("userId") or (self.currentUser.id if self.currentUser else None), } created = self.db.recordCreate(Automation2WorkflowRun, data) return dict(created) diff --git a/modules/features/graphicalEditor/mainGraphicalEditor.py b/modules/features/graphicalEditor/mainGraphicalEditor.py index ab437a54..5a8917e2 100644 --- a/modules/features/graphicalEditor/mainGraphicalEditor.py +++ b/modules/features/graphicalEditor/mainGraphicalEditor.py @@ -45,11 +45,6 @@ UI_OBJECTS = [ "label": {"en": "Tasks", "de": "Tasks", "fr": "Tâches"}, "meta": {"area": "tasks"} }, - { - "objectKey": "ui.feature.graphicalEditor.dashboard", - "label": {"en": "Dashboard", "de": "Dashboard", "fr": "Tableau de bord"}, - "meta": {"area": "dashboard"} - }, ] RESOURCE_OBJECTS = [ @@ -79,7 +74,6 @@ TEMPLATE_ROLES = [ "fr": "Visualiseur Éditeur graphique - Consulter les workflows (lecture seule)", }, "accessRules": [ - {"context": "UI", "item": "ui.feature.graphicalEditor.dashboard", "view": True}, {"context": "UI", "item": "ui.feature.graphicalEditor.workflows", "view": True}, {"context": "UI", "item": "ui.feature.graphicalEditor.workflows-tasks", "view": True}, {"context": "UI", "item": "ui.feature.graphicalEditor.templates", "view": True}, @@ -94,7 +88,6 @@ TEMPLATE_ROLES = [ "fr": "Utilisateur Éditeur graphique - Utiliser le flow builder", }, "accessRules": [ - {"context": "UI", "item": "ui.feature.graphicalEditor.dashboard", "view": True}, {"context": "UI", "item": "ui.feature.graphicalEditor.editor", "view": True}, {"context": "UI", "item": "ui.feature.graphicalEditor.workflows", "view": True}, {"context": "UI", "item": "ui.feature.graphicalEditor.workflows-tasks", "view": True}, @@ -141,10 +134,11 @@ def 
getGraphicalEditorServices( _workflow = workflow if _workflow is None: + import uuid as _uuid _workflow = type( "_Placeholder", (), - {"featureCode": FEATURE_CODE, "id": None, "workflowMode": None, "messages": []}, + {"featureCode": FEATURE_CODE, "id": f"transient-{_uuid.uuid4().hex[:12]}", "workflowMode": None, "messages": []}, )() ctx = ServiceCenterContext( @@ -159,7 +153,7 @@ def getGraphicalEditorServices( hub.mandateId = mandateId hub.featureInstanceId = featureInstanceId hub._service_context = ctx - hub.workflow = workflow + hub.workflow = _workflow hub.featureCode = FEATURE_CODE for spec in REQUIRED_SERVICES: diff --git a/modules/features/graphicalEditor/nodeDefinitions/__init__.py b/modules/features/graphicalEditor/nodeDefinitions/__init__.py index 5fda431e..ab41094b 100644 --- a/modules/features/graphicalEditor/nodeDefinitions/__init__.py +++ b/modules/features/graphicalEditor/nodeDefinitions/__init__.py @@ -10,6 +10,7 @@ from .sharepoint import SHAREPOINT_NODES from .clickup import CLICKUP_NODES from .file import FILE_NODES from .trustee import TRUSTEE_NODES +from .data import DATA_NODES STATIC_NODE_TYPES = ( TRIGGER_NODES @@ -21,4 +22,5 @@ STATIC_NODE_TYPES = ( + CLICKUP_NODES + FILE_NODES + TRUSTEE_NODES + + DATA_NODES ) diff --git a/modules/features/graphicalEditor/nodeDefinitions/ai.py b/modules/features/graphicalEditor/nodeDefinitions/ai.py index bb85e809..8586f9c4 100644 --- a/modules/features/graphicalEditor/nodeDefinitions/ai.py +++ b/modules/features/graphicalEditor/nodeDefinitions/ai.py @@ -8,14 +8,19 @@ AI_NODES = [ "label": {"en": "Prompt", "de": "Prompt", "fr": "Invite"}, "description": {"en": "Enter a prompt and AI does something", "de": "Prompt eingeben und KI führt aus", "fr": "Entrer une invite et l'IA exécute"}, "parameters": [ - {"name": "prompt", "type": "string", "required": True, "description": {"en": "AI prompt", "de": "KI-Prompt", "fr": "Invite IA"}}, + {"name": "aiPrompt", "type": "string", "required": True, "frontendType": 
"textarea", + "description": {"en": "AI prompt", "de": "KI-Prompt", "fr": "Invite IA"}}, + {"name": "outputFormat", "type": "string", "required": False, "frontendType": "select", + "frontendOptions": {"options": ["text", "json", "emailDraft"]}, + "description": {"en": "Output format", "de": "Ausgabeformat", "fr": "Format de sortie"}, "default": "text"}, ], "inputs": 1, "outputs": 1, + "inputPorts": {0: {"accepts": ["Transit"]}}, + "outputPorts": {0: {"schema": "AiResult"}}, "meta": {"icon": "mdi-robot", "color": "#9C27B0"}, "_method": "ai", "_action": "process", - "_paramMap": {"prompt": "aiPrompt"}, }, { "id": "ai.webResearch", @@ -23,14 +28,16 @@ AI_NODES = [ "label": {"en": "Web Research", "de": "Web-Recherche", "fr": "Recherche web"}, "description": {"en": "Research on the web", "de": "Recherche im Web", "fr": "Recherche sur le web"}, "parameters": [ - {"name": "query", "type": "string", "required": True, "description": {"en": "Research query", "de": "Recherche-Anfrage", "fr": "Requête de recherche"}}, + {"name": "prompt", "type": "string", "required": True, "frontendType": "textarea", + "description": {"en": "Research query", "de": "Recherche-Anfrage", "fr": "Requête de recherche"}}, ], "inputs": 1, "outputs": 1, + "inputPorts": {0: {"accepts": ["Transit"]}}, + "outputPorts": {0: {"schema": "AiResult"}}, "meta": {"icon": "mdi-magnify", "color": "#9C27B0"}, "_method": "ai", "_action": "webResearch", - "_paramMap": {"query": "prompt"}, }, { "id": "ai.summarizeDocument", @@ -38,14 +45,17 @@ AI_NODES = [ "label": {"en": "Summarize Document", "de": "Dokument zusammenfassen", "fr": "Résumer document"}, "description": {"en": "Summarize document content", "de": "Dokumentinhalt zusammenfassen", "fr": "Résumer le contenu du document"}, "parameters": [ - {"name": "summaryLength", "type": "string", "required": False, "description": {"en": "Short, medium, or long", "de": "Kurz, mittel oder lang", "fr": "Court, moyen ou long"}, "default": "medium"}, + {"name": 
"summaryLength", "type": "string", "required": False, "frontendType": "select", + "frontendOptions": {"options": ["short", "medium", "long"]}, + "description": {"en": "Short, medium, or long", "de": "Kurz, mittel oder lang", "fr": "Court, moyen ou long"}, "default": "medium"}, ], "inputs": 1, "outputs": 1, + "inputPorts": {0: {"accepts": ["DocumentList", "Transit"]}}, + "outputPorts": {0: {"schema": "AiResult"}}, "meta": {"icon": "mdi-file-document-outline", "color": "#9C27B0"}, "_method": "ai", "_action": "summarizeDocument", - "_paramMap": {}, }, { "id": "ai.translateDocument", @@ -53,14 +63,17 @@ AI_NODES = [ "label": {"en": "Translate Document", "de": "Dokument übersetzen", "fr": "Traduire document"}, "description": {"en": "Translate document to target language", "de": "Dokument in Zielsprache übersetzen", "fr": "Traduire le document"}, "parameters": [ - {"name": "targetLanguage", "type": "string", "required": True, "description": {"en": "Target language (e.g. en, de, fr)", "de": "Zielsprache", "fr": "Langue cible"}}, + {"name": "targetLanguage", "type": "string", "required": True, "frontendType": "select", + "frontendOptions": {"options": ["en", "de", "fr", "it", "es", "pt", "nl"]}, + "description": {"en": "Target language", "de": "Zielsprache", "fr": "Langue cible"}}, ], "inputs": 1, "outputs": 1, + "inputPorts": {0: {"accepts": ["DocumentList", "Transit"]}}, + "outputPorts": {0: {"schema": "AiResult"}}, "meta": {"icon": "mdi-translate", "color": "#9C27B0"}, "_method": "ai", "_action": "translateDocument", - "_paramMap": {"targetLanguage": "targetLanguage"}, }, { "id": "ai.convertDocument", @@ -68,14 +81,17 @@ AI_NODES = [ "label": {"en": "Convert Document", "de": "Dokument konvertieren", "fr": "Convertir document"}, "description": {"en": "Convert document to another format", "de": "Dokument in anderes Format konvertieren", "fr": "Convertir le document"}, "parameters": [ - {"name": "targetFormat", "type": "string", "required": True, "description": {"en": 
"Target format (pdf, docx, txt, etc.)", "de": "Zielformat", "fr": "Format cible"}}, + {"name": "targetFormat", "type": "string", "required": True, "frontendType": "select", + "frontendOptions": {"options": ["pdf", "docx", "txt", "html", "md"]}, + "description": {"en": "Target format", "de": "Zielformat", "fr": "Format cible"}}, ], "inputs": 1, "outputs": 1, + "inputPorts": {0: {"accepts": ["DocumentList", "Transit"]}}, + "outputPorts": {0: {"schema": "DocumentList"}}, "meta": {"icon": "mdi-file-convert", "color": "#9C27B0"}, "_method": "ai", "_action": "convertDocument", - "_paramMap": {"targetFormat": "targetFormat"}, }, { "id": "ai.generateDocument", @@ -83,14 +99,16 @@ AI_NODES = [ "label": {"en": "Generate Document", "de": "Dokument generieren", "fr": "Générer document"}, "description": {"en": "Generate document from prompt", "de": "Dokument aus Prompt generieren", "fr": "Générer un document"}, "parameters": [ - {"name": "prompt", "type": "string", "required": True, "description": {"en": "Generation prompt", "de": "Generierungs-Prompt", "fr": "Invite de génération"}}, + {"name": "prompt", "type": "string", "required": True, "frontendType": "textarea", + "description": {"en": "Generation prompt", "de": "Generierungs-Prompt", "fr": "Invite de génération"}}, ], "inputs": 1, "outputs": 1, + "inputPorts": {0: {"accepts": ["Transit"]}}, + "outputPorts": {0: {"schema": "DocumentList"}}, "meta": {"icon": "mdi-file-plus", "color": "#9C27B0"}, "_method": "ai", "_action": "generateDocument", - "_paramMap": {"prompt": "prompt", "format": "format"}, }, { "id": "ai.generateCode", @@ -98,14 +116,18 @@ AI_NODES = [ "label": {"en": "Generate Code", "de": "Code generieren", "fr": "Générer code"}, "description": {"en": "Generate code from description", "de": "Code aus Beschreibung generieren", "fr": "Générer du code"}, "parameters": [ - {"name": "prompt", "type": "string", "required": True, "description": {"en": "Code generation prompt", "de": "Code-Generierungs-Prompt", "fr": 
"Invite de génération de code"}}, - {"name": "language", "type": "string", "required": False, "description": {"en": "Programming language", "de": "Programmiersprache", "fr": "Langage de programmation"}, "default": "python"}, + {"name": "prompt", "type": "string", "required": True, "frontendType": "textarea", + "description": {"en": "Code generation prompt", "de": "Code-Generierungs-Prompt", "fr": "Invite de génération de code"}}, + {"name": "language", "type": "string", "required": False, "frontendType": "select", + "frontendOptions": {"options": ["python", "javascript", "typescript", "java", "csharp", "go"]}, + "description": {"en": "Programming language", "de": "Programmiersprache", "fr": "Langage de programmation"}, "default": "python"}, ], "inputs": 1, "outputs": 1, + "inputPorts": {0: {"accepts": ["Transit"]}}, + "outputPorts": {0: {"schema": "AiResult"}}, "meta": {"icon": "mdi-code-tags", "color": "#9C27B0"}, "_method": "ai", "_action": "generateCode", - "_paramMap": {"prompt": "prompt", "language": "language"}, }, ] diff --git a/modules/features/graphicalEditor/nodeDefinitions/clickup.py b/modules/features/graphicalEditor/nodeDefinitions/clickup.py index 4acb0db9..0d3c75af 100644 --- a/modules/features/graphicalEditor/nodeDefinitions/clickup.py +++ b/modules/features/graphicalEditor/nodeDefinitions/clickup.py @@ -7,102 +7,57 @@ CLICKUP_NODES = [ "id": "clickup.searchTasks", "category": "clickup", "label": {"en": "Search tasks", "de": "Aufgaben suchen", "fr": "Rechercher tâches"}, - "description": { - "en": "Search tasks in a workspace (team)", - "de": "Aufgaben in einem Workspace suchen", - "fr": "Rechercher des tâches dans un espace", - }, + "description": {"en": "Search tasks in a workspace", "de": "Aufgaben in einem Workspace suchen", "fr": "Rechercher des tâches"}, "parameters": [ - {"name": "connectionId", "type": "string", "required": True, "description": {"en": "ClickUp connection", "de": "ClickUp-Verbindung", "fr": "Connexion ClickUp"}}, - {"name": 
"teamId", "type": "string", "required": True, "description": {"en": "Workspace (team) ID", "de": "Team-/Workspace-ID", "fr": "ID équipe"}}, - {"name": "query", "type": "string", "required": True, "description": {"en": "Search query", "de": "Suchbegriff", "fr": "Requête"}}, - {"name": "page", "type": "number", "required": False, "description": {"en": "Page", "de": "Seite", "fr": "Page"}, "default": 0}, - { - "name": "listId", - "type": "string", - "required": False, - "description": { - "en": "If set, search this list via list API (not team search).", - "de": "Wenn gesetzt: Suche in dieser Liste (Listen-API, nicht Team-Suche).", - "fr": "Si défini : recherche dans cette liste (API liste).", - }, - }, - { - "name": "includeClosed", - "type": "boolean", - "required": False, - "default": False, - "description": { - "en": "With listId: include closed tasks.", - "de": "Mit Liste: erledigte Aufgaben einbeziehen.", - "fr": "Avec liste : inclure les tâches terminées.", - }, - }, - { - "name": "fullTaskData", - "type": "boolean", - "required": False, - "default": False, - "description": { - "en": "Return full ClickUp API JSON per task (very large). Default: slim fields only.", - "de": "Vollständige ClickUp-Rohdaten pro Task (sehr groß). Standard: nur schlanke Felder.", - "fr": "Réponse brute complète (très volumineuse). 
Par défaut : champs réduits.", - }, - }, - { - "name": "matchNameOnly", - "type": "boolean", - "required": False, - "default": True, - "description": { - "en": "Keep only tasks whose title contains the search query (default: on).", - "de": "Nur Aufgaben, deren Titel den Suchbegriff enthält (Standard: an).", - "fr": "Ne garder que les tâches dont le titre contient la requête (défaut : oui).", - }, - }, + {"name": "connectionReference", "type": "string", "required": True, "frontendType": "userConnection", + "description": {"en": "ClickUp connection", "de": "ClickUp-Verbindung", "fr": "Connexion ClickUp"}}, + {"name": "teamId", "type": "string", "required": True, "frontendType": "text", + "description": {"en": "Workspace (team) ID", "de": "Team-/Workspace-ID", "fr": "ID équipe"}}, + {"name": "query", "type": "string", "required": True, "frontendType": "text", + "description": {"en": "Search query", "de": "Suchbegriff", "fr": "Requête"}}, + {"name": "page", "type": "number", "required": False, "frontendType": "number", + "description": {"en": "Page", "de": "Seite", "fr": "Page"}, "default": 0}, + {"name": "listId", "type": "string", "required": False, "frontendType": "clickupList", + "frontendOptions": {"dependsOn": "connectionReference"}, + "description": {"en": "Search in this list", "de": "In dieser Liste suchen", "fr": "Rechercher dans cette liste"}}, + {"name": "includeClosed", "type": "boolean", "required": False, "frontendType": "checkbox", + "description": {"en": "Include closed tasks", "de": "Erledigte einbeziehen", "fr": "Inclure terminées"}, "default": False}, + {"name": "fullTaskData", "type": "boolean", "required": False, "frontendType": "checkbox", + "description": {"en": "Return full task data", "de": "Vollständige Daten", "fr": "Données complètes"}, "default": False}, + {"name": "matchNameOnly", "type": "boolean", "required": False, "frontendType": "checkbox", + "description": {"en": "Match title only", "de": "Nur Titel", "fr": "Titre uniquement"}, 
"default": True}, ], "inputs": 1, "outputs": 1, + "inputPorts": {0: {"accepts": ["Transit"]}}, + "outputPorts": {0: {"schema": "TaskList"}}, "meta": {"icon": "mdi-magnify", "color": "#7B68EE"}, "_method": "clickup", "_action": "searchTasks", - "_paramMap": { - "connectionId": "connectionReference", - "teamId": "teamId", - "query": "query", - "page": "page", - "listId": "listId", - "fullTaskData": "fullTaskData", - "matchNameOnly": "matchNameOnly", - "includeClosed": "includeClosed", - }, }, { "id": "clickup.listTasks", "category": "clickup", "label": {"en": "List tasks", "de": "Aufgaben auflisten", "fr": "Lister les tâches"}, - "description": { - "en": "List tasks in a list (pick list path from browse)", - "de": "Aufgaben einer Liste auflisten (Pfad aus Browse)", - "fr": "Lister les tâches d'une liste", - }, + "description": {"en": "List tasks in a list", "de": "Aufgaben einer Liste auflisten", "fr": "Lister les tâches"}, "parameters": [ - {"name": "connectionId", "type": "string", "required": True, "description": {"en": "ClickUp connection", "de": "ClickUp-Verbindung", "fr": "Connexion ClickUp"}}, - {"name": "path", "type": "string", "required": True, "description": {"en": "Virtual path to list /team/.../list/...", "de": "Pfad zur Liste", "fr": "Chemin vers la liste"}}, - {"name": "page", "type": "number", "required": False, "description": {"en": "Page", "de": "Seite", "fr": "Page"}, "default": 0}, - {"name": "includeClosed", "type": "boolean", "required": False, "description": {"en": "Include closed", "de": "Erledigte einbeziehen", "fr": "Inclure terminées"}, "default": False}, + {"name": "connectionReference", "type": "string", "required": True, "frontendType": "userConnection", + "description": {"en": "ClickUp connection", "de": "ClickUp-Verbindung", "fr": "Connexion ClickUp"}}, + {"name": "pathQuery", "type": "string", "required": True, "frontendType": "clickupList", + "frontendOptions": {"dependsOn": "connectionReference"}, + "description": {"en": "Path to 
list", "de": "Pfad zur Liste", "fr": "Chemin vers la liste"}}, + {"name": "page", "type": "number", "required": False, "frontendType": "number", + "description": {"en": "Page", "de": "Seite", "fr": "Page"}, "default": 0}, + {"name": "includeClosed", "type": "boolean", "required": False, "frontendType": "checkbox", + "description": {"en": "Include closed", "de": "Erledigte einbeziehen", "fr": "Inclure terminées"}, "default": False}, ], "inputs": 1, "outputs": 1, + "inputPorts": {0: {"accepts": ["Transit"]}}, + "outputPorts": {0: {"schema": "TaskList"}}, "meta": {"icon": "mdi-format-list-bulleted", "color": "#7B68EE"}, "_method": "clickup", "_action": "listTasks", - "_paramMap": { - "connectionId": "connectionReference", - "path": "pathQuery", - "page": "page", - "includeClosed": "includeClosed", - }, }, { "id": "clickup.getTask", @@ -110,118 +65,112 @@ CLICKUP_NODES = [ "label": {"en": "Get task", "de": "Aufgabe abrufen", "fr": "Obtenir la tâche"}, "description": {"en": "Get one task by ID or path", "de": "Eine Aufgabe abrufen", "fr": "Obtenir une tâche"}, "parameters": [ - {"name": "connectionId", "type": "string", "required": True, "description": {"en": "ClickUp connection", "de": "ClickUp-Verbindung", "fr": "Connexion ClickUp"}}, - {"name": "taskId", "type": "string", "required": False, "description": {"en": "Task ID", "de": "Task-ID", "fr": "ID tâche"}}, - {"name": "path", "type": "string", "required": False, "description": {"en": "Or path .../task/{id}", "de": "Oder Pfad .../task/{id}", "fr": "Ou chemin .../task/{id}"}}, + {"name": "connectionReference", "type": "string", "required": True, "frontendType": "userConnection", + "description": {"en": "ClickUp connection", "de": "ClickUp-Verbindung", "fr": "Connexion ClickUp"}}, + {"name": "taskId", "type": "string", "required": False, "frontendType": "text", + "description": {"en": "Task ID", "de": "Task-ID", "fr": "ID tâche"}}, + {"name": "pathQuery", "type": "string", "required": False, "frontendType": "text", + 
"description": {"en": "Or path .../task/{id}", "de": "Oder Pfad", "fr": "Ou chemin"}}, ], "inputs": 1, "outputs": 1, + "inputPorts": {0: {"accepts": ["Transit"]}}, + "outputPorts": {0: {"schema": "TaskResult"}}, "meta": {"icon": "mdi-file-document-outline", "color": "#7B68EE"}, "_method": "clickup", "_action": "getTask", - "_paramMap": {"connectionId": "connectionReference", "taskId": "taskId", "path": "pathQuery"}, }, { "id": "clickup.createTask", "category": "clickup", "label": {"en": "Create task", "de": "Aufgabe erstellen", "fr": "Créer une tâche"}, - "description": {"en": "Create a task in a list", "de": "Aufgabe in einer Liste erstellen", "fr": "Créer une tâche dans une liste"}, + "description": {"en": "Create a task in a list", "de": "Aufgabe erstellen", "fr": "Créer une tâche"}, "parameters": [ - {"name": "connectionId", "type": "string", "required": True, "description": {"en": "ClickUp connection", "de": "ClickUp-Verbindung", "fr": "Connexion ClickUp"}}, - {"name": "teamId", "type": "string", "required": False, "description": {"en": "Workspace (team) for list picker", "de": "Workspace für Listen-Auswahl", "fr": "Équipe"}}, - {"name": "path", "type": "string", "required": False, "description": {"en": "Optional path /team/.../list/...", "de": "Optional: Pfad zur Liste", "fr": "Chemin optionnel"}}, - {"name": "listId", "type": "string", "required": False, "description": {"en": "List ID", "de": "Listen-ID", "fr": "ID liste"}}, - {"name": "name", "type": "string", "required": True, "description": {"en": "Task name", "de": "Name", "fr": "Nom"}}, - {"name": "description", "type": "string", "required": False, "description": {"en": "Description", "de": "Beschreibung", "fr": "Description"}}, - {"name": "taskStatus", "type": "string", "required": False, "description": {"en": "Status (list status name)", "de": "Status (wie in der Liste)", "fr": "Statut"}}, - {"name": "taskPriority", "type": "string", "required": False, "description": {"en": "1–4 or empty", "de": "1–4 
oder leer", "fr": "1–4"}}, - {"name": "taskDueDateMs", "type": "string", "required": False, "description": {"en": "Due date (Unix ms)", "de": "Fälligkeit (ms)", "fr": "Échéance (ms)"}}, - {"name": "taskAssigneeIds", "type": "object", "required": False, "description": {"en": "Assignee user ids", "de": "Zugewiesene (User-IDs)", "fr": "Assignés"}}, - {"name": "taskTimeEstimateMs", "type": "string", "required": False, "description": {"en": "Time estimate (ms)", "de": "Zeitschätzung (ms)", "fr": "Estimation (ms)"}}, - {"name": "taskTimeEstimateHours", "type": "string", "required": False, "description": {"en": "Time estimate (hours)", "de": "Zeitschätzung (Stunden)", "fr": "Heures"}}, - {"name": "customFieldValues", "type": "object", "required": False, "description": {"en": "Custom field id → value", "de": "Benutzerdefinierte Felder", "fr": "Champs personnalisés"}}, - {"name": "taskFields", "type": "string", "required": False, "description": {"en": "Extra JSON (advanced)", "de": "Zusätzliches JSON (fortgeschritten)", "fr": "JSON avancé"}}, + {"name": "connectionReference", "type": "string", "required": True, "frontendType": "userConnection", + "description": {"en": "ClickUp connection", "de": "ClickUp-Verbindung", "fr": "Connexion ClickUp"}}, + {"name": "teamId", "type": "string", "required": False, "frontendType": "text", + "description": {"en": "Workspace (team)", "de": "Workspace", "fr": "Équipe"}}, + {"name": "pathQuery", "type": "string", "required": False, "frontendType": "clickupList", + "frontendOptions": {"dependsOn": "connectionReference"}, + "description": {"en": "Path to list", "de": "Pfad zur Liste", "fr": "Chemin"}}, + {"name": "listId", "type": "string", "required": False, "frontendType": "text", + "description": {"en": "List ID", "de": "Listen-ID", "fr": "ID liste"}}, + {"name": "name", "type": "string", "required": True, "frontendType": "text", + "description": {"en": "Task name", "de": "Name", "fr": "Nom"}}, + {"name": "description", "type": "string", 
"required": False, "frontendType": "textarea", + "description": {"en": "Description", "de": "Beschreibung", "fr": "Description"}}, + {"name": "taskStatus", "type": "string", "required": False, "frontendType": "text", + "description": {"en": "Status", "de": "Status", "fr": "Statut"}}, + {"name": "taskPriority", "type": "string", "required": False, "frontendType": "select", + "frontendOptions": {"options": ["1", "2", "3", "4"]}, + "description": {"en": "Priority 1-4", "de": "Priorität 1-4", "fr": "Priorité 1-4"}}, + {"name": "taskDueDateMs", "type": "string", "required": False, "frontendType": "text", + "description": {"en": "Due date (Unix ms)", "de": "Fälligkeit (ms)", "fr": "Échéance (ms)"}}, + {"name": "taskAssigneeIds", "type": "object", "required": False, "frontendType": "json", + "description": {"en": "Assignee user ids", "de": "Zugewiesene", "fr": "Assignés"}}, + {"name": "taskTimeEstimateMs", "type": "string", "required": False, "frontendType": "text", + "description": {"en": "Time estimate (ms)", "de": "Zeitschätzung (ms)", "fr": "Estimation (ms)"}}, + {"name": "taskTimeEstimateHours", "type": "string", "required": False, "frontendType": "text", + "description": {"en": "Time estimate (hours)", "de": "Zeitschätzung (h)", "fr": "Heures"}}, + {"name": "customFieldValues", "type": "object", "required": False, "frontendType": "json", + "description": {"en": "Custom fields", "de": "Benutzerdefinierte Felder", "fr": "Champs personnalisés"}}, + {"name": "taskFields", "type": "string", "required": False, "frontendType": "json", + "description": {"en": "Extra JSON (advanced)", "de": "Zusätzliches JSON", "fr": "JSON avancé"}}, ], "inputs": 1, "outputs": 1, + "inputPorts": {0: {"accepts": ["Transit"]}}, + "outputPorts": {0: {"schema": "TaskResult"}}, "meta": {"icon": "mdi-plus-circle-outline", "color": "#7B68EE"}, "_method": "clickup", "_action": "createTask", - "_paramMap": { - "connectionId": "connectionReference", - "teamId": "teamId", - "path": "pathQuery", - 
"listId": "listId", - "name": "name", - "description": "description", - "taskStatus": "taskStatus", - "taskPriority": "taskPriority", - "taskDueDateMs": "taskDueDateMs", - "taskAssigneeIds": "taskAssigneeIds", - "taskTimeEstimateMs": "taskTimeEstimateMs", - "taskTimeEstimateHours": "taskTimeEstimateHours", - "customFieldValues": "customFieldValues", - "taskFields": "taskFields", - }, }, { "id": "clickup.updateTask", "category": "clickup", "label": {"en": "Update task", "de": "Aufgabe aktualisieren", "fr": "Mettre à jour la tâche"}, - "description": { - "en": "Update task fields (rows or JSON)", - "de": "Felder der Aufgabe ändern (Zeilen oder JSON)", - "fr": "Mettre à jour les champs (lignes ou JSON)", - }, + "description": {"en": "Update task fields", "de": "Felder der Aufgabe ändern", "fr": "Mettre à jour les champs"}, "parameters": [ - {"name": "connectionId", "type": "string", "required": True, "description": {"en": "ClickUp connection", "de": "ClickUp-Verbindung", "fr": "Connexion ClickUp"}}, - {"name": "taskId", "type": "string", "required": False, "description": {"en": "Task ID", "de": "Task-ID", "fr": "ID tâche"}}, - {"name": "path", "type": "string", "required": False, "description": {"en": "Or path to task", "de": "Oder Pfad", "fr": "Ou chemin"}}, - { - "name": "taskUpdateEntries", - "type": "object", - "required": False, - "description": { - "en": "List of {fieldKey, value, customFieldId?}", - "de": "Liste der zu ändernden Felder (fieldKey, value, optional customFieldId)", - "fr": "Liste de champs à mettre à jour", - }, - }, - {"name": "taskUpdate", "type": "string", "required": False, "description": {"en": "JSON body for API (optional if rows set)", "de": "JSON für API (optional wenn Zeilen gesetzt)", "fr": "Corps JSON"}}, + {"name": "connectionReference", "type": "string", "required": True, "frontendType": "userConnection", + "description": {"en": "ClickUp connection", "de": "ClickUp-Verbindung", "fr": "Connexion ClickUp"}}, + {"name": "taskId", "type": 
"string", "required": False, "frontendType": "text", + "description": {"en": "Task ID", "de": "Task-ID", "fr": "ID tâche"}}, + {"name": "path", "type": "string", "required": False, "frontendType": "text", + "description": {"en": "Or path to task", "de": "Oder Pfad", "fr": "Ou chemin"}}, + {"name": "taskUpdateEntries", "type": "object", "required": False, "frontendType": "keyValueRows", + "description": {"en": "Fields to update", "de": "Zu ändernde Felder", "fr": "Champs à mettre à jour"}}, + {"name": "taskUpdate", "type": "string", "required": False, "frontendType": "json", + "description": {"en": "JSON body (advanced)", "de": "JSON für API", "fr": "Corps JSON"}}, ], "inputs": 1, "outputs": 1, + "inputPorts": {0: {"accepts": ["TaskResult", "Transit"]}}, + "outputPorts": {0: {"schema": "TaskResult"}}, "meta": {"icon": "mdi-pencil-outline", "color": "#7B68EE"}, "_method": "clickup", "_action": "updateTask", - "_paramMap": { - "connectionId": "connectionReference", - "taskId": "taskId", - "path": "path", - "taskUpdate": "taskUpdate", - }, }, { "id": "clickup.uploadAttachment", "category": "clickup", "label": {"en": "Upload attachment", "de": "Anhang hochladen", "fr": "Téléverser pièce jointe"}, - "description": {"en": "Upload file to a task (upstream file)", "de": "Datei an Task anhängen", "fr": "Joindre un fichier à la tâche"}, + "description": {"en": "Upload file to a task", "de": "Datei an Task anhängen", "fr": "Joindre un fichier"}, "parameters": [ - {"name": "connectionId", "type": "string", "required": True, "description": {"en": "ClickUp connection", "de": "ClickUp-Verbindung", "fr": "Connexion ClickUp"}}, - {"name": "taskId", "type": "string", "required": False, "description": {"en": "Task ID", "de": "Task-ID", "fr": "ID tâche"}}, - {"name": "path", "type": "string", "required": False, "description": {"en": "Or path to task", "de": "Oder Pfad", "fr": "Ou chemin"}}, - {"name": "fileName", "type": "string", "required": False, "description": {"en": "File name", 
"de": "Dateiname", "fr": "Nom du fichier"}}, + {"name": "connectionReference", "type": "string", "required": True, "frontendType": "userConnection", + "description": {"en": "ClickUp connection", "de": "ClickUp-Verbindung", "fr": "Connexion ClickUp"}}, + {"name": "taskId", "type": "string", "required": False, "frontendType": "text", + "description": {"en": "Task ID", "de": "Task-ID", "fr": "ID tâche"}}, + {"name": "path", "type": "string", "required": False, "frontendType": "text", + "description": {"en": "Or path to task", "de": "Oder Pfad", "fr": "Ou chemin"}}, + {"name": "fileName", "type": "string", "required": False, "frontendType": "text", + "description": {"en": "File name", "de": "Dateiname", "fr": "Nom du fichier"}}, ], "inputs": 1, "outputs": 1, + "inputPorts": {0: {"accepts": ["DocumentList", "Transit"]}}, + "outputPorts": {0: {"schema": "ActionResult"}}, "meta": {"icon": "mdi-attachment", "color": "#7B68EE"}, "_method": "clickup", "_action": "uploadAttachment", - "_paramMap": { - "connectionId": "connectionReference", - "taskId": "taskId", - "path": "path", - "fileName": "fileName", - }, }, ] diff --git a/modules/features/graphicalEditor/nodeDefinitions/data.py b/modules/features/graphicalEditor/nodeDefinitions/data.py new file mode 100644 index 00000000..a96c7ee5 --- /dev/null +++ b/modules/features/graphicalEditor/nodeDefinitions/data.py @@ -0,0 +1,54 @@ +# Copyright (c) 2025 Patrick Motsch +# Data manipulation node definitions: aggregate, transform, filter. 
+ +DATA_NODES = [ + { + "id": "data.aggregate", + "category": "data", + "label": {"en": "Aggregate", "de": "Sammeln", "fr": "Agréger"}, + "description": {"en": "Collect results from loop iterations", "de": "Ergebnisse aus Schleifen-Iterationen sammeln", "fr": "Collecter les résultats des itérations"}, + "parameters": [ + {"name": "mode", "type": "string", "required": False, "frontendType": "select", + "frontendOptions": {"options": ["collect", "concat", "sum", "count"]}, + "description": {"en": "Aggregation mode", "de": "Aggregationsmodus", "fr": "Mode d'agrégation"}, "default": "collect"}, + ], + "inputs": 1, + "outputs": 1, + "inputPorts": {0: {"accepts": ["Transit"]}}, + "outputPorts": {0: {"schema": "AggregateResult"}}, + "executor": "data", + "meta": {"icon": "mdi-playlist-plus", "color": "#607D8B"}, + }, + { + "id": "data.transform", + "category": "data", + "label": {"en": "Transform", "de": "Umwandeln", "fr": "Transformer"}, + "description": {"en": "Map and restructure data", "de": "Daten umstrukturieren", "fr": "Restructurer les données"}, + "parameters": [ + {"name": "mappings", "type": "json", "required": True, "frontendType": "mappingTable", + "description": {"en": "Field mappings", "de": "Feld-Zuordnungen", "fr": "Correspondances"}, "default": []}, + ], + "inputs": 1, + "outputs": 1, + "inputPorts": {0: {"accepts": ["Transit"]}}, + "outputPorts": {0: {"schema": "ActionResult", "dynamic": True, "deriveFrom": "mappings"}}, + "executor": "data", + "meta": {"icon": "mdi-swap-horizontal-bold", "color": "#607D8B"}, + }, + { + "id": "data.filter", + "category": "data", + "label": {"en": "Filter", "de": "Filtern", "fr": "Filtrer"}, + "description": {"en": "Filter items by condition", "de": "Elemente nach Bedingung filtern", "fr": "Filtrer par condition"}, + "parameters": [ + {"name": "condition", "type": "string", "required": True, "frontendType": "filterExpression", + "description": {"en": "Filter condition", "de": "Filterbedingung", "fr": "Condition de 
filtre"}}, + ], + "inputs": 1, + "outputs": 1, + "inputPorts": {0: {"accepts": ["AggregateResult", "FileList", "TaskList", "EmailList", "DocumentList"]}}, + "outputPorts": {0: {"schema": "Transit"}}, + "executor": "data", + "meta": {"icon": "mdi-filter-outline", "color": "#607D8B"}, + }, +] diff --git a/modules/features/graphicalEditor/nodeDefinitions/email.py b/modules/features/graphicalEditor/nodeDefinitions/email.py index b96a5389..87ea5244 100644 --- a/modules/features/graphicalEditor/nodeDefinitions/email.py +++ b/modules/features/graphicalEditor/nodeDefinitions/email.py @@ -1,70 +1,92 @@ # Copyright (c) 2025 Patrick Motsch # Email node definitions - map to methodOutlook actions. -# Use connectionId from user connections (like AI workspace sources). EMAIL_NODES = [ { "id": "email.checkEmail", "category": "email", "label": {"en": "Check Email", "de": "E-Mail prüfen", "fr": "Vérifier email"}, - "description": {"en": "Check for new emails (general or from specific account)", "de": "Neue E-Mails prüfen", "fr": "Vérifier les nouveaux emails"}, + "description": {"en": "Check for new emails", "de": "Neue E-Mails prüfen", "fr": "Vérifier les nouveaux emails"}, "parameters": [ - {"name": "connectionId", "type": "string", "required": True, "description": {"en": "Email account connection", "de": "E-Mail-Konto Verbindung", "fr": "Connexion compte email"}}, - {"name": "folder", "type": "string", "required": False, "description": {"en": "Folder (e.g. Inbox)", "de": "Ordner (z.B. Posteingang)", "fr": "Dossier (ex. 
Boîte de réception)"}, "default": "Inbox"}, - {"name": "limit", "type": "number", "required": False, "description": {"en": "Max emails to fetch", "de": "Max E-Mails", "fr": "Max emails"}, "default": 100}, - {"name": "fromAddress", "type": "string", "required": False, "description": {"en": "Only emails from this address", "de": "Nur E-Mails von dieser Adresse", "fr": "Seulement les e-mails de cette adresse"}, "default": ""}, - {"name": "subjectContains", "type": "string", "required": False, "description": {"en": "Subject must contain this text", "de": "Betreff muss diesen Text enthalten", "fr": "Le sujet doit contenir ce texte"}, "default": ""}, - {"name": "hasAttachment", "type": "boolean", "required": False, "description": {"en": "Only emails with attachments", "de": "Nur E-Mails mit Anhängen", "fr": "Seulement les e-mails avec pièces jointes"}, "default": False}, - {"name": "filter", "type": "string", "required": False, "description": {"en": "Advanced: raw filter (overrides above if set)", "de": "Erweitert: Filter-Text (überschreibt obige)", "fr": "Avancé: filtre brut"}, "default": ""}, + {"name": "connectionReference", "type": "string", "required": True, "frontendType": "userConnection", + "description": {"en": "Email account connection", "de": "E-Mail-Konto Verbindung", "fr": "Connexion compte email"}}, + {"name": "folder", "type": "string", "required": False, "frontendType": "text", + "description": {"en": "Folder (e.g. 
Inbox)", "de": "Ordner", "fr": "Dossier"}, "default": "Inbox"}, + {"name": "limit", "type": "number", "required": False, "frontendType": "number", + "description": {"en": "Max emails to fetch", "de": "Max E-Mails", "fr": "Max emails"}, "default": 100}, + {"name": "fromAddress", "type": "string", "required": False, "frontendType": "text", + "description": {"en": "Only emails from this address", "de": "Nur von dieser Adresse", "fr": "Seulement de cette adresse"}, "default": ""}, + {"name": "subjectContains", "type": "string", "required": False, "frontendType": "text", + "description": {"en": "Subject must contain", "de": "Betreff muss enthalten", "fr": "Le sujet doit contenir"}, "default": ""}, + {"name": "hasAttachment", "type": "boolean", "required": False, "frontendType": "checkbox", + "description": {"en": "Only with attachments", "de": "Nur mit Anhängen", "fr": "Avec pièces jointes"}, "default": False}, + {"name": "filter", "type": "string", "required": False, "frontendType": "text", + "description": {"en": "Advanced: raw filter", "de": "Erweitert: Filter-Text", "fr": "Avancé: filtre brut"}, "default": ""}, ], "inputs": 1, "outputs": 1, + "inputPorts": {0: {"accepts": ["Transit"]}}, + "outputPorts": {0: {"schema": "EmailList"}}, "meta": {"icon": "mdi-email-check", "color": "#1976D2"}, "_method": "outlook", "_action": "readEmails", - "_paramMap": {"connectionId": "connectionReference", "folder": "folder", "limit": "limit", "filter": "filter"}, }, { "id": "email.searchEmail", "category": "email", "label": {"en": "Search Email", "de": "E-Mail suchen", "fr": "Rechercher email"}, - "description": {"en": "Search or find emails", "de": "E-Mails suchen oder finden", "fr": "Rechercher des emails"}, + "description": {"en": "Search or find emails", "de": "E-Mails suchen", "fr": "Rechercher des emails"}, "parameters": [ - {"name": "connectionId", "type": "string", "required": True, "description": {"en": "Email account connection", "de": "E-Mail-Konto Verbindung", "fr": 
"Connexion compte email"}}, - {"name": "query", "type": "string", "required": False, "description": {"en": "General search term (searches subject, body, from)", "de": "Suchbegriff (durchsucht Betreff, Inhalt, Absender)", "fr": "Terme de recherche (sujet, corps, expéditeur)"}, "default": ""}, - {"name": "folder", "type": "string", "required": False, "description": {"en": "Folder to search", "de": "Ordner zum Suchen", "fr": "Dossier à rechercher"}, "default": "Inbox"}, - {"name": "limit", "type": "number", "required": False, "description": {"en": "Max emails to return", "de": "Max E-Mails", "fr": "Max emails"}, "default": 100}, - {"name": "fromAddress", "type": "string", "required": False, "description": {"en": "Only emails from this address", "de": "Nur E-Mails von dieser Adresse", "fr": "Seulement les e-mails de cette adresse"}, "default": ""}, - {"name": "toAddress", "type": "string", "required": False, "description": {"en": "Only emails to this recipient", "de": "Nur E-Mails an diesen Empfänger", "fr": "Seulement les e-mails à ce destinataire"}, "default": ""}, - {"name": "subjectContains", "type": "string", "required": False, "description": {"en": "Subject must contain this text", "de": "Betreff muss diesen Text enthalten", "fr": "Le sujet doit contenir ce texte"}, "default": ""}, - {"name": "bodyContains", "type": "string", "required": False, "description": {"en": "Body/content must contain this text", "de": "Inhalt muss diesen Text enthalten", "fr": "Le corps doit contenir ce texte"}, "default": ""}, - {"name": "hasAttachment", "type": "boolean", "required": False, "description": {"en": "Only emails with attachments", "de": "Nur E-Mails mit Anhängen", "fr": "Seulement les e-mails avec pièces jointes"}, "default": False}, - {"name": "filter", "type": "string", "required": False, "description": {"en": "Advanced: raw KQL (overrides above if set)", "de": "Erweitert: KQL-Filter (überschreibt obige)", "fr": "Avancé: filtre KQL brut"}, "default": ""}, + {"name": 
"connectionReference", "type": "string", "required": True, "frontendType": "userConnection", + "description": {"en": "Email account connection", "de": "E-Mail-Konto Verbindung", "fr": "Connexion compte email"}}, + {"name": "query", "type": "string", "required": False, "frontendType": "text", + "description": {"en": "Search term", "de": "Suchbegriff", "fr": "Terme de recherche"}, "default": ""}, + {"name": "folder", "type": "string", "required": False, "frontendType": "text", + "description": {"en": "Folder to search", "de": "Ordner", "fr": "Dossier"}, "default": "Inbox"}, + {"name": "limit", "type": "number", "required": False, "frontendType": "number", + "description": {"en": "Max emails", "de": "Max E-Mails", "fr": "Max emails"}, "default": 100}, + {"name": "fromAddress", "type": "string", "required": False, "frontendType": "text", + "description": {"en": "From address", "de": "Von Adresse", "fr": "De l'adresse"}, "default": ""}, + {"name": "toAddress", "type": "string", "required": False, "frontendType": "text", + "description": {"en": "To address", "de": "An Adresse", "fr": "À l'adresse"}, "default": ""}, + {"name": "subjectContains", "type": "string", "required": False, "frontendType": "text", + "description": {"en": "Subject contains", "de": "Betreff enthält", "fr": "Sujet contient"}, "default": ""}, + {"name": "bodyContains", "type": "string", "required": False, "frontendType": "text", + "description": {"en": "Body contains", "de": "Inhalt enthält", "fr": "Corps contient"}, "default": ""}, + {"name": "hasAttachment", "type": "boolean", "required": False, "frontendType": "checkbox", + "description": {"en": "With attachments", "de": "Mit Anhängen", "fr": "Avec pièces jointes"}, "default": False}, + {"name": "filter", "type": "string", "required": False, "frontendType": "text", + "description": {"en": "Advanced: raw KQL", "de": "Erweitert: KQL-Filter", "fr": "Avancé: filtre KQL"}, "default": ""}, ], "inputs": 1, "outputs": 1, + "inputPorts": {0: {"accepts": 
["Transit"]}}, + "outputPorts": {0: {"schema": "EmailList"}}, "meta": {"icon": "mdi-email-search", "color": "#1976D2"}, "_method": "outlook", "_action": "searchEmails", - "_paramMap": {"connectionId": "connectionReference", "query": "query", "folder": "folder", "limit": "limit", "filter": "filter"}, }, { "id": "email.draftEmail", "category": "email", "label": {"en": "Draft Email", "de": "E-Mail entwerfen", "fr": "Brouillon email"}, - "description": {"en": "Create a draft email", "de": "E-Mail-Entwurf erstellen", "fr": "Créer un brouillon d'email"}, + "description": {"en": "Create a draft email", "de": "E-Mail-Entwurf erstellen", "fr": "Créer un brouillon"}, "parameters": [ - {"name": "connectionId", "type": "string", "required": True, "description": {"en": "Email account connection", "de": "E-Mail-Konto Verbindung", "fr": "Connexion compte email"}}, - {"name": "subject", "type": "string", "required": True, "description": {"en": "Email subject", "de": "E-Mail-Betreff", "fr": "Sujet"}}, - {"name": "body", "type": "string", "required": True, "description": {"en": "Email body", "de": "E-Mail-Text", "fr": "Corps de l'email"}}, - {"name": "to", "type": "string", "required": False, "description": {"en": "Recipient(s)", "de": "Empfänger", "fr": "Destinataire(s)"}, "default": ""}, + {"name": "connectionReference", "type": "string", "required": True, "frontendType": "userConnection", + "description": {"en": "Email account", "de": "E-Mail-Konto", "fr": "Compte email"}}, + {"name": "subject", "type": "string", "required": True, "frontendType": "text", + "description": {"en": "Subject", "de": "Betreff", "fr": "Sujet"}}, + {"name": "body", "type": "string", "required": True, "frontendType": "textarea", + "description": {"en": "Body", "de": "Inhalt", "fr": "Corps"}}, + {"name": "to", "type": "string", "required": False, "frontendType": "text", + "description": {"en": "Recipient(s)", "de": "Empfänger", "fr": "Destinataire(s)"}, "default": ""}, ], "inputs": 1, "outputs": 1, + 
"inputPorts": {0: {"accepts": ["EmailDraft", "AiResult", "Transit"]}}, + "outputPorts": {0: {"schema": "ActionResult"}}, "meta": {"icon": "mdi-email-edit", "color": "#1976D2"}, "_method": "outlook", "_action": "composeAndDraftEmailWithContext", - "_paramMap": {"connectionId": "connectionReference", "to": "to"}, - "_contextFrom": ["subject", "body"], }, ] diff --git a/modules/features/graphicalEditor/nodeDefinitions/file.py b/modules/features/graphicalEditor/nodeDefinitions/file.py index bb168218..9f5bea7a 100644 --- a/modules/features/graphicalEditor/nodeDefinitions/file.py +++ b/modules/features/graphicalEditor/nodeDefinitions/file.py @@ -7,54 +7,31 @@ FILE_NODES = [ "category": "file", "label": {"en": "Create File", "de": "Datei erstellen", "fr": "Créer fichier"}, "description": { - "en": "Create a file from context (text/markdown from AI). Configurable format and style.", - "de": "Erstellt eine Datei aus Kontext (Text/Markdown von KI). Format und Stil konfigurierbar.", - "fr": "Crée un fichier à partir du contexte. Format et style configurables.", + "en": "Create a file from context (text/markdown from AI).", + "de": "Erstellt eine Datei aus Kontext (Text/Markdown von KI).", + "fr": "Crée un fichier à partir du contexte.", }, "parameters": [ - { - "name": "contentSources", - "type": "json", - "required": False, - "description": { - "en": "Array of context refs (e.g. AI, form). Concatenated in order. Empty = from connected node.", - "de": "Liste von Kontext-Quellen (z.B. KI, Formular). Werden nacheinander zusammengefügt. Leer = vom verbundenen Node.", - "fr": "Liste de sources de contexte. Concaténées dans l'ordre. 
Vide = du noeud connecté.", - }, - "default": [], - }, - { - "name": "outputFormat", - "type": "string", - "required": True, - "description": {"en": "Output format", "de": "Ausgabeformat", "fr": "Format de sortie"}, - "default": "docx", - }, - { - "name": "title", - "type": "string", - "required": False, - "description": {"en": "Document title", "de": "Dokumenttitel", "fr": "Titre du document"}, - }, - { - "name": "templateName", - "type": "string", - "required": False, - "description": {"en": "Style preset: default, corporate, minimal", "de": "Stil-Vorlage", "fr": "Prését style"}, - }, - { - "name": "language", - "type": "string", - "required": False, - "description": {"en": "Language code (de, en, fr)", "de": "Sprachcode", "fr": "Code langue"}, - "default": "de", - }, + {"name": "contentSources", "type": "json", "required": False, "frontendType": "json", + "description": {"en": "Context source refs", "de": "Kontext-Quellen", "fr": "Sources de contexte"}, "default": []}, + {"name": "outputFormat", "type": "string", "required": True, "frontendType": "select", + "frontendOptions": {"options": ["docx", "pdf", "txt", "html", "md"]}, + "description": {"en": "Output format", "de": "Ausgabeformat", "fr": "Format de sortie"}, "default": "docx"}, + {"name": "title", "type": "string", "required": False, "frontendType": "text", + "description": {"en": "Document title", "de": "Dokumenttitel", "fr": "Titre du document"}}, + {"name": "templateName", "type": "string", "required": False, "frontendType": "select", + "frontendOptions": {"options": ["default", "corporate", "minimal"]}, + "description": {"en": "Style preset", "de": "Stil-Vorlage", "fr": "Prését style"}}, + {"name": "language", "type": "string", "required": False, "frontendType": "select", + "frontendOptions": {"options": ["de", "en", "fr"]}, + "description": {"en": "Language", "de": "Sprache", "fr": "Langue"}, "default": "de"}, ], "inputs": 1, "outputs": 1, + "inputPorts": {0: {"accepts": ["AiResult", "TextResult", 
"Transit"]}}, + "outputPorts": {0: {"schema": "DocumentList"}}, "meta": {"icon": "mdi-file-plus-outline", "color": "#2196F3"}, "_method": "file", "_action": "create", - "_paramMap": {}, }, ] diff --git a/modules/features/graphicalEditor/nodeDefinitions/flow.py b/modules/features/graphicalEditor/nodeDefinitions/flow.py index 02e25764..c3d0a84d 100644 --- a/modules/features/graphicalEditor/nodeDefinitions/flow.py +++ b/modules/features/graphicalEditor/nodeDefinitions/flow.py @@ -8,11 +8,19 @@ FLOW_NODES = [ "label": {"en": "If / Else", "de": "Wenn / Sonst", "fr": "Si / Sinon"}, "description": {"en": "Branch based on condition", "de": "Verzweigung nach Bedingung", "fr": "Branche selon condition"}, "parameters": [ - {"name": "condition", "type": "string", "required": True, "description": {"en": "Expression to evaluate (e.g. {{value}} > 0)", "de": "Bedingung", "fr": "Condition"}}, + { + "name": "condition", + "type": "string", + "required": True, + "frontendType": "condition", + "description": {"en": "Condition to evaluate", "de": "Bedingung", "fr": "Condition"}, + }, ], "inputs": 1, "outputs": 2, "outputLabels": {"en": ["Yes", "No"], "de": ["Ja", "Nein"], "fr": ["Oui", "Non"]}, + "inputPorts": {0: {"accepts": ["Transit"]}}, + "outputPorts": {0: {"schema": "Transit"}, 1: {"schema": "Transit"}}, "executor": "flow", "meta": {"icon": "mdi-source-branch", "color": "#FF9800"}, }, @@ -22,11 +30,25 @@ FLOW_NODES = [ "label": {"en": "Switch", "de": "Switch", "fr": "Switch"}, "description": {"en": "Multiple branches based on value", "de": "Mehrere Zweige nach Wert", "fr": "Branches multiples selon valeur"}, "parameters": [ - {"name": "value", "type": "string", "required": True, "description": {"en": "Value to match", "de": "Zu vergleichender Wert", "fr": "Valeur à comparer"}}, - {"name": "cases", "type": "array", "required": False, "description": {"en": "List of cases", "de": "Fälle", "fr": "Cas"}}, + { + "name": "value", + "type": "string", + "required": True, + "frontendType": 
"text", + "description": {"en": "Value to match", "de": "Zu vergleichender Wert", "fr": "Valeur à comparer"}, + }, + { + "name": "cases", + "type": "array", + "required": False, + "frontendType": "caseList", + "description": {"en": "List of cases", "de": "Fälle", "fr": "Cas"}, + }, ], "inputs": 1, "outputs": 1, + "inputPorts": {0: {"accepts": ["Transit"]}}, + "outputPorts": {0: {"schema": "Transit"}}, "executor": "flow", "meta": {"icon": "mdi-swap-horizontal", "color": "#FF9800"}, }, @@ -36,11 +58,42 @@ FLOW_NODES = [ "label": {"en": "Loop / For Each", "de": "Schleife / Für Jedes", "fr": "Boucle / Pour Chaque"}, "description": {"en": "Iterate over array items", "de": "Über Array-Elemente iterieren", "fr": "Itérer sur les éléments"}, "parameters": [ - {"name": "items", "type": "string", "required": True, "description": {"en": "Path to array (e.g. {{input.items}})", "de": "Pfad zum Array", "fr": "Chemin vers le tableau"}}, + { + "name": "items", + "type": "string", + "required": True, + "frontendType": "text", + "description": {"en": "Path to array (e.g. 
{{input.items}})", "de": "Pfad zum Array", "fr": "Chemin vers le tableau"}, + }, ], "inputs": 1, "outputs": 1, + "inputPorts": {0: {"accepts": ["Transit"]}}, + "outputPorts": {0: {"schema": "LoopItem"}}, "executor": "flow", "meta": {"icon": "mdi-repeat", "color": "#FF9800"}, }, + { + "id": "flow.merge", + "category": "flow", + "label": {"en": "Merge", "de": "Zusammenführen", "fr": "Fusionner"}, + "description": {"en": "Merge multiple branches", "de": "Mehrere Zweige zusammenführen", "fr": "Fusionner plusieurs branches"}, + "parameters": [ + { + "name": "mode", + "type": "string", + "required": False, + "frontendType": "select", + "frontendOptions": {"options": ["first", "all", "append"]}, + "description": {"en": "Merge mode", "de": "Zusammenführungsmodus", "fr": "Mode de fusion"}, + "default": "first", + }, + ], + "inputs": 2, + "outputs": 1, + "inputPorts": {0: {"accepts": ["Transit"]}, 1: {"accepts": ["Transit"]}}, + "outputPorts": {0: {"schema": "MergeResult"}}, + "executor": "flow", + "meta": {"icon": "mdi-call-merge", "color": "#FF9800"}, + }, ] diff --git a/modules/features/graphicalEditor/nodeDefinitions/input.py b/modules/features/graphicalEditor/nodeDefinitions/input.py index d9c56c78..4d15de46 100644 --- a/modules/features/graphicalEditor/nodeDefinitions/input.py +++ b/modules/features/graphicalEditor/nodeDefinitions/input.py @@ -12,9 +12,10 @@ INPUT_NODES = [ "name": "fields", "type": "json", "required": True, + "frontendType": "fieldBuilder", "description": { - "en": "Form fields: [{name, type, label, required, options?}]. type may include clickup_tasks with clickupConnectionId + clickupListId for a ClickUp task dropdown (value {add, rem}).", - "de": "Formularfelder. type: u. a. 
clickup_tasks mit clickupConnectionId und clickupListId für ClickUp-Aufgaben-Dropdown (Wert wie Relationship-Feld).", + "en": "Form fields: [{name, type, label, required, options?}]", + "de": "Formularfelder", "fr": "Champs du formulaire", }, "default": [], @@ -22,6 +23,8 @@ INPUT_NODES = [ ], "inputs": 1, "outputs": 1, + "inputPorts": {0: {"accepts": ["Transit"]}}, + "outputPorts": {0: {"schema": "FormPayload", "dynamic": True, "deriveFrom": "fields"}}, "executor": "input", "meta": {"icon": "mdi-form-textbox", "color": "#9C27B0"}, }, @@ -31,12 +34,18 @@ INPUT_NODES = [ "label": {"en": "Approval", "de": "Genehmigung", "fr": "Approbation"}, "description": {"en": "User approves or rejects", "de": "Benutzer genehmigt oder lehnt ab", "fr": "L'utilisateur approuve ou rejette"}, "parameters": [ - {"name": "title", "type": "string", "required": True, "description": {"en": "Approval title", "de": "Genehmigungstitel", "fr": "Titre"}}, - {"name": "description", "type": "string", "required": False, "description": {"en": "What to approve", "de": "Was genehmigt werden soll", "fr": "Ce qu'il faut approuver"}}, - {"name": "approvalType", "type": "string", "required": False, "description": {"en": "Type: document or generic", "de": "Typ: document oder generic", "fr": "Type: document ou generic"}, "default": "generic"}, + {"name": "title", "type": "string", "required": True, "frontendType": "text", + "description": {"en": "Approval title", "de": "Genehmigungstitel", "fr": "Titre"}}, + {"name": "description", "type": "string", "required": False, "frontendType": "textarea", + "description": {"en": "What to approve", "de": "Was genehmigt werden soll", "fr": "Ce qu'il faut approuver"}}, + {"name": "approvalType", "type": "string", "required": False, "frontendType": "select", + "frontendOptions": {"options": ["generic", "document"]}, + "description": {"en": "Type: document or generic", "de": "Typ: document oder generic", "fr": "Type"}, "default": "generic"}, ], "inputs": 1, "outputs": 1, 
+ "inputPorts": {0: {"accepts": ["Transit"]}}, + "outputPorts": {0: {"schema": "BoolResult"}}, "executor": "input", "meta": {"icon": "mdi-check-decagram", "color": "#4CAF50"}, }, @@ -46,13 +55,20 @@ INPUT_NODES = [ "label": {"en": "Upload", "de": "Upload", "fr": "Téléversement"}, "description": {"en": "User uploads file(s)", "de": "Benutzer lädt Datei(en) hoch", "fr": "L'utilisateur téléverse des fichiers"}, "parameters": [ - {"name": "accept", "type": "string", "required": False, "description": {"en": "Accept string for file input (e.g. .pdf,image/*)", "de": "Accept-String für Dateiauswahl", "fr": "Chaîne accept"}, "default": ""}, - {"name": "allowedTypes", "type": "json", "required": False, "description": {"en": "Selected file types (from UI multi-select)", "de": "Ausgewählte Dateitypen", "fr": "Types sélectionnés"}, "default": []}, - {"name": "maxSize", "type": "number", "required": False, "description": {"en": "Max file size in MB", "de": "Max. Dateigröße in MB", "fr": "Taille max en Mo"}, "default": 10}, - {"name": "multiple", "type": "boolean", "required": False, "description": {"en": "Allow multiple files", "de": "Mehrere Dateien erlauben", "fr": "Autoriser plusieurs fichiers"}, "default": False}, + {"name": "accept", "type": "string", "required": False, "frontendType": "text", + "description": {"en": "Accept string for file input (e.g. .pdf,image/*)", "de": "Accept-String", "fr": "Chaîne accept"}, "default": ""}, + {"name": "allowedTypes", "type": "json", "required": False, "frontendType": "multiselect", + "frontendOptions": {"options": ["pdf", "docx", "xlsx", "pptx", "txt", "csv", "jpg", "png", "gif"]}, + "description": {"en": "Selected file types", "de": "Ausgewählte Dateitypen", "fr": "Types sélectionnés"}, "default": []}, + {"name": "maxSize", "type": "number", "required": False, "frontendType": "number", + "description": {"en": "Max file size in MB", "de": "Max. 
Dateigröße in MB", "fr": "Taille max en Mo"}, "default": 10}, + {"name": "multiple", "type": "boolean", "required": False, "frontendType": "checkbox", + "description": {"en": "Allow multiple files", "de": "Mehrere Dateien erlauben", "fr": "Autoriser plusieurs fichiers"}, "default": False}, ], "inputs": 1, "outputs": 1, + "inputPorts": {0: {"accepts": ["Transit"]}}, + "outputPorts": {0: {"schema": "DocumentList"}}, "executor": "input", "meta": {"icon": "mdi-upload", "color": "#2196F3"}, }, @@ -62,11 +78,15 @@ INPUT_NODES = [ "label": {"en": "Comment", "de": "Kommentar", "fr": "Commentaire"}, "description": {"en": "User adds a comment", "de": "Benutzer fügt einen Kommentar hinzu", "fr": "L'utilisateur ajoute un commentaire"}, "parameters": [ - {"name": "placeholder", "type": "string", "required": False, "description": {"en": "Placeholder text", "de": "Platzhalter", "fr": "Texte indicatif"}, "default": ""}, - {"name": "required", "type": "boolean", "required": False, "description": {"en": "Comment required", "de": "Kommentar erforderlich", "fr": "Commentaire requis"}, "default": True}, + {"name": "placeholder", "type": "string", "required": False, "frontendType": "text", + "description": {"en": "Placeholder text", "de": "Platzhalter", "fr": "Texte indicatif"}, "default": ""}, + {"name": "required", "type": "boolean", "required": False, "frontendType": "checkbox", + "description": {"en": "Comment required", "de": "Kommentar erforderlich", "fr": "Commentaire requis"}, "default": True}, ], "inputs": 1, "outputs": 1, + "inputPorts": {0: {"accepts": ["Transit"]}}, + "outputPorts": {0: {"schema": "TextResult"}}, "executor": "input", "meta": {"icon": "mdi-comment-text", "color": "#FF9800"}, }, @@ -76,11 +96,16 @@ INPUT_NODES = [ "label": {"en": "Review", "de": "Prüfung", "fr": "Revue"}, "description": {"en": "User reviews content", "de": "Benutzer prüft Inhalt", "fr": "L'utilisateur révise le contenu"}, "parameters": [ - {"name": "contentRef", "type": "string", "required": 
True, "description": {"en": "Reference to content (e.g. {{nodeId.field}})", "de": "Referenz auf Inhalt", "fr": "Référence au contenu"}}, - {"name": "reviewType", "type": "string", "required": False, "description": {"en": "Type of review", "de": "Art der Prüfung", "fr": "Type de revue"}, "default": "generic"}, + {"name": "contentRef", "type": "string", "required": True, "frontendType": "text", + "description": {"en": "Reference to content", "de": "Referenz auf Inhalt", "fr": "Référence au contenu"}}, + {"name": "reviewType", "type": "string", "required": False, "frontendType": "select", + "frontendOptions": {"options": ["generic", "document"]}, + "description": {"en": "Type of review", "de": "Art der Prüfung", "fr": "Type de revue"}, "default": "generic"}, ], "inputs": 1, "outputs": 1, + "inputPorts": {0: {"accepts": ["Transit"]}}, + "outputPorts": {0: {"schema": "BoolResult"}}, "executor": "input", "meta": {"icon": "mdi-magnify-scan", "color": "#673AB7"}, }, @@ -90,17 +115,15 @@ INPUT_NODES = [ "label": {"en": "Selection", "de": "Auswahl", "fr": "Sélection"}, "description": {"en": "User selects from options", "de": "Benutzer wählt aus Optionen", "fr": "L'utilisateur choisit parmi les options"}, "parameters": [ - { - "name": "options", - "type": "json", - "required": True, - "description": {"en": "Options: [{value, label}]", "de": "Optionen", "fr": "Options"}, - "default": [], - }, - {"name": "multiple", "type": "boolean", "required": False, "description": {"en": "Allow multiple selection", "de": "Mehrfachauswahl erlauben", "fr": "Sélection multiple"}, "default": False}, + {"name": "options", "type": "json", "required": True, "frontendType": "keyValueRows", + "description": {"en": "Options: [{value, label}]", "de": "Optionen", "fr": "Options"}, "default": []}, + {"name": "multiple", "type": "boolean", "required": False, "frontendType": "checkbox", + "description": {"en": "Allow multiple selection", "de": "Mehrfachauswahl erlauben", "fr": "Sélection multiple"}, 
"default": False}, ], "inputs": 1, "outputs": 1, + "inputPorts": {0: {"accepts": ["Transit"]}}, + "outputPorts": {0: {"schema": "TextResult"}}, "executor": "input", "meta": {"icon": "mdi-format-list-checks", "color": "#009688"}, }, @@ -110,12 +133,17 @@ INPUT_NODES = [ "label": {"en": "Confirmation", "de": "Bestätigung", "fr": "Confirmation"}, "description": {"en": "User confirms yes/no", "de": "Benutzer bestätigt Ja/Nein", "fr": "L'utilisateur confirme oui/non"}, "parameters": [ - {"name": "question", "type": "string", "required": True, "description": {"en": "Question to confirm", "de": "Zu bestätigende Frage", "fr": "Question à confirmer"}}, - {"name": "confirmLabel", "type": "string", "required": False, "description": {"en": "Label for confirm button", "de": "Label für Bestätigen-Button", "fr": "Libellé du bouton confirmer"}, "default": "Confirm"}, - {"name": "rejectLabel", "type": "string", "required": False, "description": {"en": "Label for reject button", "de": "Label für Ablehnen-Button", "fr": "Libellé du bouton refuser"}, "default": "Reject"}, + {"name": "question", "type": "string", "required": True, "frontendType": "text", + "description": {"en": "Question to confirm", "de": "Zu bestätigende Frage", "fr": "Question à confirmer"}}, + {"name": "confirmLabel", "type": "string", "required": False, "frontendType": "text", + "description": {"en": "Label for confirm button", "de": "Label für Bestätigen-Button", "fr": "Libellé confirmer"}, "default": "Confirm"}, + {"name": "rejectLabel", "type": "string", "required": False, "frontendType": "text", + "description": {"en": "Label for reject button", "de": "Label für Ablehnen-Button", "fr": "Libellé refuser"}, "default": "Reject"}, ], "inputs": 1, "outputs": 1, + "inputPorts": {0: {"accepts": ["Transit"]}}, + "outputPorts": {0: {"schema": "BoolResult"}}, "executor": "input", "meta": {"icon": "mdi-checkbox-marked-circle", "color": "#8BC34A"}, }, diff --git 
a/modules/features/graphicalEditor/nodeDefinitions/sharepoint.py b/modules/features/graphicalEditor/nodeDefinitions/sharepoint.py index f0dd30cf..5490499f 100644 --- a/modules/features/graphicalEditor/nodeDefinitions/sharepoint.py +++ b/modules/features/graphicalEditor/nodeDefinitions/sharepoint.py @@ -1,6 +1,5 @@ # Copyright (c) 2025 Patrick Motsch # SharePoint node definitions - map to methodSharepoint actions. -# Use connectionId and path from connection selector (like workflow folder view). SHAREPOINT_NODES = [ { @@ -9,17 +8,22 @@ SHAREPOINT_NODES = [ "label": {"en": "Find File", "de": "Datei finden", "fr": "Trouver fichier"}, "description": {"en": "Find file by path or search", "de": "Datei nach Pfad oder Suche finden", "fr": "Trouver fichier par chemin ou recherche"}, "parameters": [ - {"name": "connectionId", "type": "string", "required": True, "description": {"en": "SharePoint connection", "de": "SharePoint-Verbindung", "fr": "Connexion SharePoint"}}, - {"name": "searchQuery", "type": "string", "required": True, "description": {"en": "Search query or path", "de": "Suchanfrage oder Pfad", "fr": "Requête ou chemin"}}, - {"name": "site", "type": "string", "required": False, "description": {"en": "Optional site hint", "de": "Optionaler Site-Hinweis", "fr": "Indication de site"}, "default": ""}, - {"name": "maxResults", "type": "number", "required": False, "description": {"en": "Max results", "de": "Max Ergebnisse", "fr": "Max résultats"}, "default": 1000}, + {"name": "connectionReference", "type": "string", "required": True, "frontendType": "userConnection", + "description": {"en": "SharePoint connection", "de": "SharePoint-Verbindung", "fr": "Connexion SharePoint"}}, + {"name": "searchQuery", "type": "string", "required": True, "frontendType": "text", + "description": {"en": "Search query or path", "de": "Suchanfrage oder Pfad", "fr": "Requête ou chemin"}}, + {"name": "site", "type": "string", "required": False, "frontendType": "text", + "description": {"en": 
"Optional site hint", "de": "Optionaler Site-Hinweis", "fr": "Indication de site"}, "default": ""}, + {"name": "maxResults", "type": "number", "required": False, "frontendType": "number", + "description": {"en": "Max results", "de": "Max Ergebnisse", "fr": "Max résultats"}, "default": 1000}, ], "inputs": 1, "outputs": 1, + "inputPorts": {0: {"accepts": ["Transit"]}}, + "outputPorts": {0: {"schema": "FileList"}}, "meta": {"icon": "mdi-file-search", "color": "#0078D4"}, "_method": "sharepoint", "_action": "findDocumentPath", - "_paramMap": {"connectionId": "connectionReference", "searchQuery": "searchQuery", "site": "site", "maxResults": "maxResults"}, }, { "id": "sharepoint.readFile", @@ -27,15 +31,19 @@ SHAREPOINT_NODES = [ "label": {"en": "Read File", "de": "Datei lesen", "fr": "Lire fichier"}, "description": {"en": "Extract content from file", "de": "Inhalt aus Datei extrahieren", "fr": "Extraire le contenu du fichier"}, "parameters": [ - {"name": "connectionId", "type": "string", "required": True, "description": {"en": "SharePoint connection", "de": "SharePoint-Verbindung", "fr": "Connexion SharePoint"}}, - {"name": "path", "type": "string", "required": True, "description": {"en": "File path or documentList from find file", "de": "Dateipfad oder documentList von Find", "fr": "Chemin ou documentList"}}, + {"name": "connectionReference", "type": "string", "required": True, "frontendType": "userConnection", + "description": {"en": "SharePoint connection", "de": "SharePoint-Verbindung", "fr": "Connexion SharePoint"}}, + {"name": "pathQuery", "type": "string", "required": True, "frontendType": "sharepointFile", + "frontendOptions": {"dependsOn": "connectionReference"}, + "description": {"en": "File path", "de": "Dateipfad", "fr": "Chemin"}}, ], "inputs": 1, "outputs": 1, + "inputPorts": {0: {"accepts": ["FileList", "Transit"]}}, + "outputPorts": {0: {"schema": "DocumentList"}}, "meta": {"icon": "mdi-file-document", "color": "#0078D4"}, "_method": "sharepoint", 
"_action": "readDocuments", - "_paramMap": {"connectionId": "connectionReference", "path": "pathQuery"}, }, { "id": "sharepoint.uploadFile", @@ -43,47 +51,59 @@ SHAREPOINT_NODES = [ "label": {"en": "Upload File", "de": "Datei hochladen", "fr": "Téléverser fichier"}, "description": {"en": "Upload file to SharePoint", "de": "Datei zu SharePoint hochladen", "fr": "Téléverser fichier vers SharePoint"}, "parameters": [ - {"name": "connectionId", "type": "string", "required": True, "description": {"en": "SharePoint connection", "de": "SharePoint-Verbindung", "fr": "Connexion SharePoint"}}, - {"name": "path", "type": "string", "required": True, "description": {"en": "Target folder path (e.g. /sites/.../Folder)", "de": "Zielordner-Pfad", "fr": "Chemin du dossier cible"}}, + {"name": "connectionReference", "type": "string", "required": True, "frontendType": "userConnection", + "description": {"en": "SharePoint connection", "de": "SharePoint-Verbindung", "fr": "Connexion SharePoint"}}, + {"name": "pathQuery", "type": "string", "required": True, "frontendType": "sharepointFolder", + "frontendOptions": {"dependsOn": "connectionReference"}, + "description": {"en": "Target folder path", "de": "Zielordner-Pfad", "fr": "Chemin du dossier cible"}}, ], "inputs": 1, "outputs": 1, + "inputPorts": {0: {"accepts": ["DocumentList", "Transit"]}}, + "outputPorts": {0: {"schema": "ActionResult"}}, "meta": {"icon": "mdi-upload", "color": "#0078D4"}, "_method": "sharepoint", "_action": "uploadFile", - "_paramMap": {"connectionId": "connectionReference", "path": "pathQuery"}, }, { "id": "sharepoint.listFiles", "category": "sharepoint", "label": {"en": "List Files", "de": "Dateien auflisten", "fr": "Lister fichiers"}, - "description": {"en": "List files in folder or SharePoint", "de": "Dateien in Ordner oder SharePoint auflisten", "fr": "Lister les fichiers dans un dossier"}, + "description": {"en": "List files in folder", "de": "Dateien in Ordner auflisten", "fr": "Lister les fichiers"}, 
"parameters": [ - {"name": "connectionId", "type": "string", "required": True, "description": {"en": "SharePoint connection", "de": "SharePoint-Verbindung", "fr": "Connexion SharePoint"}}, - {"name": "path", "type": "string", "required": False, "description": {"en": "Folder path (e.g. /sites/SiteName/Shared Documents)", "de": "Ordnerpfad", "fr": "Chemin du dossier"}, "default": "/"}, + {"name": "connectionReference", "type": "string", "required": True, "frontendType": "userConnection", + "description": {"en": "SharePoint connection", "de": "SharePoint-Verbindung", "fr": "Connexion SharePoint"}}, + {"name": "pathQuery", "type": "string", "required": False, "frontendType": "sharepointFolder", + "frontendOptions": {"dependsOn": "connectionReference"}, + "description": {"en": "Folder path", "de": "Ordnerpfad", "fr": "Chemin du dossier"}, "default": "/"}, ], "inputs": 1, "outputs": 1, + "inputPorts": {0: {"accepts": ["Transit"]}}, + "outputPorts": {0: {"schema": "FileList"}}, "meta": {"icon": "mdi-folder-open", "color": "#0078D4"}, "_method": "sharepoint", "_action": "listDocuments", - "_paramMap": {"connectionId": "connectionReference", "path": "pathQuery"}, }, { "id": "sharepoint.downloadFile", "category": "sharepoint", "label": {"en": "Download File", "de": "Datei herunterladen", "fr": "Télécharger fichier"}, - "description": {"en": "Download file from path (e.g. /sites/SiteName/Shared Documents/file.pdf)", "de": "Datei vom Pfad herunterladen", "fr": "Télécharger le fichier"}, + "description": {"en": "Download file from path", "de": "Datei vom Pfad herunterladen", "fr": "Télécharger le fichier"}, "parameters": [ - {"name": "connectionId", "type": "string", "required": True, "description": {"en": "SharePoint connection", "de": "SharePoint-Verbindung", "fr": "Connexion SharePoint"}}, - {"name": "path", "type": "string", "required": True, "description": {"en": "Full file path (e.g. 
/sites/SiteName/Shared Documents/file.pdf)", "de": "Vollständiger Dateipfad", "fr": "Chemin complet du fichier"}}, + {"name": "connectionReference", "type": "string", "required": True, "frontendType": "userConnection", + "description": {"en": "SharePoint connection", "de": "SharePoint-Verbindung", "fr": "Connexion SharePoint"}}, + {"name": "pathQuery", "type": "string", "required": True, "frontendType": "sharepointFile", + "frontendOptions": {"dependsOn": "connectionReference"}, + "description": {"en": "Full file path", "de": "Vollständiger Dateipfad", "fr": "Chemin complet du fichier"}}, ], "inputs": 1, "outputs": 1, + "inputPorts": {0: {"accepts": ["FileList", "Transit"]}}, + "outputPorts": {0: {"schema": "DocumentList"}}, "meta": {"icon": "mdi-download", "color": "#0078D4"}, "_method": "sharepoint", "_action": "downloadFileByPath", - "_paramMap": {"connectionId": "connectionReference", "path": "pathQuery", "siteId": "siteId", "filePath": "filePath"}, }, { "id": "sharepoint.copyFile", @@ -91,15 +111,21 @@ SHAREPOINT_NODES = [ "label": {"en": "Copy File", "de": "Datei kopieren", "fr": "Copier fichier"}, "description": {"en": "Copy file to destination", "de": "Datei an Ziel kopieren", "fr": "Copier le fichier"}, "parameters": [ - {"name": "connectionId", "type": "string", "required": True, "description": {"en": "SharePoint connection", "de": "SharePoint-Verbindung", "fr": "Connexion SharePoint"}}, - {"name": "sourcePath", "type": "string", "required": True, "description": {"en": "Source file path (from browse)", "de": "Quelldatei-Pfad", "fr": "Chemin fichier source"}}, - {"name": "destPath", "type": "string", "required": True, "description": {"en": "Destination folder path (from browse)", "de": "Zielordner-Pfad", "fr": "Chemin dossier cible"}}, + {"name": "connectionReference", "type": "string", "required": True, "frontendType": "userConnection", + "description": {"en": "SharePoint connection", "de": "SharePoint-Verbindung", "fr": "Connexion SharePoint"}}, + 
{"name": "sourcePath", "type": "string", "required": True, "frontendType": "sharepointFile", + "frontendOptions": {"dependsOn": "connectionReference"}, + "description": {"en": "Source file path", "de": "Quelldatei-Pfad", "fr": "Chemin fichier source"}}, + {"name": "destPath", "type": "string", "required": True, "frontendType": "sharepointFolder", + "frontendOptions": {"dependsOn": "connectionReference"}, + "description": {"en": "Destination folder", "de": "Zielordner", "fr": "Dossier cible"}}, ], "inputs": 1, "outputs": 1, + "inputPorts": {0: {"accepts": ["Transit"]}}, + "outputPorts": {0: {"schema": "ActionResult"}}, "meta": {"icon": "mdi-content-copy", "color": "#0078D4"}, "_method": "sharepoint", "_action": "copyFile", - "_paramMap": {"connectionId": "connectionReference", "sourcePath": "sourcePath", "destPath": "destPath"}, }, ] diff --git a/modules/features/graphicalEditor/nodeDefinitions/triggers.py b/modules/features/graphicalEditor/nodeDefinitions/triggers.py index 5071a762..ab9d75ed 100644 --- a/modules/features/graphicalEditor/nodeDefinitions/triggers.py +++ b/modules/features/graphicalEditor/nodeDefinitions/triggers.py @@ -14,6 +14,8 @@ TRIGGER_NODES = [ "parameters": [], "inputs": 0, "outputs": 1, + "inputPorts": {}, + "outputPorts": {0: {"schema": "ActionResult"}}, "executor": "trigger", "meta": {"icon": "mdi-play", "color": "#4CAF50"}, }, @@ -31,11 +33,14 @@ TRIGGER_NODES = [ "name": "formFields", "type": "json", "required": False, + "frontendType": "fieldBuilder", "description": {"en": "Field definitions", "de": "Felddefinitionen", "fr": "Définitions"}, }, ], "inputs": 0, "outputs": 1, + "inputPorts": {}, + "outputPorts": {0: {"schema": "FormPayload", "dynamic": True, "deriveFrom": "formFields"}}, "executor": "trigger", "meta": {"icon": "mdi-form-select", "color": "#9C27B0"}, }, @@ -53,11 +58,14 @@ TRIGGER_NODES = [ "name": "cron", "type": "string", "required": False, + "frontendType": "cron", "description": {"en": "Cron expression", "de": 
"Cron-Ausdruck", "fr": "Expression cron"}, }, ], "inputs": 0, "outputs": 1, + "inputPorts": {}, + "outputPorts": {0: {"schema": "ActionResult"}}, "executor": "trigger", "meta": {"icon": "mdi-clock", "color": "#2196F3"}, }, diff --git a/modules/features/graphicalEditor/nodeDefinitions/trustee.py b/modules/features/graphicalEditor/nodeDefinitions/trustee.py index 4d7082ae..7d57c91c 100644 --- a/modules/features/graphicalEditor/nodeDefinitions/trustee.py +++ b/modules/features/graphicalEditor/nodeDefinitions/trustee.py @@ -1,6 +1,5 @@ # Copyright (c) 2025 Patrick Motsch # Trustee node definitions - map to methodTrustee actions. -# Pipeline: extractFromFiles -> processDocuments -> syncToAccounting. TRUSTEE_NODES = [ { @@ -8,83 +7,100 @@ TRUSTEE_NODES = [ "category": "trustee", "label": {"en": "Refresh Accounting Data", "de": "Buchhaltungsdaten aktualisieren", "fr": "Actualiser données comptables"}, "description": { - "en": "Import/refresh accounting data from external system (e.g. Abacus). Skips import if data is fresh unless forceRefresh is set.", - "de": "Buchhaltungsdaten aus externem System importieren/aktualisieren (z.B. Abacus). Überspringt Import wenn Daten frisch sind, ausser forceRefresh ist gesetzt.", - "fr": "Importer/actualiser les données comptables depuis le système externe (ex. Abacus).", + "en": "Import/refresh accounting data from external system (e.g. 
Abacus).", + "de": "Buchhaltungsdaten aus externem System importieren/aktualisieren.", + "fr": "Importer/actualiser les données comptables.", }, "parameters": [ - {"name": "featureInstanceId", "type": "string", "required": True, "description": {"en": "Trustee feature instance ID", "de": "Trustee Feature-Instanz-ID", "fr": "ID instance Trustee"}}, - {"name": "forceRefresh", "type": "boolean", "required": False, "description": {"en": "Force re-import even if data is fresh (default: false)", "de": "Import erzwingen auch wenn Daten frisch sind", "fr": "Forcer la réimportation"}, "default": False}, - {"name": "dateFrom", "type": "string", "required": False, "description": {"en": "Start date filter (YYYY-MM-DD)", "de": "Startdatum-Filter (JJJJ-MM-TT)", "fr": "Date début (AAAA-MM-JJ)"}, "default": ""}, - {"name": "dateTo", "type": "string", "required": False, "description": {"en": "End date filter (YYYY-MM-DD)", "de": "Enddatum-Filter (JJJJ-MM-TT)", "fr": "Date fin (AAAA-MM-JJ)"}, "default": ""}, + {"name": "featureInstanceId", "type": "string", "required": True, "frontendType": "hidden", + "description": {"en": "Trustee feature instance ID", "de": "Trustee Feature-Instanz-ID", "fr": "ID instance Trustee"}}, + {"name": "forceRefresh", "type": "boolean", "required": False, "frontendType": "checkbox", + "description": {"en": "Force re-import", "de": "Import erzwingen", "fr": "Forcer la réimportation"}, "default": False}, + {"name": "dateFrom", "type": "string", "required": False, "frontendType": "date", + "description": {"en": "Start date (YYYY-MM-DD)", "de": "Startdatum", "fr": "Date début"}, "default": ""}, + {"name": "dateTo", "type": "string", "required": False, "frontendType": "date", + "description": {"en": "End date (YYYY-MM-DD)", "de": "Enddatum", "fr": "Date fin"}, "default": ""}, ], "inputs": 1, "outputs": 1, + "inputPorts": {0: {"accepts": ["Transit"]}}, + "outputPorts": {0: {"schema": "ActionResult"}}, "meta": {"icon": "mdi-database-refresh", "color": 
"#4CAF50"}, "_method": "trustee", "_action": "refreshAccountingData", - "_paramMap": {"featureInstanceId": "featureInstanceId", "forceRefresh": "forceRefresh", "dateFrom": "dateFrom", "dateTo": "dateTo"}, }, { "id": "trustee.extractFromFiles", "category": "trustee", "label": {"en": "Extract Documents", "de": "Dokumente extrahieren", "fr": "Extraire documents"}, "description": { - "en": "Extract document type and data from PDF/JPG via AI (from fileIds or SharePoint folder)", - "de": "Dokumenttyp und Daten aus PDF/JPG per AI extrahieren (aus Dateien oder SharePoint-Ordner)", - "fr": "Extraire type et données de PDF/JPG par IA", + "en": "Extract document type and data from PDF/JPG via AI.", + "de": "Dokumenttyp und Daten aus PDF/JPG per AI extrahieren.", + "fr": "Extraire type et données de PDF/JPG par IA.", }, "parameters": [ - {"name": "connectionId", "type": "string", "required": False, "description": {"en": "SharePoint connection (if reading from SharePoint)", "de": "SharePoint-Verbindung (falls aus SharePoint)", "fr": "Connexion SharePoint"}, "default": ""}, - {"name": "sharepointFolder", "type": "string", "required": False, "description": {"en": "SharePoint folder path (e.g. 
/sites/MySite/Documents/Expenses)", "de": "SharePoint-Ordnerpfad", "fr": "Chemin dossier SharePoint"}, "default": ""}, - {"name": "featureInstanceId", "type": "string", "required": True, "description": {"en": "Trustee feature instance ID", "de": "Trustee Feature-Instanz-ID", "fr": "ID instance Trustee"}}, - {"name": "prompt", "type": "string", "required": False, "description": {"en": "AI prompt for extraction (optional)", "de": "AI-Prompt für Extraktion (optional)", "fr": "Prompt IA pour extraction"}, "default": ""}, + {"name": "connectionReference", "type": "string", "required": False, "frontendType": "userConnection", + "description": {"en": "SharePoint connection", "de": "SharePoint-Verbindung", "fr": "Connexion SharePoint"}, "default": ""}, + {"name": "sharepointFolder", "type": "string", "required": False, "frontendType": "sharepointFolder", + "frontendOptions": {"dependsOn": "connectionReference"}, + "description": {"en": "SharePoint folder path", "de": "SharePoint-Ordnerpfad", "fr": "Chemin dossier SharePoint"}, "default": ""}, + {"name": "featureInstanceId", "type": "string", "required": True, "frontendType": "hidden", + "description": {"en": "Trustee feature instance ID", "de": "Trustee Feature-Instanz-ID", "fr": "ID instance Trustee"}}, + {"name": "prompt", "type": "string", "required": False, "frontendType": "textarea", + "description": {"en": "AI prompt for extraction", "de": "AI-Prompt für Extraktion", "fr": "Prompt IA"}, "default": ""}, ], "inputs": 1, "outputs": 1, + "inputPorts": {0: {"accepts": ["DocumentList", "Transit"]}}, + "outputPorts": {0: {"schema": "DocumentList"}}, "meta": {"icon": "mdi-file-document-scan", "color": "#4CAF50"}, "_method": "trustee", "_action": "extractFromFiles", - "_paramMap": {"connectionId": "connectionReference", "sharepointFolder": "sharepointFolder", "featureInstanceId": "featureInstanceId", "prompt": "prompt"}, }, { "id": "trustee.processDocuments", "category": "trustee", "label": {"en": "Process Documents", "de": 
"Dokumente verarbeiten", "fr": "Traiter documents"}, "description": { - "en": "Create TrusteeDocument + TrusteePosition from extraction result", - "de": "TrusteeDocument + TrusteePosition aus Extraktionsergebnis erstellen", - "fr": "Créer TrusteeDocument + TrusteePosition à partir du résultat", + "en": "Create TrusteeDocument + TrusteePosition from extraction result.", + "de": "TrusteeDocument + TrusteePosition aus Extraktionsergebnis erstellen.", + "fr": "Créer TrusteeDocument + TrusteePosition.", }, "parameters": [ - {"name": "documentList", "type": "string", "required": True, "description": {"en": "Reference to extractFromFiles result", "de": "Referenz auf extractFromFiles-Ergebnis", "fr": "Référence au résultat extractFromFiles"}}, - {"name": "featureInstanceId", "type": "string", "required": True, "description": {"en": "Trustee feature instance ID", "de": "Trustee Feature-Instanz-ID", "fr": "ID instance Trustee"}}, + {"name": "documentList", "type": "string", "required": True, "frontendType": "text", + "description": {"en": "Reference to extraction result", "de": "Referenz auf Ergebnis", "fr": "Référence au résultat"}}, + {"name": "featureInstanceId", "type": "string", "required": True, "frontendType": "hidden", + "description": {"en": "Trustee feature instance ID", "de": "Trustee Feature-Instanz-ID", "fr": "ID instance Trustee"}}, ], "inputs": 1, "outputs": 1, + "inputPorts": {0: {"accepts": ["DocumentList", "Transit"]}}, + "outputPorts": {0: {"schema": "ActionResult"}}, "meta": {"icon": "mdi-file-document-check", "color": "#4CAF50"}, "_method": "trustee", "_action": "processDocuments", - "_paramMap": {"documentList": "documentList", "featureInstanceId": "featureInstanceId"}, }, { "id": "trustee.syncToAccounting", "category": "trustee", "label": {"en": "Sync to Accounting", "de": "In Buchhaltung synchronisieren", "fr": "Synchroniser comptabilité"}, "description": { - "en": "Push trustee positions to accounting system", - "de": "Trustee-Positionen in 
Buchhaltungssystem übertragen", - "fr": "Transférer les positions vers la comptabilité", + "en": "Push trustee positions to accounting system.", + "de": "Trustee-Positionen in Buchhaltungssystem übertragen.", + "fr": "Transférer les positions vers la comptabilité.", }, "parameters": [ - {"name": "documentList", "type": "string", "required": True, "description": {"en": "Reference to processDocuments result", "de": "Referenz auf processDocuments-Ergebnis", "fr": "Référence au résultat processDocuments"}}, - {"name": "featureInstanceId", "type": "string", "required": True, "description": {"en": "Trustee feature instance ID", "de": "Trustee Feature-Instanz-ID", "fr": "ID instance Trustee"}}, + {"name": "documentList", "type": "string", "required": True, "frontendType": "text", + "description": {"en": "Reference to processed documents", "de": "Referenz auf Ergebnis", "fr": "Référence au résultat"}}, + {"name": "featureInstanceId", "type": "string", "required": True, "frontendType": "hidden", + "description": {"en": "Trustee feature instance ID", "de": "Trustee Feature-Instanz-ID", "fr": "ID instance Trustee"}}, ], "inputs": 1, "outputs": 1, + "inputPorts": {0: {"accepts": ["Transit"]}}, + "outputPorts": {0: {"schema": "ActionResult"}}, "meta": {"icon": "mdi-calculator", "color": "#4CAF50"}, "_method": "trustee", "_action": "syncToAccounting", - "_paramMap": {"documentList": "documentList", "featureInstanceId": "featureInstanceId"}, }, ] diff --git a/modules/features/graphicalEditor/nodeRegistry.py b/modules/features/graphicalEditor/nodeRegistry.py index 928840a4..3c42608b 100644 --- a/modules/features/graphicalEditor/nodeRegistry.py +++ b/modules/features/graphicalEditor/nodeRegistry.py @@ -9,6 +9,7 @@ import logging from typing import Dict, List, Any from modules.features.graphicalEditor.nodeDefinitions import STATIC_NODE_TYPES +from modules.features.graphicalEditor.portTypes import PORT_TYPE_CATALOG, SYSTEM_VARIABLES logger = logging.getLogger(__name__) @@ -25,10 
+26,9 @@ def getNodeTypes( def _localizeNode(node: Dict[str, Any], language: str) -> Dict[str, Any]: - """Apply language to label/description/parameters.""" + """Apply language to label/description/parameters. Keep inputPorts/outputPorts.""" lang = language if language in ("en", "de", "fr") else "en" out = dict(node) - # Strip internal keys for API response for key in list(out.keys()): if key.startswith("_"): del out[key] @@ -56,7 +56,7 @@ def getNodeTypesForApi( language: str = "en", ) -> Dict[str, Any]: """ - API-ready response: nodeTypes with localized strings, plus categories list. + API-ready response: nodeTypes with localized strings, plus categories, portTypeCatalog, systemVariables. """ nodes = getNodeTypes(services, language) localized = [_localizeNode(n, language) for n in nodes] @@ -72,7 +72,20 @@ def getNodeTypesForApi( {"id": "clickup", "label": {"en": "ClickUp", "de": "ClickUp", "fr": "ClickUp"}}, {"id": "trustee", "label": {"en": "Trustee", "de": "Treuhand", "fr": "Fiduciaire"}}, ] - return {"nodeTypes": localized, "categories": categories} + + catalogSerialized = {} + for name, schema in PORT_TYPE_CATALOG.items(): + catalogSerialized[name] = { + "name": schema.name, + "fields": [f.model_dump() for f in schema.fields], + } + + return { + "nodeTypes": localized, + "categories": categories, + "portTypeCatalog": catalogSerialized, + "systemVariables": SYSTEM_VARIABLES, + } def getNodeTypeToMethodAction() -> Dict[str, tuple]: diff --git a/modules/features/graphicalEditor/portTypes.py b/modules/features/graphicalEditor/portTypes.py new file mode 100644 index 00000000..523109b0 --- /dev/null +++ b/modules/features/graphicalEditor/portTypes.py @@ -0,0 +1,504 @@ +# Copyright (c) 2025 Patrick Motsch +# All rights reserved. +""" +Typed Port System for the Graphical Editor. + +Defines PortSchema, PORT_TYPE_CATALOG, SYSTEM_VARIABLES, +output normalizers, input extractors, and Transit helpers. 
+""" + +import logging +import time +import uuid +from typing import Any, Callable, Dict, List, Optional + +from pydantic import BaseModel, Field + +logger = logging.getLogger(__name__) + + +# --------------------------------------------------------------------------- +# Pydantic models +# --------------------------------------------------------------------------- + +class PortField(BaseModel): + name: str + type: str # str, int, bool, List[str], List[Document], Dict[str,Any] + description: Dict[str, str] = {} # {en, de, fr} + required: bool = True + + +class PortSchema(BaseModel): + name: str # e.g. "EmailDraft", "AiResult", "Transit" + fields: List[PortField] + + +class InputPortDef(BaseModel): + accepts: List[str] # list of accepted schema names + + +class OutputPortDef(BaseModel): + model_config = {"populate_by_name": True} + + schema_: str = Field(alias="schema") + dynamic: bool = False + deriveFrom: Optional[str] = None + + def model_dump(self, **kw): + d = super().model_dump(**kw) + d["schema"] = d.pop("schema_", d.get("schema")) + return d + + +# --------------------------------------------------------------------------- +# PORT_TYPE_CATALOG +# --------------------------------------------------------------------------- + +PORT_TYPE_CATALOG: Dict[str, PortSchema] = { + "DocumentList": PortSchema(name="DocumentList", fields=[ + PortField(name="documents", type="List[Document]", + description={"en": "List of documents", "de": "Dokumentenliste", "fr": "Liste de documents"}), + ]), + "FileList": PortSchema(name="FileList", fields=[ + PortField(name="files", type="List[File]", + description={"en": "List of files", "de": "Dateiliste", "fr": "Liste de fichiers"}), + ]), + "EmailDraft": PortSchema(name="EmailDraft", fields=[ + PortField(name="subject", type="str", + description={"en": "Subject", "de": "Betreff", "fr": "Sujet"}), + PortField(name="body", type="str", + description={"en": "Body", "de": "Inhalt", "fr": "Corps"}), + PortField(name="to", type="List[str]", 
+ description={"en": "Recipients", "de": "Empfänger", "fr": "Destinataires"}), + PortField(name="cc", type="List[str]", required=False, + description={"en": "CC", "de": "CC", "fr": "CC"}), + PortField(name="attachments", type="List[Document]", required=False, + description={"en": "Attachments", "de": "Anhänge", "fr": "Pièces jointes"}), + ]), + "EmailList": PortSchema(name="EmailList", fields=[ + PortField(name="emails", type="List[Email]", + description={"en": "Emails", "de": "E-Mails", "fr": "Emails"}), + ]), + "TaskList": PortSchema(name="TaskList", fields=[ + PortField(name="tasks", type="List[Task]", + description={"en": "Tasks", "de": "Aufgaben", "fr": "Tâches"}), + ]), + "TaskResult": PortSchema(name="TaskResult", fields=[ + PortField(name="success", type="bool", + description={"en": "Success", "de": "Erfolg", "fr": "Succès"}), + PortField(name="taskId", type="str", + description={"en": "Task ID", "de": "Aufgaben-ID", "fr": "ID tâche"}), + PortField(name="task", type="Dict", + description={"en": "Task data", "de": "Aufgabendaten", "fr": "Données tâche"}), + ]), + "FormPayload": PortSchema(name="FormPayload", fields=[ + PortField(name="payload", type="Dict[str,Any]", + description={"en": "Form data", "de": "Formulardaten", "fr": "Données formulaire"}), + ]), + "AiResult": PortSchema(name="AiResult", fields=[ + PortField(name="prompt", type="str", + description={"en": "Prompt", "de": "Prompt", "fr": "Invite"}), + PortField(name="response", type="str", + description={"en": "Response text", "de": "Antworttext", "fr": "Texte réponse"}), + PortField(name="responseData", type="Dict", required=False, + description={"en": "Structured response", "de": "Strukturierte Antwort", "fr": "Réponse structurée"}), + PortField(name="context", type="str", + description={"en": "Context", "de": "Kontext", "fr": "Contexte"}), + PortField(name="documents", type="List[Document]", + description={"en": "Documents", "de": "Dokumente", "fr": "Documents"}), + ]), + "BoolResult": 
PortSchema(name="BoolResult", fields=[ + PortField(name="result", type="bool", + description={"en": "Result", "de": "Ergebnis", "fr": "Résultat"}), + PortField(name="reason", type="str", required=False, + description={"en": "Reason", "de": "Begründung", "fr": "Raison"}), + ]), + "TextResult": PortSchema(name="TextResult", fields=[ + PortField(name="text", type="str", + description={"en": "Text", "de": "Text", "fr": "Texte"}), + ]), + "LoopItem": PortSchema(name="LoopItem", fields=[ + PortField(name="currentItem", type="Any", + description={"en": "Current item", "de": "Aktuelles Element", "fr": "Élément courant"}), + PortField(name="currentIndex", type="int", + description={"en": "Current index", "de": "Aktueller Index", "fr": "Index courant"}), + PortField(name="items", type="List[Any]", + description={"en": "All items", "de": "Alle Elemente", "fr": "Tous les éléments"}), + PortField(name="count", type="int", + description={"en": "Total count", "de": "Gesamtanzahl", "fr": "Nombre total"}), + ]), + "AggregateResult": PortSchema(name="AggregateResult", fields=[ + PortField(name="items", type="List[Any]", + description={"en": "Collected items", "de": "Gesammelte Elemente", "fr": "Éléments collectés"}), + PortField(name="count", type="int", + description={"en": "Count", "de": "Anzahl", "fr": "Nombre"}), + ]), + "MergeResult": PortSchema(name="MergeResult", fields=[ + PortField(name="inputs", type="Dict[int,Any]", + description={"en": "Inputs by port", "de": "Eingaben nach Port", "fr": "Entrées par port"}), + PortField(name="first", type="Any", + description={"en": "First available", "de": "Erstes verfügbares", "fr": "Premier disponible"}), + PortField(name="merged", type="Dict", + description={"en": "Merged data", "de": "Zusammengeführte Daten", "fr": "Données fusionnées"}), + ]), + "ActionResult": PortSchema(name="ActionResult", fields=[ + PortField(name="success", type="bool", + description={"en": "Success", "de": "Erfolg", "fr": "Succès"}), + PortField(name="error", 
type="str", required=False, + description={"en": "Error", "de": "Fehler", "fr": "Erreur"}), + PortField(name="data", type="Dict", required=False, + description={"en": "Result data", "de": "Ergebnisdaten", "fr": "Données résultat"}), + ]), + "Transit": PortSchema(name="Transit", fields=[]), +} + + +# --------------------------------------------------------------------------- +# SYSTEM_VARIABLES +# --------------------------------------------------------------------------- + +SYSTEM_VARIABLES: Dict[str, Dict[str, str]] = { + "system.timestamp": {"type": "int", "description": "Unix timestamp (ms)"}, + "system.date": {"type": "str", "description": "ISO date (YYYY-MM-DD)"}, + "system.datetime": {"type": "str", "description": "ISO datetime"}, + "system.time": {"type": "str", "description": "HH:MM:SS"}, + "system.userId": {"type": "str", "description": "Current user ID"}, + "system.userName": {"type": "str", "description": "Current user name"}, + "system.userEmail": {"type": "str", "description": "Current user email"}, + "system.workflowId": {"type": "str", "description": "Workflow ID"}, + "system.runId": {"type": "str", "description": "Run ID"}, + "system.instanceId": {"type": "str", "description": "Feature instance ID"}, + "system.mandateId": {"type": "str", "description": "Mandate ID"}, + "system.loopIndex": {"type": "int", "description": "Current loop index (only in loop)"}, + "system.loopCount": {"type": "int", "description": "Loop item count (only in loop)"}, + "system.uuid": {"type": "str", "description": "Random UUID"}, +} + + +def _resolveSystemVariable(variable: str, context: Dict[str, Any]) -> Any: + """Resolve a system variable name to its runtime value.""" + from datetime import datetime, timezone + + now = datetime.now(timezone.utc) + mapping = { + "system.timestamp": lambda: int(now.timestamp() * 1000), + "system.date": lambda: now.strftime("%Y-%m-%d"), + "system.datetime": lambda: now.isoformat(), + "system.time": lambda: now.strftime("%H:%M:%S"), + 
"system.userId": lambda: context.get("userId", ""), + "system.userName": lambda: context.get("userName", ""), + "system.userEmail": lambda: context.get("userEmail", ""), + "system.workflowId": lambda: context.get("workflowId", ""), + "system.runId": lambda: context.get("_runId", ""), + "system.instanceId": lambda: context.get("instanceId", ""), + "system.mandateId": lambda: context.get("mandateId", ""), + "system.loopIndex": lambda: (context.get("_loopState") or {}).get("currentIndex", -1), + "system.loopCount": lambda: len((context.get("_loopState") or {}).get("items", [])), + "system.uuid": lambda: str(uuid.uuid4()), + } + resolver = mapping.get(variable) + if resolver: + return resolver() + logger.warning("Unknown system variable: %s", variable) + return None + + +# --------------------------------------------------------------------------- +# Output normalizers +# --------------------------------------------------------------------------- + +def _normalizeToSchema(raw: Any, schemaName: str) -> Dict[str, Any]: + """ + Normalize raw executor output to match the declared port schema. + Ensures _success/_error meta-fields are always present. 
+ """ + if not isinstance(raw, dict): + raw = {"value": raw} if raw is not None else {} + + result = dict(raw) + result.setdefault("_success", not bool(raw.get("error"))) + result.setdefault("_error", raw.get("error")) + + schema = PORT_TYPE_CATALOG.get(schemaName) + if not schema or schemaName == "Transit": + return result + + for field in schema.fields: + if field.name not in result: + result[field.name] = _defaultForType(field.type) + + return result + + +def _defaultForType(typeStr: str) -> Any: + """Return a sensible default for a type string.""" + if typeStr.startswith("List"): + return [] + if typeStr.startswith("Dict"): + return {} + if typeStr == "bool": + return False + if typeStr == "int": + return 0 + if typeStr == "str": + return "" + return None + + +def _normalizeError(error: Exception, schemaName: str) -> Dict[str, Any]: + """Build an error envelope matching the schema with _success=False.""" + result = {"_success": False, "_error": str(error)} + schema = PORT_TYPE_CATALOG.get(schemaName) + if schema: + for field in schema.fields: + result.setdefault(field.name, _defaultForType(field.type)) + return result + + +# --------------------------------------------------------------------------- +# Input extractors (one per input port type) +# --------------------------------------------------------------------------- + +def _extractEmailDraft(upstream: Dict[str, Any]) -> Dict[str, Any]: + """Extract EmailDraft fields from upstream output.""" + result = {} + if upstream.get("responseData") and isinstance(upstream["responseData"], dict): + rd = upstream["responseData"] + for key in ("subject", "body", "to", "cc"): + if key in rd: + result[key] = rd[key] + if not result: + for key in ("subject", "body", "to", "cc"): + if key in upstream: + result[key] = upstream[key] + return result + + +def _extractDocuments(upstream: Dict[str, Any]) -> Dict[str, Any]: + """Extract documents from upstream output.""" + docs = upstream.get("documents") or 
upstream.get("documentList") or [] + if not docs and isinstance(upstream.get("data"), dict): + docs = upstream["data"].get("documents") or upstream["data"].get("documentList") or [] + # input.upload format + if not docs: + files = upstream.get("files") or [] + fileObj = upstream.get("file") + fileIds = upstream.get("fileIds") or [] + if fileObj: + docs = [fileObj] + elif files: + docs = files + elif fileIds: + docs = [{"validationMetadata": {"fileId": fid}} for fid in fileIds] + return {"documents": docs if isinstance(docs, list) else [docs]} if docs else {} + + +def _extractText(upstream: Dict[str, Any]) -> Dict[str, Any]: + """Extract text from upstream output.""" + text = upstream.get("text") or upstream.get("response") or upstream.get("context") or "" + if not text and upstream.get("payload"): + import json + payload = upstream["payload"] + text = json.dumps(payload, ensure_ascii=False) if isinstance(payload, dict) else str(payload) + return {"text": str(text)} if text else {} + + +def _extractEmailList(upstream: Dict[str, Any]) -> Dict[str, Any]: + """Extract email list from upstream output.""" + emails = upstream.get("emails") or [] + if not emails: + docs = upstream.get("documents") or upstream.get("documentList") or [] + if docs: + import json + for doc in docs: + raw = doc.get("documentData") if isinstance(doc, dict) else None + if raw: + try: + data = json.loads(raw) if isinstance(raw, str) else raw + if isinstance(data, dict): + found = (data.get("emails", {}).get("emails", []) + or data.get("searchResults", {}).get("results", [])) + if found: + emails = found + break + except (json.JSONDecodeError, TypeError): + pass + return {"emails": emails} if emails else {} + + +def _extractTaskList(upstream: Dict[str, Any]) -> Dict[str, Any]: + """Extract task list from upstream output.""" + tasks = upstream.get("tasks") or [] + if not tasks: + docs = upstream.get("documents") or upstream.get("documentList") or [] + if docs: + import json + for doc in docs: + raw 
= doc.get("documentData") if isinstance(doc, dict) else None + if raw: + try: + data = json.loads(raw) if isinstance(raw, str) else raw + if isinstance(data, dict) and "tasks" in data: + tasks = data["tasks"] + break + except (json.JSONDecodeError, TypeError): + pass + return {"tasks": tasks} if tasks else {} + + +def _extractFileList(upstream: Dict[str, Any]) -> Dict[str, Any]: + """Extract file list from upstream output.""" + files = upstream.get("files") or [] + return {"files": files} if files else {} + + +def _extractFormPayload(upstream: Dict[str, Any]) -> Dict[str, Any]: + """Extract form payload from upstream output.""" + payload = upstream.get("payload") + if payload and isinstance(payload, dict): + return {"payload": payload} + return {} + + +def _extractAiResult(upstream: Dict[str, Any]) -> Dict[str, Any]: + """Extract AI result fields from upstream output.""" + result = {} + for key in ("prompt", "response", "responseData", "context", "documents"): + if key in upstream: + result[key] = upstream[key] + return result + + +def _extractBoolResult(upstream: Dict[str, Any]) -> Dict[str, Any]: + """Extract bool result from upstream output.""" + result = upstream.get("result") + if isinstance(result, bool): + return {"result": result, "reason": upstream.get("reason", "")} + approved = upstream.get("approved") + if isinstance(approved, bool): + return {"result": approved, "reason": upstream.get("reason", "")} + return {} + + +def _extractTaskResult(upstream: Dict[str, Any]) -> Dict[str, Any]: + """Extract task result from upstream output.""" + result = {} + if "taskId" in upstream: + result["taskId"] = upstream["taskId"] + if "task" in upstream: + result["task"] = upstream["task"] + elif "clickupTask" in upstream: + result["task"] = upstream["clickupTask"] + if "success" in upstream: + result["success"] = upstream["success"] + return result + + +def _extractAggregateResult(upstream: Dict[str, Any]) -> Dict[str, Any]: + """Extract aggregate result from upstream 
output.""" + items = upstream.get("items") or [] + return {"items": items, "count": len(items)} + + +def _extractMergeResult(upstream: Dict[str, Any]) -> Dict[str, Any]: + """Extract merge result from upstream output.""" + return { + "inputs": upstream.get("inputs", {}), + "first": upstream.get("first"), + "merged": upstream.get("merged", {}), + } + + +INPUT_EXTRACTORS: Dict[str, Callable] = { + "EmailDraft": _extractEmailDraft, + "DocumentList": _extractDocuments, + "TextResult": _extractText, + "EmailList": _extractEmailList, + "TaskList": _extractTaskList, + "FileList": _extractFileList, + "FormPayload": _extractFormPayload, + "AiResult": _extractAiResult, + "BoolResult": _extractBoolResult, + "TaskResult": _extractTaskResult, + "AggregateResult": _extractAggregateResult, + "MergeResult": _extractMergeResult, +} + + +# --------------------------------------------------------------------------- +# Transit helpers +# --------------------------------------------------------------------------- + +def _wrapTransit(data: Any, meta: Dict[str, Any]) -> Dict[str, Any]: + """Wrap data in a Transit envelope.""" + return {"_transit": True, "_meta": meta, "data": data} + + +def _unwrapTransit(output: Any) -> Any: + """Unwrap a Transit envelope, returning the inner data.""" + if isinstance(output, dict) and output.get("_transit"): + return output.get("data") + return output + + +def _resolveTransitChain( + nodeId: str, + nodeOutputs: Dict[str, Any], + connectionMap: Dict[str, list], +) -> Any: + """ + Follow _transit chain backwards until a real (non-transit) producer is found. + Returns the unwrapped output of the real producer. 
+ """ + visited = set() + current = nodeId + while current and current not in visited: + visited.add(current) + out = nodeOutputs.get(current) + if not isinstance(out, dict) or not out.get("_transit"): + return out + sources = connectionMap.get(current, []) + if not sources: + return _unwrapTransit(out) + srcId = sources[0][0] if sources else None + if not srcId: + return _unwrapTransit(out) + current = srcId + return nodeOutputs.get(nodeId) + + +# --------------------------------------------------------------------------- +# Schema derivation for dynamic outputs +# --------------------------------------------------------------------------- + +def _deriveFormPayloadSchema(node: Dict[str, Any]) -> Optional[PortSchema]: + """Derive output schema from form field definitions.""" + fields_param = (node.get("parameters") or {}).get("fields") + if not fields_param or not isinstance(fields_param, list): + return None + portFields = [] + for f in fields_param: + if isinstance(f, dict) and f.get("name"): + portFields.append(PortField( + name=f["name"], + type=f.get("type", "str"), + description=f.get("label", {}) if isinstance(f.get("label"), dict) else {"en": str(f.get("label", f["name"]))}, + required=f.get("required", False), + )) + return PortSchema(name="FormPayload_dynamic", fields=portFields) if portFields else None + + +def _deriveTransformSchema(node: Dict[str, Any]) -> Optional[PortSchema]: + """Derive output schema from transform mappings.""" + mappings = (node.get("parameters") or {}).get("mappings") + if not mappings or not isinstance(mappings, list): + return None + portFields = [] + for m in mappings: + if isinstance(m, dict) and m.get("outputField"): + portFields.append(PortField( + name=m["outputField"], + type=m.get("type", "str"), + description={"en": m.get("label", m["outputField"])}, + )) + return PortSchema(name="Transform_dynamic", fields=portFields) if portFields else None diff --git a/modules/features/trustee/routeFeatureTrustee.py 
b/modules/features/trustee/routeFeatureTrustee.py index 13b28b07..ca8caf90 100644 --- a/modules/features/trustee/routeFeatureTrustee.py +++ b/modules/features/trustee/routeFeatureTrustee.py @@ -1642,6 +1642,22 @@ def get_import_status( return counts +# ===== AI Data Cache ===== + +@router.post("/{instanceId}/accounting/clear-cache") +@limiter.limit("10/minute") +def clear_ai_data_cache( + request: Request, + instanceId: str = Path(..., description="Feature Instance ID"), + context: RequestContext = Depends(getRequestContext), +) -> Dict[str, Any]: + """Clear the AI feature-data query cache for this instance so the next AI query reads fresh DB data.""" + _validateInstanceAccess(instanceId, context) + from modules.serviceCenter.services.serviceAgent.coreTools._featureSubAgentTools import clearFeatureQueryCache + removed = clearFeatureQueryCache(instanceId) + return {"cleared": removed, "featureInstanceId": instanceId} + + # ===== Position-Document Query ===== @router.get("/{instanceId}/positions/document/{documentId}", response_model=List[TrusteePosition]) diff --git a/modules/features/workspace/routeFeatureWorkspace.py b/modules/features/workspace/routeFeatureWorkspace.py index 064325a2..ae51f0cd 100644 --- a/modules/features/workspace/routeFeatureWorkspace.py +++ b/modules/features/workspace/routeFeatureWorkspace.py @@ -356,8 +356,6 @@ def _workspaceMessageToClientDict(msg: Any) -> Dict[str, Any]: raw = dict(msg) elif hasattr(msg, "model_dump"): raw = msg.model_dump() - elif hasattr(msg, "dict"): - raw = msg.dict() else: raw = { "id": getattr(msg, "id", None), @@ -378,8 +376,6 @@ def _workspaceMessageToClientDict(msg: Any) -> Dict[str, Any]: serialized_docs.append(doc) elif hasattr(doc, "model_dump"): serialized_docs.append(doc.model_dump()) - elif hasattr(doc, "dict"): - serialized_docs.append(doc.dict()) else: serialized_docs.append({ "id": getattr(doc, "id", ""), diff --git a/modules/interfaces/interfaceDbApp.py b/modules/interfaces/interfaceDbApp.py index 
f5560fed..f025b27c 100644 --- a/modules/interfaces/interfaceDbApp.py +++ b/modules/interfaces/interfaceDbApp.py @@ -563,6 +563,46 @@ class AppObjects: logger.error(f"Error getting user by ID: {str(e)}") return None + def getUsersByIds(self, userIds: list[str]) -> dict[str, User]: + """Batch-load users by IDs in a single SQL query (id = ANY(...)). + Returns {userId: User} dict. Skips IDs not found or not accessible.""" + if not userIds: + return {} + try: + uniqueIds = list(set(userIds)) + records = self.db.getRecordset(UserInDB, recordFilter={"id": uniqueIds}) + result: dict[str, User] = {} + for rec in (records or []): + cleaned = dict(rec) + if cleaned.get("roleLabels") is None: + cleaned["roleLabels"] = [] + uid = cleaned.get("id") + if uid: + result[uid] = User(**cleaned) + return result + except Exception as e: + logger.error(f"Error batch-loading users: {e}") + return {} + + def getMandatesByIds(self, mandateIds: list[str]) -> dict[str, Mandate]: + """Batch-load mandates by IDs in a single SQL query (id = ANY(...)). + Returns {mandateId: Mandate} dict.""" + if not mandateIds: + return {} + try: + uniqueIds = list(set(mandateIds)) + records = self.db.getRecordset(Mandate, recordFilter={"id": uniqueIds}) + result: dict[str, Mandate] = {} + for rec in (records or []): + cleaned = dict(rec) + mid = cleaned.get("id") + if mid: + result[mid] = Mandate(**cleaned) + return result + except Exception as e: + logger.error(f"Error batch-loading mandates: {e}") + return {} + def _getUserForAuthentication(self, username: str) -> Optional[Dict[str, Any]]: """ Get user record by username for authentication purposes. 
diff --git a/modules/interfaces/interfaceDbBilling.py b/modules/interfaces/interfaceDbBilling.py index 1ea1786a..e3229c08 100644 --- a/modules/interfaces/interfaceDbBilling.py +++ b/modules/interfaces/interfaceDbBilling.py @@ -1416,6 +1416,355 @@ class BillingObjects: return balances + @staticmethod + def _mapPaginationColumns(pagination: PaginationParams) -> PaginationParams: + """Remap frontend column names to DB column names in filters and sort.""" + _COL_MAP = {"createdAt": "sysCreatedAt"} + _ENRICHED_COLS = {"mandateName", "userName", "mandateId", "userId"} + import copy + p = copy.deepcopy(pagination) + if p.filters: + mapped = {} + for k, v in p.filters.items(): + if k in _ENRICHED_COLS: + continue + mapped[_COL_MAP.get(k, k)] = v + p.filters = mapped + if p.sort: + mapped = [] + for s in p.sort: + field = s.get("field", "") if isinstance(s, dict) else getattr(s, "field", "") + if field in _ENRICHED_COLS: + continue + newField = _COL_MAP.get(field, field) + if isinstance(s, dict): + mapped.append({**s, "field": newField}) + else: + mapped.append({"field": newField, "direction": getattr(s, "direction", "asc")}) + p.sort = mapped if mapped else [{"field": "sysCreatedAt", "direction": "desc"}] + return p + + def getTransactionsForMandatesPaginated( + self, + mandateIds: Optional[List[str]], + pagination: PaginationParams, + scope: str = "all", + userId: Optional[str] = None, + ) -> PaginatedResult: + """ + SQL-level paginated transactions across multiple mandates. + Single SQL query with WHERE accountId = ANY(...), ORDER BY, LIMIT/OFFSET. + Enrichment (userName, mandateName) only for the returned page. 
+ """ + from modules.interfaces.interfaceDbApp import getInterface as getAppInterface + + try: + mappedPagination = self._mapPaginationColumns(pagination) + + allAccounts = self.db.getRecordset(BillingAccount) + if mandateIds: + allAccounts = [a for a in allAccounts if a.get("mandateId") in set(mandateIds)] + + accountIds = [a.get("id") for a in allAccounts if a.get("id")] + if not accountIds: + return PaginatedResult(items=[], totalItems=0, totalPages=0) + + recordFilter: Dict[str, Any] = {"accountId": accountIds} + if scope == "personal" and userId: + recordFilter["createdByUserId"] = userId + + result = self.db.getRecordsetPaginated( + BillingTransaction, + pagination=mappedPagination, + recordFilter=recordFilter, + ) + pageItems = result.get("items", []) if isinstance(result, dict) else result.items + + accountMap = {a.get("id"): a for a in allAccounts} + + pageUserIds = set() + pageMandateIds = set() + for t in pageItems: + accId = t.get("accountId") + acc = accountMap.get(accId, {}) + mid = acc.get("mandateId") + uid = t.get("createdByUserId") or acc.get("userId") + if uid: + pageUserIds.add(uid) + if mid: + pageMandateIds.add(mid) + + appInterface = getAppInterface(self.currentUser) + userMap: Dict[str, str] = {} + if pageUserIds: + users = appInterface.getUsersByIds(list(pageUserIds)) + for uid, u in users.items(): + dn = getattr(u, "displayName", None) or getattr(u, "username", None) or uid + userMap[uid] = dn + + mandateMap: Dict[str, str] = {} + if pageMandateIds: + mandates = appInterface.getMandatesByIds(list(pageMandateIds)) + for mid, m in mandates.items(): + mandateMap[mid] = getattr(m, "label", None) or getattr(m, "name", None) or mid + + enriched = [] + for t in pageItems: + row = dict(t) + accId = row.get("accountId") + acc = accountMap.get(accId, {}) + mid = acc.get("mandateId") + txUserId = row.get("createdByUserId") or acc.get("userId") + row["mandateId"] = mid + row["mandateName"] = mandateMap.get(mid, "") + row["userId"] = txUserId + 
def _buildScopeFilter(
    self,
    mandateIds: Optional[List[str]],
    scope: str = "all",
    userId: Optional[str] = None,
    startTs: Optional[float] = None,
    endTs: Optional[float] = None,
) -> tuple:
    """Build WHERE clause parts for scoped transaction queries.

    Returns a 4-tuple ``(conditions, values, accountIds, allAccounts)``:
    SQL condition strings, their positional bind values, the ids of the
    billing accounts in scope, and the raw account records themselves
    (callers use them for mandate-name enrichment).  When no account
    matches the mandate scope, the first three elements are empty lists.
    """
    allAccounts = self.db.getRecordset(BillingAccount)
    if mandateIds:
        mandateSet = set(mandateIds)
        allAccounts = [a for a in allAccounts if a.get("mandateId") in mandateSet]

    accountIds = [a.get("id") for a in allAccounts if a.get("id")]
    if not accountIds:
        return [], [], [], allAccounts

    # Only DEBIT rows count as cost; accountId restricts to the mandate scope.
    conditions = ['"accountId" = ANY(%s)', '"transactionType" = %s']
    values: list = [accountIds, "DEBIT"]

    if scope == "personal" and userId:
        conditions.append('"createdByUserId" = %s')
        values.append(userId)

    # Half-open time window: startTs inclusive, endTs exclusive.
    if startTs is not None:
        conditions.append('"sysCreatedAt" >= %s')
        values.append(startTs)
    if endTs is not None:
        conditions.append('"sysCreatedAt" < %s')
        values.append(endTs)

    return conditions, values, accountIds, allAccounts

def getTransactionStatisticsAggregated(
    self,
    mandateIds: Optional[List[str]],
    scope: str = "all",
    userId: Optional[str] = None,
    startTs: Optional[float] = None,
    endTs: Optional[float] = None,
    period: str = "month",
) -> Dict[str, Any]:
    """
    Pure SQL aggregation for statistics. No row-level loading.

    Returns: totalCost, transactionCount, costByProvider, costByModel,
    costByAccountFeature, costByAccountId, timeSeries, plus the internal
    key ``_allAccounts`` (raw account records for caller-side mandate-name
    enrichment).  On any error an empty statistics dict is returned
    instead of raising.
    """
    table = BillingTransaction.__name__

    try:
        if not self.db._ensureTableExists(BillingTransaction):
            return self._emptyStats()

        conditions, values, accountIds, allAccounts = self._buildScopeFilter(
            mandateIds, scope, userId, startTs, endTs
        )
        if not accountIds:
            return self._emptyStats()

        # conditions is never empty here (accountId + transactionType are
        # always present), so appending "AND ..." below is safe.
        whereClause = " WHERE " + " AND ".join(conditions)
        self.db._ensure_connection()

        result: Dict[str, Any] = {}

        with self.db.connection.cursor() as cur:

            def _groupedSums(groupExpr: str) -> Dict[str, float]:
                # Shared shape for the three single-dimension aggregates:
                # SUM(amount) per group value, largest first, 4-decimal rounding.
                cur.execute(
                    f'SELECT {groupExpr} AS grp, SUM("amount") AS total '
                    f'FROM "{table}"{whereClause} GROUP BY grp ORDER BY total DESC',
                    values,
                )
                return {r["grp"]: round(float(r["total"]), 4) for r in cur.fetchall()}

            # 1) Totals
            cur.execute(
                f'SELECT COALESCE(SUM("amount"), 0) AS total, COUNT(*) AS cnt FROM "{table}"{whereClause}',
                values,
            )
            row = cur.fetchone()
            result["totalCost"] = round(float(row["total"]), 4)
            result["transactionCount"] = int(row["cnt"])

            # 2) GROUP BY aicoreProvider
            result["costByProvider"] = _groupedSums('COALESCE("aicoreProvider", \'unknown\')')

            # 3) GROUP BY aicoreModel
            result["costByModel"] = _groupedSums('COALESCE("aicoreModel", \'unknown\')')

            # 4) GROUP BY accountId (will be enriched to mandateName by caller)
            result["costByAccountId"] = _groupedSums('"accountId"')

            # 5) GROUP BY accountId + featureCode (for costByFeature)
            cur.execute(
                f'SELECT "accountId", COALESCE("featureCode", \'unknown\') AS fc, SUM("amount") AS total '
                f'FROM "{table}"{whereClause} GROUP BY "accountId", fc ORDER BY total DESC',
                values,
            )
            result["costByAccountFeature"] = [
                {"accountId": r["accountId"], "featureCode": r["fc"], "total": round(float(r["total"]), 4)}
                for r in cur.fetchall()
            ]

            # 6) Time series via DATE_TRUNC on the epoch timestamp column.
            if period == "day":
                truncExpr = "DATE_TRUNC('day', TO_TIMESTAMP(\"sysCreatedAt\"))"
            else:
                truncExpr = "DATE_TRUNC('month', TO_TIMESTAMP(\"sysCreatedAt\"))"

            cur.execute(
                f'SELECT {truncExpr} AS bucket, SUM("amount") AS total, COUNT(*) AS cnt '
                f'FROM "{table}"{whereClause} AND "sysCreatedAt" IS NOT NULL '
                f'GROUP BY bucket ORDER BY bucket',
                values,
            )
            timeSeries = []
            for r in cur.fetchall():
                bucket = r["bucket"]
                if period == "day":
                    label = bucket.strftime("%Y-%m-%d") if bucket else "unknown"
                else:
                    label = bucket.strftime("%Y-%m") if bucket else "unknown"
                timeSeries.append({
                    "date": label,
                    "cost": round(float(r["total"]), 4),
                    "count": int(r["cnt"]),
                })
            result["timeSeries"] = timeSeries

        # NOTE(review): commit after read-only SELECTs presumably just ends
        # the implicit transaction on this shared connection — confirm.
        self.db.connection.commit()

        result["_allAccounts"] = allAccounts
        return result

    except Exception as e:
        logger.error(f"Error in getTransactionStatisticsAggregated: {e}", exc_info=True)
        try:
            self.db.connection.rollback()
        except Exception:
            pass
        return self._emptyStats()

@staticmethod
def _emptyStats() -> Dict[str, Any]:
    """Zeroed statistics payload used as the safe fallback on error / empty scope."""
    return {
        "totalCost": 0.0,
        "transactionCount": 0,
        "costByProvider": {},
        "costByModel": {},
        "costByAccountId": {},
        "costByAccountFeature": [],
        "timeSeries": [],
        "_allAccounts": [],
    }

def getTransactionDistinctValues(
    self,
    mandateIds: Optional[List[str]],
    column: str,
    pagination: Optional[PaginationParams] = None,
    scope: str = "all",
    userId: Optional[str] = None,
) -> List[str]:
    """SQL DISTINCT for filter-values on BillingTransaction, scoped by mandates.

    Frontend column names are mapped to their DB columns; the virtual
    columns ``mandateName`` and ``userName`` are resolved via batch lookup
    in :meth:`_getEnrichedDistinctValues`.  Returns [] on error or when no
    account is in scope.
    """
    _COLUMN_MAP = {
        "createdAt": "sysCreatedAt",
        "mandateId": "accountId",
        "mandateName": "accountId",
    }
    dbColumn = _COLUMN_MAP.get(column, column)

    mappedPagination = self._mapPaginationColumns(pagination) if pagination else None

    try:
        allAccounts = self.db.getRecordset(BillingAccount)
        if mandateIds:
            allAccounts = [a for a in allAccounts if a.get("mandateId") in set(mandateIds)]
        accountIds = [a.get("id") for a in allAccounts if a.get("id")]
        if not accountIds:
            return []

        recordFilter: Dict[str, Any] = {"accountId": accountIds}
        if scope == "personal" and userId:
            recordFilter["createdByUserId"] = userId

        if column in ("mandateName", "userName"):
            return self._getEnrichedDistinctValues(column, allAccounts, recordFilter, mappedPagination)

        return self.db.getDistinctColumnValues(
            BillingTransaction, dbColumn, mappedPagination, recordFilter
        )
    except Exception as e:
        logger.error(f"Error in getTransactionDistinctValues({column}): {e}")
        return []

def _getEnrichedDistinctValues(
    self,
    column: str,
    allAccounts: List[Dict],
    recordFilter: Dict[str, Any],
    pagination: Optional[PaginationParams],
) -> List[str]:
    """Resolve enriched columns (mandateName, userName) via batch lookup.

    Returns the display names case-insensitively sorted; [] for any other
    column name.  NOTE(review): for userName, pagination is applied to the
    raw user-id DISTINCT before name resolution — confirm that is intended.
    """
    from modules.interfaces.interfaceDbApp import getInterface as getAppInterface

    if column == "mandateName":
        mandateIds = list({a.get("mandateId") for a in allAccounts if a.get("mandateId")})
        appInterface = getAppInterface(self.currentUser)
        mandates = appInterface.getMandatesByIds(mandateIds)
        return sorted(
            {getattr(m, "label", None) or getattr(m, "name", "") or mid for mid, m in mandates.items()},
            key=lambda v: v.lower(),
        )

    if column == "userName":
        dbCol = "createdByUserId"
        values = self.db.getDistinctColumnValues(BillingTransaction, dbCol, pagination, recordFilter)
        if not values:
            return []
        appInterface = getAppInterface(self.currentUser)
        users = appInterface.getUsersByIds(values)
        return sorted(
            {getattr(u, "displayName", None) or getattr(u, "username", None) or uid for uid, u in users.items()},
            key=lambda v: v.lower(),
        )

    return []
def _seedUiLanguageSetsIfEmpty(self) -> None:
    """Seed UiLanguageSet records from the bundled JSON file when the table is empty."""
    try:
        import json
        from pathlib import Path

        from modules.datamodels.datamodelUiLanguage import UiLanguageSet

        # Never overwrite: seed only when no language set exists yet.
        if self.db.getRecordset(UiLanguageSet):
            return

        seedFile = (
            Path(__file__).resolve().parent.parent
            / "migration"
            / "seedData"
            / "ui_language_seed.json"
        )
        if not seedFile.is_file():
            logger.warning("ui_language_seed.json not found, skipping UI i18n seed")
            return

        rows = json.loads(seedFile.read_text(encoding="utf-8"))
        timestamp = getUtcTimestamp()
        for entry in rows:
            self.db.recordCreate(
                UiLanguageSet,
                {
                    "id": entry["id"],
                    "label": entry["label"],
                    "keys": entry.get("keys") or {},
                    "status": entry.get("status") or "complete",
                    "isDefault": bool(entry.get("isDefault", False)),
                    "sysCreatedAt": timestamp,
                    "sysModifiedBy": None,
                    "sysCreatedBy": None,
                    "sysModifiedAt": timestamp,
                },
            )
        logger.info("Seeded UiLanguageSet rows from ui_language_seed.json")
    except Exception as e:
        # Seeding is best-effort; a failure must not block interface creation.
        logger.error(f"UI i18n seed failed: {e}")
"Kontostatus", + "Authentifizierungsanbieter": "Authentifizierungsanbieter", + "Aktiv": "Aktiv", + "Inaktiv": "Inaktiv", + "Benutzerinformationen werden geladen...": "Benutzerinformationen werden geladen...", + "Fehler beim Laden der Benutzerinformationen": "Fehler beim Laden der Benutzerinformationen", + "Änderungen speichern": "Änderungen speichern", + "Speichern...": "Speichern...", + "Benutzerinformationen erfolgreich aktualisiert": "Benutzerinformationen erfolgreich aktualisiert", + "Fehler beim Aktualisieren der Benutzerinformationen": "Fehler beim Aktualisieren der Benutzerinformationen", + "Verwaltet von {provider}": "Verwaltet von {provider}", + "Dieses Feld wird von {provider} verwaltet und kann nicht geändert werden": "Dieses Feld wird von {provider} verwaltet und kann nicht geändert werden", + "Deutsch": "Deutsch", + "English": "English", + "Français": "Français", + "Laden...": "Laden...", + "Fehler": "Fehler", + "Erfolgreich": "Erfolgreich", + "Abbrechen": "Abbrechen", + "Speichern": "Speichern", + "Löschen": "Löschen", + "Bearbeiten": "Bearbeiten", + "Schließen": "Schließen", + "Wiederholen": "Wiederholen", + "Erstellen": "Erstellen", + "Erstellen...": "Erstellen...", + "Kopiert": "Kopiert", + "Kopieren": "Kopieren", + "Details": "Details", + "k. A.": "k. 
A.", + "Nein": "Nein", + "Keine": "Keine", + "ausgewählt": "ausgewählt", + "Ja": "Ja", + "Keine Optionen verfügbar": "Keine Optionen verfügbar", + "Anmelden": "Anmelden", + "Registrieren": "Registrieren", + "Abmelden": "Abmelden", + "E-Mail": "E-Mail", + "Passwort": "Passwort", + "Prompt Vorlage": "Prompt Vorlage", + "Chatbereich": "Chatbereich", + "Workflow-Verlauf": "Workflow-Verlauf", + "Log": "Log", + "Workflow": "Workflow", + "Kein Workflow ausgewählt": "Kein Workflow ausgewählt", + "Logs werden geladen...": "Logs werden geladen...", + "Fehler beim Laden der Logs": "Fehler beim Laden der Logs", + "Keine Logs für diesen Workflow verfügbar": "Keine Logs für diesen Workflow verfügbar", + "Workflow läuft... Warte auf Logs...": "Workflow läuft... Warte auf Logs...", + "Logs konnten nicht geladen werden": "Logs konnten nicht geladen werden", + "INFO": "INFO", + "Workflow auswählen": "Workflow auswählen", + "Verfügbare Workflows": "Verfügbare Workflows", + "Keine Workflows verfügbar": "Keine Workflows verfügbar", + "Status": "Status", + "Runden": "Runden", + "Nachrichten": "Nachrichten", + "Token": "Token", + "Daten gesendet": "Daten gesendet", + "Daten empfangen": "Daten empfangen", + "Erfolgsrate": "Erfolgsrate", + "Gestartet": "Gestartet", + "Prompts werden geladen...": "Prompts werden geladen...", + "Fehler beim Laden der Prompts": "Fehler beim Laden der Prompts", + "Erneut versuchen": "Erneut versuchen", + "Neuer Prompt": "Neuer Prompt", + "Prompt": "Prompt", + "Prompts": "Prompts", + "Keine Prompts verfügbar": "Keine Prompts verfügbar", + "Erstellt": "Erstellt", + "Prompt ausführen": "Prompt ausführen", + "Prompt teilen": "Prompt teilen", + "Prompt löschen": "Prompt löschen", + "Klicken Sie erneut zum Bestätigen": "Klicken Sie erneut zum Bestätigen", + "Löschen...": "Löschen...", + "Zum Bestätigen klicken": "Zum Bestätigen klicken", + "Fehler beim Löschen": "Fehler beim Löschen", + "Prompt wird gelöscht...": "Prompt wird gelöscht...", + "Verwalten Sie Ihre 
Service-Verbindungen": "Verwalten Sie Ihre Service-Verbindungen", + "Google verbinden": "Google verbinden", + "Microsoft verbinden": "Microsoft verbinden", + "Google-Verbindung hinzufügen": "Google-Verbindung hinzufügen", + "Microsoft-Verbindung hinzufügen": "Microsoft-Verbindung hinzufügen", + "Google-Verbindung erstellen": "Google-Verbindung erstellen", + "Microsoft-Verbindung erstellen": "Microsoft-Verbindung erstellen", + "{authority} Verbindung bearbeiten": "{authority} Verbindung bearbeiten", + "Verbindung aktualisieren": "Verbindung aktualisieren", + "Service-Verbindungen": "Service-Verbindungen", + "Verbindungsfehler": "Verbindungsfehler", + "Trennungsfehler": "Trennungsfehler", + "Unbekannt": "Unbekannt", + "Nicht verfügbar": "Nicht verfügbar", + "Ungültiges Datum": "Ungültiges Datum", + "Sind Sie sicher, dass Sie die {service} Verbindung löschen möchten?": "Sind Sie sicher, dass Sie die {service} Verbindung löschen möchten?", + "Sind Sie sicher, dass Sie {count} Verbindungen löschen möchten?": "Sind Sie sicher, dass Sie {count} Verbindungen löschen möchten?", + "Service": "Service", + "Externer Benutzername": "Externer Benutzername", + "Externe E-Mail": "Externe E-Mail", + "Verbunden am": "Verbunden am", + "Zuletzt geprüft": "Zuletzt geprüft", + "Läuft ab am": "Läuft ab am", + "Google": "Google", + "Microsoft": "Microsoft", + "Lokal": "Lokal", + "Externen Benutzernamen eingeben": "Externen Benutzernamen eingeben", + "Externe E-Mail-Adresse eingeben": "Externe E-Mail-Adresse eingeben", + "Aktualisieren": "Aktualisieren", + "Verbinden": "Verbinden", + "Neuen Prompt erstellen": "Neuen Prompt erstellen", + "Name ist erforderlich": "Name ist erforderlich", + "Inhalt ist erforderlich": "Inhalt ist erforderlich", + "Fehler beim Erstellen des Prompts": "Fehler beim Erstellen des Prompts", + "Name": "Name", + "Inhalt": "Inhalt", + "Geben Sie einen Namen für den Prompt ein": "Geben Sie einen Namen für den Prompt ein", + "Geben Sie den Inhalt des Prompts ein": 
"Geben Sie den Inhalt des Prompts ein", + "Prompt erstellen": "Prompt erstellen", + "Benutzer auswählen": "Benutzer auswählen", + "Alle auswählen": "Alle auswählen", + "Alle abwählen": "Alle abwählen", + "Benutzer werden geladen...": "Benutzer werden geladen...", + "Fehler beim Laden der Benutzer": "Fehler beim Laden der Benutzer", + "Keine Benutzer verfügbar": "Keine Benutzer verfügbar", + "Bitte wählen Sie mindestens einen Benutzer aus": "Bitte wählen Sie mindestens einen Benutzer aus", + "1 Benutzer ausgewählt": "1 Benutzer ausgewählt", + "{count} Benutzer ausgewählt": "{count} Benutzer ausgewählt", + "Benutzerdefinierter Titel (optional)": "Benutzerdefinierter Titel (optional)", + "Geben Sie einen benutzerdefinierten Titel ein": "Geben Sie einen benutzerdefinierten Titel ein", + "Nachricht (optional)": "Nachricht (optional)", + "Fügen Sie eine Nachricht für die Empfänger hinzu": "Fügen Sie eine Nachricht für die Empfänger hinzu", + "Teilen": "Teilen", + "Wird geteilt...": "Wird geteilt...", + "Fehler beim Teilen des Prompts": "Fehler beim Teilen des Prompts", + "Prompt Einstellungen": "Prompt Einstellungen", + "Einstellungen werden in zukünftigen Updates hinzugefügt.": "Einstellungen werden in zukünftigen Updates hinzugefügt.", + "Gespräch fortsetzen...": "Gespräch fortsetzen...", + "Nachricht eingeben...": "Nachricht eingeben...", + "Datei entfernen": "Datei entfernen", + "Datei anhängen": "Datei anhängen", + "You": "You", + "Klicken Sie, um zu öffnen": "Klicken Sie, um zu öffnen", + "Dokument vorschauen": "Dokument vorschauen", + "Dokument herunterladen": "Dokument herunterladen", + "Workflow fehlgeschlagen.": "Workflow fehlgeschlagen.", + "Nochmal versuchen": "Nochmal versuchen", + "Folgenachricht wird gesendet...": "Folgenachricht wird gesendet...", + "Nachricht wird gesendet...": "Nachricht wird gesendet...", + "Fehler:": "Fehler:", + "Fehler beim Laden der Nachrichten:": "Fehler beim Laden der Nachrichten:", + "Workflow-Nachrichten werden geladen...": 
"Workflow-Nachrichten werden geladen...", + "Beginne ein Gespräch, indem du eine Nachricht eingibst, eine Vorlage auswählst oder einen vorherigen Workflow fortsetzt …": "Beginne ein Gespräch, indem du eine Nachricht eingibst, eine Vorlage auswählst oder einen vorherigen Workflow fortsetzt …", + "Oder geben Sie Ihre Nachricht ein...": "Oder geben Sie Ihre Nachricht ein...", + "Workflow wird fortgesetzt": "Workflow wird fortgesetzt", + "Datei": "Datei", + "angehängt": "angehängt", + "Dateien anhängen": "Dateien anhängen", + "Wird gesendet...": "Wird gesendet...", + "Wird verarbeitet...": "Wird verarbeitet...", + "Fortsetzen": "Fortsetzen", + "Senden": "Senden", + "Stoppen": "Stoppen", + "Wird gestoppt...": "Wird gestoppt...", + "Dateien hier ablegen zum Anhängen": "Dateien hier ablegen zum Anhängen", + "Datei-Ablage während Workflow deaktiviert": "Datei-Ablage während Workflow deaktiviert", + "Chat leeren...": "Chat leeren...", + "Verwende Vorlage:": "Verwende Vorlage:", + "Prompt auswählen...": "Prompt auswählen...", + "Vorschau wird geladen...": "Vorschau wird geladen...", + "Keine Vorschau verfügbar": "Keine Vorschau verfügbar", + "Vorschau schließen": "Vorschau schließen", + "Python": "Python", + "Workflows werden geladen...": "Workflows werden geladen...", + "Fehler beim Laden der Workflows:": "Fehler beim Laden der Workflows:", + "Sind Sie sicher, dass Sie Workflow \"{id}...\" löschen möchten?": "Sind Sie sicher, dass Sie Workflow \"{id}...\" löschen möchten?", + "Kein Nachrichteninhalt verfügbar": "Kein Nachrichteninhalt verfügbar", + "Unbekanntes Datum": "Unbekanntes Datum", + "Gestartet:": "Gestartet:", + "Letzte Aktivität:": "Letzte Aktivität:", + "Runde": "Runde", + "Workflow fortsetzen": "Workflow fortsetzen", + "Workflow löschen": "Workflow löschen", + "Workflow wird gelöscht...": "Workflow wird gelöscht...", + "Noch keinen Workflow ausgewählt": "Noch keinen Workflow ausgewählt", + "Wähle einen Workflow aus der Liste aus oder starte einen neuen 
Workflow": "Wähle einen Workflow aus der Liste aus oder starte einen neuen Workflow", + "Lade Fortschritt...": "Lade Fortschritt...", + "Aufgaben": "Aufgaben", + "Workflow Fortschritt": "Workflow Fortschritt", + "Analysiere Workflow...": "Analysiere Workflow...", + "Nach unten scrollen": "Nach unten scrollen", + "FEHLER": "FEHLER", + "FEHLGESCHLAGEN": "FEHLGESCHLAGEN", + "GESTOPPT": "GESTOPPT", + "ABGEBROCHEN": "ABGEBROCHEN", + "LÄUFT": "LÄUFT", + "VERARBEITUNG": "VERARBEITUNG", + "ABGESCHLOSSEN": "ABGESCHLOSSEN", + "WARTEND": "WARTEND", + "Unbekannte Größe": "Unbekannte Größe", + "Hochgeladen": "Hochgeladen", + "KI-erstellt": "KI-erstellt", + "Geteilt": "Geteilt", + "Datei vorschauen": "Datei vorschauen", + "Datei herunterladen": "Datei herunterladen", + "Datei löschen": "Datei löschen", + "Klicken Sie erneut zum Bestätigen der Löschung": "Klicken Sie erneut zum Bestätigen der Löschung", + "Zum Bestätigen klicken...": "Zum Bestätigen klicken...", + "Keine Dateien gefunden.": "Keine Dateien gefunden.", + "Keine mit Ihnen geteilten Dateien gefunden.": "Keine mit Ihnen geteilten Dateien gefunden.", + "Keine von der KI erstellten Dateien gefunden.": "Keine von der KI erstellten Dateien gefunden.", + "Keine hochgeladenen Dateien gefunden.": "Keine hochgeladenen Dateien gefunden.", + "Typ": "Typ", + "Größe": "Größe", + "Datum": "Datum", + "Dateien auswählen": "Dateien auswählen", + "Alle Dateien": "Alle Dateien", + "Neue Datei hochladen": "Neue Datei hochladen", + "Dateien werden geladen...": "Dateien werden geladen...", + "Fehler beim Laden der Dateien:": "Fehler beim Laden der Dateien:", + "Datei hochladen": "Datei hochladen", + "Datei hier ablegen...": "Datei hier ablegen...", + "Lädt hoch...": "Lädt hoch...", + "Dateien hierher ziehen": "Dateien hierher ziehen", + "oder": "oder", + "Durchsuchen": "Durchsuchen", + "Ausgewählte Datei:": "Ausgewählte Datei:", + "Hochladen": "Hochladen", + "Wird hochgeladen...": "Wird hochgeladen...", + "Datei erfolgreich hochgeladen!": 
"Datei erfolgreich hochgeladen!", + "Beim Hochladen ist ein Fehler aufgetreten.": "Beim Hochladen ist ein Fehler aufgetreten.", + "Beim Hochladen ist ein unerwarteter Fehler aufgetreten.": "Beim Hochladen ist ein unerwarteter Fehler aufgetreten.", + "Dateien hier ablegen": "Dateien hier ablegen", + "Dateien hochladen": "Dateien hochladen", + "Meine Uploads": "Meine Uploads", + "Erstellte Dateien": "Erstellte Dateien", + "Geteilte Dateien": "Geteilte Dateien", + "Datei hinzufügen": "Datei hinzufügen", + "Dateiname": "Dateiname", + "MIME-Typ": "MIME-Typ", + "Dateigröße": "Dateigröße", + "Erstellungsdatum": "Erstellungsdatum", + "Quelle": "Quelle", + "Bild": "Bild", + "PDF": "PDF", + "Dokument": "Dokument", + "Tabelle": "Tabelle", + "Text": "Text", + "Video": "Video", + "Audio": "Audio", + "Vorschau": "Vorschau", + "Herunterladen": "Herunterladen", + "Entfernen": "Entfernen", + "Datei bearbeiten": "Datei bearbeiten", + "Sind Sie sicher, dass Sie die Datei \"{name}\" löschen möchten?": "Sind Sie sicher, dass Sie die Datei \"{name}\" löschen möchten?", + "Dateivorschau": "Dateivorschau", + "Vorschau für diesen Dateityp nicht verfügbar": "Vorschau für diesen Dateityp nicht verfügbar", + "Fehler beim Laden der Vorschau": "Fehler beim Laden der Vorschau", + "Textvorschau": "Textvorschau", + "Diese Datei scheint beschädigt zu sein. Sie hat eine PDF-Erweiterung, enthält aber Textinhalte. Bitte laden Sie die Datei erneut hoch, falls möglich.": "Diese Datei scheint beschädigt zu sein. Sie hat eine PDF-Erweiterung, enthält aber Textinhalte. 
Bitte laden Sie die Datei erneut hoch, falls möglich.", + "In die Zwischenablage kopiert": "In die Zwischenablage kopiert", + "Ungültiges JSON": "Ungültiges JSON", + "Eigenschaften": "Eigenschaften", + "Rohtext in die Zwischenablage kopieren": "Rohtext in die Zwischenablage kopieren", + "Keine Workflows gefunden": "Keine Workflows gefunden", + "ID": "ID", + "Letzte Aktivität": "Letzte Aktivität", + "Läuft": "Läuft", + "Abgeschlossen": "Abgeschlossen", + "Fehlgeschlagen": "Fehlgeschlagen", + "Gestoppt": "Gestoppt", + "Wartend": "Wartend", + "Workflow stoppen": "Workflow stoppen", + "Unbenannter Workflow": "Unbenannter Workflow", + "Sind Sie sicher, dass Sie den Workflow \"{name}\" löschen möchten?": "Sind Sie sicher, dass Sie den Workflow \"{name}\" löschen möchten?", + "Zum Ein-/Ausklappen klicken": "Zum Ein-/Ausklappen klicken", + "Suchen...": "Suchen...", + "Daten aktualisieren": "Daten aktualisieren", + "Filter löschen": "Filter löschen", + "{column} filtern": "{column} filtern", + "Aktionen": "Aktionen", + "Seite {page} von {total} ({count} Einträge)": "Seite {page} von {total} ({count} Einträge)", + "Einträge pro Seite:": "Einträge pro Seite:", + "Erste Seite": "Erste Seite", + "Vorherige Seite": "Vorherige Seite", + "Nächste Seite": "Nächste Seite", + "Letzte Seite": "Letzte Seite", + "Alle Elemente auswählen": "Alle Elemente auswählen", + "Dieses Element auswählen": "Dieses Element auswählen", + "Dieses Element kann nicht ausgewählt werden": "Dieses Element kann nicht ausgewählt werden", + "Löschen ({count})": "Löschen ({count})", + "Sind Sie sicher, dass Sie die {count} ausgewählten Elemente löschen möchten?": "Sind Sie sicher, dass Sie die {count} ausgewählten Elemente löschen möchten?", + "Sind Sie sicher, dass Sie das ausgewählte Element löschen möchten?": "Sind Sie sicher, dass Sie das ausgewählte Element löschen möchten?", + "Alle {count} Elemente löschen": "Alle {count} Elemente löschen", + "Keine Daten verfügbar": "Keine Daten verfügbar", + "Alle 
Daten als CSV exportieren": "Alle Daten als CSV exportieren", + "Exportiere...": "Exportiere...", + "Filter": "Filter", + "Filter: {value}": "Filter: {value}", + "Alle": "Alle", + "Zum Filtern klicken": "Zum Filtern klicken", + "Von": "Von", + "Lade Filterwerte...": "Lade Filterwerte...", + "Bis": "Bis", + "{fieldLabel} ist erforderlich": "{fieldLabel} ist erforderlich", + "{fieldLabel} muss eine gültige Ganzzahl sein": "{fieldLabel} muss eine gültige Ganzzahl sein", + "{fieldLabel} muss eine gültige Zahl sein": "{fieldLabel} muss eine gültige Zahl sein", + "Ungültiges E-Mail-Format": "Ungültiges E-Mail-Format", + "Ungültige URL": "Ungültige URL", + "Ungültige Auswahl": "Ungültige Auswahl", + "Ungültiges Datumsformat": "Ungültiges Datumsformat", + "Einträge": "Einträge", + "Seite": "Seite", + "Sortierung {position}: {direction}": "Sortierung {position}: {direction}", + "Zum Sortieren klicken": "Zum Sortieren klicken", + "Prompts verwalten": "Prompts verwalten", + "Prompts für Ihren KI-Assistenten erstellen und verwalten": "Prompts für Ihren KI-Assistenten erstellen und verwalten", + "Prompt hinzufügen": "Prompt hinzufügen", + "Mandat-ID": "Mandat-ID", + "Unbenannt": "Unbenannt", + "Keine Berechtigung zum Löschen des Prompts": "Keine Berechtigung zum Löschen des Prompts", + "Sind Sie sicher, dass Sie \"{name}\" löschen möchten?": "Sind Sie sicher, dass Sie \"{name}\" löschen möchten?", + "Sind Sie sicher, dass Sie {count} Prompts löschen möchten?": "Sind Sie sicher, dass Sie {count} Prompts löschen möchten?", + "Prompt-Name": "Prompt-Name", + "Prompt-Inhalt": "Prompt-Inhalt", + "Prompt-Name darf nicht leer sein": "Prompt-Name darf nicht leer sein", + "Prompt-Name darf 100 Zeichen nicht überschreiten": "Prompt-Name darf 100 Zeichen nicht überschreiten", + "Prompt-Inhalt darf nicht leer sein": "Prompt-Inhalt darf nicht leer sein", + "Prompt-Inhalt darf 10.000 Zeichen nicht überschreiten": "Prompt-Inhalt darf 10.000 Zeichen nicht überschreiten", + "Fehler beim Laden 
der Prompts:": "Fehler beim Laden der Prompts:", + "Prompt bearbeiten": "Prompt bearbeiten", + "Prompt erfolgreich erstellt": "Prompt erfolgreich erstellt", + "Benutzer": "Benutzer", + "Berechtigung": "Berechtigung", + "Aktiviert": "Aktiviert", + "Auth-Anbieter": "Auth-Anbieter", + "Passwort eingeben": "Passwort eingeben", + "Kein Benutzername": "Kein Benutzername", + "Kein Name": "Kein Name", + "Keine E-Mail": "Keine E-Mail", + "Keine Sprache": "Keine Sprache", + "Keine Berechtigung": "Keine Berechtigung", + "Kein Auth-Anbieter": "Kein Auth-Anbieter", + "Betrachter": "Betrachter", + "Administrator": "Administrator", + "Systemadministrator": "Systemadministrator", + "Benutzer bearbeiten": "Benutzer bearbeiten", + "Benutzer hinzufügen": "Benutzer hinzufügen", + "Benutzer erstellen": "Benutzer erstellen", + "Benutzer löschen": "Benutzer löschen", + "Sind Sie sicher, dass Sie diesen Benutzer löschen möchten?": "Sind Sie sicher, dass Sie diesen Benutzer löschen möchten?", + "Diese Aktion kann nicht rückgängig gemacht werden.": "Diese Aktion kann nicht rückgängig gemacht werden.", + "Sind Sie sicher, dass Sie {count} Benutzer löschen möchten?": "Sind Sie sicher, dass Sie {count} Benutzer löschen möchten?", + "Fehler beim Laden der Benutzer:": "Fehler beim Laden der Benutzer:", + "Team-Mitglieder": "Team-Mitglieder", + "Team-Mitglieder verwalten": "Team-Mitglieder verwalten", + "Team-Mitglieder verwalten, Berechtigungen festlegen und Zusammenarbeitseinstellungen konfigurieren": "Team-Mitglieder verwalten, Berechtigungen festlegen und Zusammenarbeitseinstellungen konfigurieren", + "Mitglied hinzufügen": "Mitglied hinzufügen", + "Passwort-Link senden": "Passwort-Link senden", + "Passwort-Link gesendet!": "Passwort-Link gesendet!", + "Link konnte nicht gesendet werden": "Link konnte nicht gesendet werden", + "Neues Team-Mitglied erstellen": "Neues Team-Mitglied erstellen", + "Team-Mitglied erfolgreich erstellt": "Team-Mitglied erfolgreich erstellt", + "Fehler beim Erstellen 
des Team-Mitglieds": "Fehler beim Erstellen des Team-Mitglieds", + "SharePoint Dokumente": "SharePoint Dokumente", + "Fehler beim Laden der SharePoint Dokumente:": "Fehler beim Laden der SharePoint Dokumente:", + "Verbindung testen": "Verbindung testen", + "Dokumente auflisten": "Dokumente auflisten", + "Sites entdecken": "Sites entdecken", + "Dokumentname": "Dokumentname", + "Pfad": "Pfad", + "Anzeigen": "Anzeigen", + "Microsoft Verbindungen": "Microsoft Verbindungen", + "Keine Microsoft-Verbindungen gefunden. Bitte erstellen Sie zuerst eine Verbindung.": "Keine Microsoft-Verbindungen gefunden. Bitte erstellen Sie zuerst eine Verbindung.", + "Verbindungen werden geladen...": "Verbindungen werden geladen...", + "Entdeckte Sites": "Entdeckte Sites", + "Keine SharePoint-Sites gefunden": "Keine SharePoint-Sites gefunden", + "Authentifizierungstoken abgelaufen oder ungültig. Bitte verbinden Sie Ihr Microsoft-Konto erneut.": "Authentifizierungstoken abgelaufen oder ungültig. Bitte verbinden Sie Ihr Microsoft-Konto erneut.", + "Versuchen Sie, Ihr Microsoft-Konto auf der Verbindungsseite erneut zu verbinden.": "Versuchen Sie, Ihr Microsoft-Konto auf der Verbindungsseite erneut zu verbinden.", + "SharePoint Site URL": "SharePoint Site URL", + "Ordnerpfade": "Ordnerpfade", + "Sprach Integration": "Sprach Integration", + "Unterstützt von": "Unterstützt von", + "Virtual Assistant (VA)": "Virtual Assistant (VA)", + "Geben Sie Kunden einen schnellen und effizienten Selbstservice für Sprach- und Textanfragen, der 24/7 verfügbar ist.": "Geben Sie Kunden einen schnellen und effizienten Selbstservice für Sprach- und Textanfragen, der 24/7 verfügbar ist.", + "Speech Analytics (SA)": "Speech Analytics (SA)", + "Überwachen Sie automatisch 100% der Gespräche, um wertvolle Einblicke für Ihr Unternehmen zu erhalten.": "Überwachen Sie automatisch 100% der Gespräche, um wertvolle Einblicke für Ihr Unternehmen zu erhalten.", + "Voice Biometrics (VB)": "Voice Biometrics (VB)", + 
"Identifizieren und authentifizieren Sie Anrufer in Sekunden mit kontinuierlicher Verifizierung und Sicherheit.": "Identifizieren und authentifizieren Sie Anrufer in Sekunden mit kontinuierlicher Verifizierung und Sicherheit.", + "Knowledge Agent (KA)": "Knowledge Agent (KA)", + "Vereinheitlichen und liefern Sie Informationen an Ihre Kunden und Mitarbeiter, wann und wo sie sie benötigen.": "Vereinheitlichen und liefern Sie Informationen an Ihre Kunden und Mitarbeiter, wann und wo sie sie benötigen.", + "Chat Platform (CP)": "Chat Platform (CP)", + "Bieten Sie Unterstützung im Live-Chat und setzen Sie intelligente Chatbots in allen Kanälen ein.": "Bieten Sie Unterstützung im Live-Chat und setzen Sie intelligente Chatbots in allen Kanälen ein.", + "Agent Assist (AA)": "Agent Assist (AA)", + "Stellen Sie alles, was Ihre Agenten benötigen, in ihren Händen bereit, mit einem einheitlichen Agent-Desktop.": "Stellen Sie alles, was Ihre Agenten benötigen, in ihren Händen bereit, mit einem einheitlichen Agent-Desktop.", + "Revolutionäre Telefonie-Integration mit Spitch.ai": "Revolutionäre Telefonie-Integration mit Spitch.ai", + "Erleben Sie die Zukunft der Mandantenkommunikation durch unsere strategische Partnerschaft mit Spitch.ai. Diese bahnbrechende Integration verwandelt Ihre PowerOn-Plattform in ein intelligentes Telefonie-System, das externe Mandanten nahtlos mit Unternehmen verbindet.": "Erleben Sie die Zukunft der Mandantenkommunikation durch unsere strategische Partnerschaft mit Spitch.ai. Diese bahnbrechende Integration verwandelt Ihre PowerOn-Plattform in ein intelligentes Telefonie-System, das externe Mandanten nahtlos mit Unternehmen verbindet.", + "Nahtloser Mandanten-Workflow:": "Nahtloser Mandanten-Workflow:", + "Von der Registrierung bis zur technischen Einrichtung - Ihr Mandant registriert sich bei PowerOn für Telefonie-Services, lädt Dokumente hoch und erhält automatisch eine technische SIP-Nummer von Spitch. 
Die Call-Weiterleitung kann jederzeit aktiviert oder deaktiviert werden, was maximale Flexibilität und BCM-Sicherheit gewährleistet.": "Von der Registrierung bis zur technischen Einrichtung - Ihr Mandant registriert sich bei PowerOn für Telefonie-Services, lädt Dokumente hoch und erhält automatisch eine technische SIP-Nummer von Spitch. Die Call-Weiterleitung kann jederzeit aktiviert oder deaktiviert werden, was maximale Flexibilität und BCM-Sicherheit gewährleistet.", + "KI-gestützte Dokumentengenerierung:": "KI-gestützte Dokumentengenerierung:", + "Unsere bereits aktive Dokumenten-Extraktions-Engine generiert automatisch personalisierte Dokumente für Spitch, basierend auf Mandantenspezifischen Daten. Die KI nutzt FAQ-Datenbanken, Mitarbeiterinformationen und Service-Details, um jeden Anruf kontextuell und hochpersonalisiert zu gestalten.": "Unsere bereits aktive Dokumenten-Extraktions-Engine generiert automatisch personalisierte Dokumente für Spitch, basierend auf Mandantenspezifischen Daten. Die KI nutzt FAQ-Datenbanken, Mitarbeiterinformationen und Service-Details, um jeden Anruf kontextuell und hochpersonalisiert zu gestalten.", + "Echtzeit-Datensynchronisation:": "Echtzeit-Datensynchronisation:", + "Spitch prüft vor jedem Anruf die Mandantenberechtigung bei PowerOn, während alle Datenänderungen zentral von PowerOn initiiert werden. Call-Transkripte werden in Echtzeit in Ihrer PowerOn-Datenbank gespeichert, mit vollständiger Mandantenisolation und Sicherheit. Bei Ausfällen werden Anrufe automatisch blockiert, um die Integrität zu gewährleisten.": "Spitch prüft vor jedem Anruf die Mandantenberechtigung bei PowerOn, während alle Datenänderungen zentral von PowerOn initiiert werden. Call-Transkripte werden in Echtzeit in Ihrer PowerOn-Datenbank gespeichert, mit vollständiger Mandantenisolation und Sicherheit. 
Bei Ausfällen werden Anrufe automatisch blockiert, um die Integrität zu gewährleisten.", + "Kosteneinsparungen & Effizienz:": "Kosteneinsparungen & Effizienz:", + "Mandanten können jederzeit auf die technische SIP-Nummer umstellen und dabei erhebliche Telefoniekosten sparen. Die Integration funktioniert wie ein weiterer Connector (Outlook, SharePoint) und wird nahtlos in Ihren bestehenden Workflow integriert.": "Mandanten können jederzeit auf die technische SIP-Nummer umstellen und dabei erhebliche Telefoniekosten sparen. Die Integration funktioniert wie ein weiterer Connector (Outlook, SharePoint) und wird nahtlos in Ihren bestehenden Workflow integriert.", + "Mehr erfahren": "Mehr erfahren", + "Zurück zur Sprach Integration": "Zurück zur Sprach Integration", + "Mandat erstellen": "Mandat erstellen", + "Unternehmensinformationen": "Unternehmensinformationen", + "Firmenname": "Firmenname", + "Geben Sie Ihren Firmennamen ein": "Geben Sie Ihren Firmennamen ein", + "Branche": "Branche", + "z.B. Finanzdienstleistungen, Technologie, etc.": "z.B. Finanzdienstleistungen, Technologie, etc.", + "Geschäftszeiten": "Geschäftszeiten", + "Zeitzone": "Zeitzone", + "Kontaktinformationen": "Kontaktinformationen", + "kontakt@firma.com": "kontakt@firma.com", + "Telefonnummer": "Telefonnummer", + "+41 123 456 789": "+41 123 456 789", + "Straße": "Straße", + "Postleitzahl": "Postleitzahl", + "Stadt": "Stadt", + "Land": "Land", + "Kontakte einrichten": "Kontakte einrichten", + "Möchten Sie jetzt Kontakte für Ihr Mandat einrichten? Sie können dies auch später in den Einstellungen tun.": "Möchten Sie jetzt Kontakte für Ihr Mandat einrichten? 
Sie können dies auch später in den Einstellungen tun.", + "Jetzt überspringen": "Jetzt überspringen", + "Firmenname ist erforderlich": "Firmenname ist erforderlich", + "Branche ist erforderlich": "Branche ist erforderlich", + "E-Mail-Adresse ist erforderlich": "E-Mail-Adresse ist erforderlich", + "Bitte geben Sie eine gültige E-Mail-Adresse ein": "Bitte geben Sie eine gültige E-Mail-Adresse ein", + "Telefonnummer ist erforderlich": "Telefonnummer ist erforderlich", + "Straße ist erforderlich": "Straße ist erforderlich", + "Postleitzahl ist erforderlich": "Postleitzahl ist erforderlich", + "Stadt ist erforderlich": "Stadt ist erforderlich", + "Land ist erforderlich": "Land ist erforderlich", + "✓ Mandat eingereicht": "✓ Mandat eingereicht", + "Neu starten": "Neu starten", + "Mandat erfolgreich eingereicht!": "Mandat erfolgreich eingereicht!", + "Vielen Dank für Ihr Interesse an unserer Sprach Integration powered by Spitch.ai. Wir haben Ihr Mandat erhalten und werden es in Kürze überprüfen.": "Vielen Dank für Ihr Interesse an unserer Sprach Integration powered by Spitch.ai. 
Wir haben Ihr Mandat erhalten und werden es in Kürze überprüfen.", + "Eingereichte Daten:": "Eingereichte Daten:", + "Firma": "Firma", + "Telefon": "Telefon", + "Adresse": "Adresse", + "Was passiert als nächstes?": "Was passiert als nächstes?", + "E-Mail-Bestätigung": "E-Mail-Bestätigung", + "Sie erhalten in den nächsten Minuten eine Bestätigungs-E-Mail.": "Sie erhalten in den nächsten Minuten eine Bestätigungs-E-Mail.", + "Überprüfungsprozess": "Überprüfungsprozess", + "Unser Team wird Ihr Mandat innerhalb von 1-2 Werktagen überprüfen.": "Unser Team wird Ihr Mandat innerhalb von 1-2 Werktagen überprüfen.", + "Einrichtungsanruf": "Einrichtungsanruf", + "Bei Genehmigung planen wir einen Einrichtungsanruf zur Konfiguration Ihrer Integration.": "Bei Genehmigung planen wir einen Einrichtungsanruf zur Konfiguration Ihrer Integration.", + "Fragen?": "Fragen?", + "Falls Sie Fragen zu Ihrem Mandat oder dem Integrationsprozess haben, zögern Sie nicht, unser Support-Team zu kontaktieren.": "Falls Sie Fragen zu Ihrem Mandat oder dem Integrationsprozess haben, zögern Sie nicht, unser Support-Team zu kontaktieren.", + "Sprach-Einstellungen": "Sprach-Einstellungen", + "Neues Transkript": "Neues Transkript", + "Aktuelle Transkripte": "Aktuelle Transkripte", + "Keine Transkripte vorhanden": "Keine Transkripte vorhanden", + "Dauer": "Dauer", + "Transkript": "Transkript", + "Transkript wird verarbeitet...": "Transkript wird verarbeitet...", + "Verarbeitung": "Verarbeitung", + "Zugriff verweigert": "Zugriff verweigert", + "Sie müssen sich zuerst für die Sprach-Integration anmelden, um auf die Transkriptverwaltung zuzugreifen.": "Sie müssen sich zuerst für die Sprach-Integration anmelden, um auf die Transkriptverwaltung zuzugreifen.", + "Jetzt anmelden": "Jetzt anmelden", + "Betreff": "Betreff", + "Startzeit": "Startzeit", + "Endzeit": "Endzeit", + "Anrufer": "Anrufer", + "Empfänger": "Empfänger", + "Tags": "Tags", + "Sprach-Integration Einstellungen": "Sprach-Integration 
Einstellungen", + "Verwalten Sie Ihre Sprach-Integrations-Konfiguration und Einstellungen.": "Verwalten Sie Ihre Sprach-Integrations-Konfiguration und Einstellungen.", + "Geschäftszeiten & Zeitzone": "Geschäftszeiten & Zeitzone", + "Einstellungen erfolgreich gespeichert!": "Einstellungen erfolgreich gespeichert!", + "Fehler beim Speichern der Einstellungen. Bitte versuchen Sie es erneut.": "Fehler beim Speichern der Einstellungen. Bitte versuchen Sie es erneut.", + "Auf Standard zurücksetzen": "Auf Standard zurücksetzen", + "Sind Sie sicher, dass Sie alle Sprach-Integrations-Einstellungen zurücksetzen möchten? Diese Aktion kann nicht rückgängig gemacht werden.": "Sind Sie sicher, dass Sie alle Sprach-Integrations-Einstellungen zurücksetzen möchten? Diese Aktion kann nicht rückgängig gemacht werden.", + "Einstellungen wurden erfolgreich zurückgesetzt.": "Einstellungen wurden erfolgreich zurückgesetzt.", + "Keine Sprach-Integrations-Daten gefunden. Bitte melden Sie sich zuerst an, um auf die Einstellungen zuzugreifen.": "Keine Sprach-Integrations-Daten gefunden. Bitte melden Sie sich zuerst an, um auf die Einstellungen zuzugreifen.", + "Information": "Information", + "Ihre Anfrage wird verarbeitet...": "Ihre Anfrage wird verarbeitet...", + "Upload fehlgeschlagen. Bitte versuchen Sie es erneut.": "Upload fehlgeschlagen. Bitte versuchen Sie es erneut.", + "Datei bereits vorhanden": "Datei bereits vorhanden", + "Die Datei \"{fileName}\" existiert bereits mit identischem Inhalt. Die vorhandene Datei wird wiederverwendet.": "Die Datei \"{fileName}\" existiert bereits mit identischem Inhalt. 
Die vorhandene Datei wird wiederverwendet.", + "Automatisierungen": "Automatisierungen", + "Workflow-Automatisierungen verwalten": "Workflow-Automatisierungen verwalten", + "Geplante und automatisierte Workflows": "Geplante und automatisierte Workflows", + "Neue Automatisierung": "Neue Automatisierung", + "Ausführen": "Ausführen", + "Neue Automatisierung erstellen": "Neue Automatisierung erstellen", + "Automatisierung erfolgreich erstellt": "Automatisierung erfolgreich erstellt", + "Fehler beim Erstellen der Automatisierung": "Fehler beim Erstellen der Automatisierung", + "Basisdaten": "Basisdaten", + "Grundlegende Daten und Ressourcen": "Grundlegende Daten und Ressourcen", + "Werkzeuge": "Werkzeuge", + "Werkzeuge und Hilfsmittel": "Werkzeuge und Hilfsmittel", + "Verwaltungs- und Management-Tools": "Verwaltungs- und Management-Tools", + "Dieser Bereich enthält alle Verwaltungs- und Management-Tools für Ihren Arbeitsbereich.": "Dieser Bereich enthält alle Verwaltungs- und Management-Tools für Ihren Arbeitsbereich.", + "Verfügbare Tools": "Verfügbare Tools", + "Management-Tools umfassen:": "Management-Tools umfassen:", + "Dateiverwaltung - Dokumente hochladen und organisieren": "Dateiverwaltung - Dokumente hochladen und organisieren", + "Benutzerverwaltung - Teammitglieder und Berechtigungen verwalten": "Benutzerverwaltung - Teammitglieder und Berechtigungen verwalten", + "Systemeinstellungen - Arbeitsbereich-Einstellungen konfigurieren": "Systemeinstellungen - Arbeitsbereich-Einstellungen konfigurieren", + "Datenverwaltung - Datenimporte und -exporte verwalten": "Datenverwaltung - Datenimporte und -exporte verwalten", + "Mandate": "Mandate", + "Mandate und Berechtigungen verwalten": "Mandate und Berechtigungen verwalten", + "Mandatsverwaltung": "Mandatsverwaltung", + "Verwalten Sie Mandate und deren zugehörige Berechtigungen.": "Verwalten Sie Mandate und deren zugehörige Berechtigungen.", + "Mandat hinzufügen": "Mandat hinzufügen", + "Neues Mandat erstellen": "Neues 
Mandat erstellen", + "Mandat erfolgreich erstellt": "Mandat erfolgreich erstellt", + "Fehler beim Erstellen des Mandats": "Fehler beim Erstellen des Mandats", + "RBAC-Regeln": "RBAC-Regeln", + "Rollenbasierte Zugriffssteuerungsregeln": "Rollenbasierte Zugriffssteuerungsregeln", + "RBAC-Regelverwaltung": "RBAC-Regelverwaltung", + "Konfigurieren und verwalten Sie rollenbasierte Zugriffssteuerungsregeln.": "Konfigurieren und verwalten Sie rollenbasierte Zugriffssteuerungsregeln.", + "RBAC-Regel hinzufügen": "RBAC-Regel hinzufügen", + "Neue RBAC-Regel erstellen": "Neue RBAC-Regel erstellen", + "RBAC-Regel erfolgreich erstellt": "RBAC-Regel erfolgreich erstellt", + "Fehler beim Erstellen der RBAC-Regel": "Fehler beim Erstellen der RBAC-Regel", + "RBAC-Rollen": "RBAC-Rollen", + "Rollenverwaltung": "Rollenverwaltung", + "RBAC-Rollenverwaltung": "RBAC-Rollenverwaltung", + "Erstellen und verwalten Sie RBAC-Rollen und deren Berechtigungen.": "Erstellen und verwalten Sie RBAC-Rollen und deren Berechtigungen.", + "Rolle hinzufügen": "Rolle hinzufügen", + "Neue Rolle erstellen": "Neue Rolle erstellen", + "Rolle erfolgreich erstellt": "Rolle erfolgreich erstellt", + "Fehler beim Erstellen der Rolle": "Fehler beim Erstellen der Rolle", + "Admin-Einstellungen": "Admin-Einstellungen", + "Administrative Einstellungen": "Administrative Einstellungen", + "Konfigurieren Sie administrative Einstellungen und Systempräferenzen.": "Konfigurieren Sie administrative Einstellungen und Systempräferenzen.", + "Start": "Start", + "Willkommen in Ihrem Arbeitsbereich": "Willkommen in Ihrem Arbeitsbereich", + "Dies ist Ihr Ausgangspunkt für den Zugriff auf alle Arbeitsbereich-Features und -Tools.": "Dies ist Ihr Ausgangspunkt für den Zugriff auf alle Arbeitsbereich-Features und -Tools.", + "Schnellzugriff": "Schnellzugriff", + "Beginnen Sie mit:": "Beginnen Sie mit:", + "Schnellzugriff - Springen Sie zu häufig verwendeten Features": "Schnellzugriff - Springen Sie zu häufig verwendeten Features", + 
"Letzte Aktivitäten - Sehen Sie Ihre neueste Arbeit": "Letzte Aktivitäten - Sehen Sie Ihre neueste Arbeit", + "Übersicht - Sehen Sie den Arbeitsbereich-Status und Updates": "Übersicht - Sehen Sie den Arbeitsbereich-Status und Updates", + "Navigation - Erkunden Sie alle verfügbaren Tools": "Navigation - Erkunden Sie alle verfügbaren Tools", + "Projekte": "Projekte", + "Projektverwaltung": "Projektverwaltung", + "Projektverwaltung und -organisation": "Projektverwaltung und -organisation", + "Suchen Sie nach Standorten über Adresse oder Koordinaten, oder verwenden Sie natürliche Sprache, um Projekte zu erstellen und zu verwalten.": "Suchen Sie nach Standorten über Adresse oder Koordinaten, oder verwenden Sie natürliche Sprache, um Projekte zu erstellen und zu verwalten.", + "Befehl eingeben (z.B., \"Erstelle ein neues Projekt namens 'Hauptstrasse 42'\")": "Befehl eingeben (z.B., \"Erstelle ein neues Projekt namens 'Hauptstrasse 42'\")", + "Noch keine Befehle ausgeführt. Senden Sie einen Befehl, um Ergebnisse hier zu sehen.": "Noch keine Befehle ausgeführt. Senden Sie einen Befehl, um Ergebnisse hier zu sehen.", + "Datenverwaltung": "Datenverwaltung", + "Datenverwaltung mit Tabellen": "Datenverwaltung mit Tabellen", + "Verwalten Sie Daten über Tabellen. Wählen Sie eine Tabelle aus oder verwenden Sie natürliche Sprache, um Befehle auszuführen.": "Verwalten Sie Daten über Tabellen. 
Wählen Sie eine Tabelle aus oder verwenden Sie natürliche Sprache, um Befehle auszuführen.", + "Sie können auch auf den Upload-Button klicken": "Sie können auch auf den Upload-Button klicken", + "Dateien werden verarbeitet...": "Dateien werden verarbeitet...", + "Fehler beim Verarbeiten der Dateien": "Fehler beim Verarbeiten der Dateien", + "Treuhand": "Treuhand", + "Treuhandverwaltung": "Treuhandverwaltung", + "Verwaltung von Treuhand-Organisationen, Verträgen und Buchungen": "Verwaltung von Treuhand-Organisationen, Verträgen und Buchungen", + "Organisationen": "Organisationen", + "Trustee-Organisationen verwalten": "Trustee-Organisationen verwalten", + "Verwaltung der Treuhand-Organisationen": "Verwaltung der Treuhand-Organisationen", + "Neue Organisation": "Neue Organisation", + "z.B. treuhand-ag-zuerich": "z.B. treuhand-ag-zuerich", + "Bezeichnung": "Bezeichnung", + "z.B. Treuhand AG Zürich": "z.B. Treuhand AG Zürich", + "Neue Organisation erstellen": "Neue Organisation erstellen", + "Organisation erfolgreich erstellt": "Organisation erfolgreich erstellt", + "Fehler beim Erstellen der Organisation": "Fehler beim Erstellen der Organisation", + "Rollen": "Rollen", + "Trustee-Rollen verwalten": "Trustee-Rollen verwalten", + "Verwaltung der Feature-spezifischen Rollen": "Verwaltung der Feature-spezifischen Rollen", + "Neue Rolle": "Neue Rolle", + "Rollen-ID": "Rollen-ID", + "z.B. admin, operate, userreport": "z.B. 
admin, operate, userreport", + "Beschreibung": "Beschreibung", + "Beschreibung der Rolle": "Beschreibung der Rolle", + "Zugriff": "Zugriff", + "Benutzer-Zugriff verwalten": "Benutzer-Zugriff verwalten", + "Verwaltung der Benutzerzugriffe auf Organisationen": "Verwaltung der Benutzerzugriffe auf Organisationen", + "Neuer Zugriff": "Neuer Zugriff", + "Organisation": "Organisation", + "Rolle": "Rolle", + "Vertrag (optional)": "Vertrag (optional)", + "Leer = Zugriff auf alle Verträge": "Leer = Zugriff auf alle Verträge", + "Neuen Zugriff erstellen": "Neuen Zugriff erstellen", + "Zugriff erfolgreich erstellt": "Zugriff erfolgreich erstellt", + "Fehler beim Erstellen des Zugriffs": "Fehler beim Erstellen des Zugriffs", + "Verträge": "Verträge", + "Kundenverträge verwalten": "Kundenverträge verwalten", + "Verwaltung der Kundenverträge": "Verwaltung der Kundenverträge", + "Neuer Vertrag": "Neuer Vertrag", + "z.B. Muster AG 2026": "z.B. Muster AG 2026", + "Neuen Vertrag erstellen": "Neuen Vertrag erstellen", + "Vertrag erfolgreich erstellt": "Vertrag erfolgreich erstellt", + "Fehler beim Erstellen des Vertrags": "Fehler beim Erstellen des Vertrags", + "Dokumente": "Dokumente", + "Belege verwalten": "Belege verwalten", + "Verwaltung der Dokumente und Belege": "Verwaltung der Dokumente und Belege", + "Neues Dokument": "Neues Dokument", + "Vertrag": "Vertrag", + "z.B. Beleg.pdf": "z.B. 
Beleg.pdf", + "Dateityp": "Dateityp", + "Neues Dokument erstellen": "Neues Dokument erstellen", + "Dokument erfolgreich erstellt": "Dokument erfolgreich erstellt", + "Fehler beim Erstellen des Dokuments": "Fehler beim Erstellen des Dokuments", + "Positionen": "Positionen", + "Buchungspositionen verwalten": "Buchungspositionen verwalten", + "Verwaltung der Buchungspositionen (Speseneinträge)": "Verwaltung der Buchungspositionen (Speseneinträge)", + "Neue Position": "Neue Position", + "Valutadatum": "Valutadatum", + "Name des Unternehmens": "Name des Unternehmens", + "Buchungswährung": "Buchungswährung", + "Buchungsbetrag": "Buchungsbetrag", + "Originalwährung": "Originalwährung", + "Originalbetrag": "Originalbetrag", + "MwSt %": "MwSt %", + "MwSt Betrag": "MwSt Betrag", + "Neue Position erstellen": "Neue Position erstellen", + "Position erfolgreich erstellt": "Position erfolgreich erstellt", + "Fehler beim Erstellen der Position": "Fehler beim Erstellen der Position", + "UI-Sprachen": "UI-Sprachen", + "Globale Sprachsets verwalten (SysAdmin).": "Globale Sprachsets verwalten (SysAdmin).", + "Alle aktualisieren": "Alle aktualisieren", + "Alle Nicht-Standard-Sprachsets jetzt mit dem deutschen Master synchronisieren?": "Alle Nicht-Standard-Sprachsets jetzt mit dem deutschen Master synchronisieren?", + "Neue Sprache": "Neue Sprache", + "Anzeigename": "Anzeigename", + "Hinzufügen": "Hinzufügen", + "Keine Einträge": "Keine Einträge", + "Die Erstellung einer neuen Sprache kann AI-Guthaben auf Ihrem Mandats-Pool belasten. Fortfahren?": "Die Erstellung einer neuen Sprache kann AI-Guthaben auf Ihrem Mandats-Pool belasten. 
Fortfahren?", + "Sprachset {code} wirklich löschen?": "Sprachset {code} wirklich löschen?", + "Fortfahren": "Fortfahren" + }, + "status": "complete", + "isDefault": true + }, + { + "id": "en", + "label": "English", + "keys": { + "Zentrale": "Dashboard", + "Dateien": "Files", + "Team-Bereich": "Team Area", + "Verbindungen": "Connections", + "Workflows": "Workflows", + "Einstellungen": "Settings", + "SharePoint Test": "SharePoint Test", + "Sprache": "Language", + "Transkriptverwaltung": "Transcript Management", + "Darstellung": "Appearance", + "Über": "About", + "Version": "Version", + "Theme": "Theme", + "Wechseln Sie zwischen hellem und dunklem Modus": "Switch between light and dark mode", + "Wählen Sie Ihre bevorzugte Sprache": "Choose your preferred language", + "Hell": "Light", + "Dunkel": "Dark", + "Zu hellem Modus wechseln": "Switch to light mode", + "Zu dunklem Modus wechseln": "Switch to dark mode", + "Benutzerinformationen": "User Information", + "Verwalten Sie Ihre Kontoinformationen": "Manage your account information", + "Benutzername": "Username", + "Vollständiger Name": "Full Name", + "E-Mail-Adresse": "Email Address", + "Rufname am Telefon": "Phone Name", + "Wie möchten Sie am Telefon genannt werden?": "How would you like to be called on the phone?", + "Berechtigungsstufe": "Privilege Level", + "Kontostatus": "Account Status", + "Authentifizierungsanbieter": "Authentication Provider", + "Aktiv": "Active", + "Inaktiv": "Inactive", + "Benutzerinformationen werden geladen...": "Loading user information...", + "Fehler beim Laden der Benutzerinformationen": "Error loading user information", + "Änderungen speichern": "Save Changes", + "Speichern...": "Saving...", + "Benutzerinformationen erfolgreich aktualisiert": "User information updated successfully", + "Fehler beim Aktualisieren der Benutzerinformationen": "Error updating user information", + "Verwaltet von {provider}": "Managed by {provider}", + "Dieses Feld wird von {provider} verwaltet und kann nicht 
geändert werden": "This field is managed by {provider} and cannot be changed", + "Deutsch": "Deutsch", + "English": "English", + "Français": "Français", + "Laden...": "Loading...", + "Fehler": "Error", + "Erfolgreich": "Success", + "Abbrechen": "Cancel", + "Speichern": "Save", + "Löschen": "Delete", + "Bearbeiten": "Edit", + "Schließen": "Close", + "Wiederholen": "Retry", + "Erstellen": "Create", + "Erstellen...": "Creating...", + "Anmelden": "Login", + "Registrieren": "Register", + "Abmelden": "Logout", + "E-Mail": "Email", + "Passwort": "Password", + "Prompt Vorlage": "Prompt Template", + "Chatbereich": "Chat Area", + "Workflow-Verlauf": "Workflow History", + "Log": "Log", + "Workflow": "Workflow", + "Kein Workflow ausgewählt": "No workflow selected", + "Logs werden geladen...": "Loading logs...", + "Fehler beim Laden der Logs": "Error loading logs", + "Keine Logs für diesen Workflow verfügbar": "No logs available for this workflow", + "Workflow läuft... Warte auf Logs...": "Workflow running...
Waiting for logs...", + "Logs konnten nicht geladen werden": "Failed to fetch logs", + "INFO": "INFO", + "Workflow auswählen": "Select Workflow", + "Verfügbare Workflows": "Available Workflows", + "Keine Workflows verfügbar": "No workflows available", + "Status": "Status", + "Runden": "Rounds", + "Nachrichten": "Messages", + "Token": "Tokens", + "Daten gesendet": "Data Sent", + "Daten empfangen": "Data Received", + "Erfolgsrate": "Success Rate", + "Gestartet": "Started", + "Prompts werden geladen...": "Loading prompts...", + "Fehler beim Laden der Prompts": "Error loading prompts", + "Erneut versuchen": "Try again", + "Neuer Prompt": "New Prompt", + "Prompt": "Prompt", + "Prompts": "Prompts", + "Keine Prompts verfügbar": "No prompts available", + "Erstellt": "Created", + "Prompt ausführen": "Run prompt", + "Prompt teilen": "Share Prompt", + "Prompt löschen": "Delete prompt", + "Klicken Sie erneut zum Bestätigen": "Click again to confirm", + "Löschen...": "Deleting...", + "Zum Bestätigen klicken": "Click to confirm", + "Fehler beim Löschen": "Error deleting", + "Prompt wird gelöscht...": "Deleting prompt...", + "Verwalten Sie Ihre Service-Verbindungen": "Manage your service connections", + "Google verbinden": "Connect Google", + "Microsoft verbinden": "Connect Microsoft", + "Google-Verbindung hinzufügen": "Add Google Connection", + "Microsoft-Verbindung hinzufügen": "Add Microsoft Connection", + "Google-Verbindung erstellen": "Create Google Connection", + "Microsoft-Verbindung erstellen": "Create Microsoft Connection", + "{authority} Verbindung bearbeiten": "Edit {authority} Connection", + "Verbindung aktualisieren": "Update Connection", + "Service-Verbindungen": "Service Connections", + "Verbindungsfehler": "Connection Error", + "Trennungsfehler": "Disconnect Error", + "Unbekannt": "Unknown", + "Nicht verfügbar": "N/A", + "Ungültiges Datum": "Invalid date", + "Sind Sie sicher, dass Sie die {service} Verbindung löschen möchten?": "Are you sure you want to delete the
{service} connection?", + "Sind Sie sicher, dass Sie {count} Verbindungen löschen möchten?": "Are you sure you want to delete {count} connections?", + "Service": "Service", + "Externer Benutzername": "External Username", + "Externe E-Mail": "External Email", + "Verbunden am": "Connected At", + "Zuletzt geprüft": "Last Checked", + "Läuft ab am": "Expires At", + "Google": "Google", + "Microsoft": "Microsoft", + "Lokal": "Local", + "Externen Benutzernamen eingeben": "Enter external username", + "Externe E-Mail-Adresse eingeben": "Enter external email address", + "Aktualisieren": "Update", + "Verbinden": "Connect", + "Neuen Prompt erstellen": "Create New Prompt", + "Name ist erforderlich": "Name is required", + "Inhalt ist erforderlich": "Content is required", + "Fehler beim Erstellen des Prompts": "Error creating prompt", + "Name": "Name", + "Inhalt": "Content", + "Geben Sie einen Namen für den Prompt ein": "Enter a name for the prompt", + "Geben Sie den Inhalt des Prompts ein": "Enter the prompt content", + "Prompt erstellen": "Create Prompt", + "Benutzer auswählen": "Select Users", + "Alle auswählen": "Select all", + "Alle abwählen": "Deselect all", + "Benutzer werden geladen...": "Loading users...", + "Fehler beim Laden der Benutzer": "Error loading users", + "Keine Benutzer verfügbar": "No users available", + "Bitte wählen Sie mindestens einen Benutzer aus": "Please select at least one user", + "1 Benutzer ausgewählt": "1 user selected", + "{count} Benutzer ausgewählt": "{count} users selected", + "Benutzerdefinierter Titel (optional)": "Custom Title (optional)", + "Geben Sie einen benutzerdefinierten Titel ein": "Enter a custom title", + "Nachricht (optional)": "Message (optional)", + "Fügen Sie eine Nachricht für die Empfänger hinzu": "Add a message for recipients", + "Teilen": "Share", + "Wird geteilt...": "Sharing...", + "Fehler beim Teilen des Prompts": "Error sharing prompt", + "Prompt Einstellungen": "Prompt Settings", + "Einstellungen werden in zukünftigen 
Updates hinzugefügt.": "Settings content will be added here in future updates.", + "Gespräch fortsetzen...": "Continue the conversation...", + "Nachricht eingeben...": "Enter message...", + "Datei entfernen": "Remove file", + "Datei anhängen": "Attach file", + "You": "You", + "Klicken Sie, um zu öffnen": "Click to open", + "Dokument vorschauen": "Preview document", + "Dokument herunterladen": "Download document", + "Workflow fehlgeschlagen.": "Workflow failed.", + "Nochmal versuchen": "Try Again", + "Folgenachricht wird gesendet...": "Sending follow-up message...", + "Nachricht wird gesendet...": "Sending message...", + "Fehler:": "Error:", + "Fehler beim Laden der Nachrichten:": "Error loading messages:", + "Workflow-Nachrichten werden geladen...": "Loading workflow messages...", + "Beginne ein Gespräch, indem du eine Nachricht eingibst, eine Vorlage auswählst oder einen vorherigen Workflow fortsetzt …": "Start a conversation by entering a message, selecting a template, or continuing a previous workflow...", + "Oder geben Sie Ihre Nachricht ein...": "Or enter your message...", + "Workflow wird fortgesetzt": "Continuing workflow", + "Datei": "File", + "angehängt": "attached", + "Dateien anhängen": "Attach Files", + "Wird gesendet...": "Sending...", + "Wird verarbeitet...": "Processing...", + "Fortsetzen": "Continue", + "Senden": "Send", + "Stoppen": "Stop", + "Wird gestoppt...": "Stopping...", + "Dateien hier ablegen zum Anhängen": "Drop files here to attach", + "Datei-Ablage während Workflow deaktiviert": "File drop disabled during workflow", + "Chat leeren...": "New Chat", + "Verwende Vorlage:": "Using prompt:", + "Prompt auswählen...": "Select a prompt...", + "Vorschau wird geladen...": "Loading preview...", + "Keine Vorschau verfügbar": "No preview available", + "Vorschau schließen": "Close preview", + "Python": "Python", + "Workflows werden geladen...": "Loading workflows...", + "Fehler beim Laden der Workflows:": "Error loading workflows:", + "Sind Sie 
sicher, dass Sie Workflow \"{id}...\" löschen möchten?": "Are you sure you want to delete workflow \"{id}...\"?", + "Kein Nachrichteninhalt verfügbar": "No message content available", + "Unbekanntes Datum": "Unknown Date", + "Gestartet:": "Started:", + "Letzte Aktivität:": "Last Activity:", + "Runde": "Round", + "Workflow fortsetzen": "Resume workflow", + "Workflow löschen": "Delete workflow", + "Workflow wird gelöscht...": "Deleting workflow...", + "Noch keinen Workflow ausgewählt": "No workflow selected", + "Wähle einen Workflow aus der Liste aus oder starte einen neuen Workflow": "Select a workflow from the list or start a new workflow", + "Lade Fortschritt...": "Loading progress...", + "Aufgaben": "Tasks", + "Workflow Fortschritt": "Workflow Progress", + "Analysiere Workflow...": "Analyzing workflow...", + "Nach unten scrollen": "Scroll to bottom", + "FEHLER": "ERROR", + "FEHLGESCHLAGEN": "FAILED", + "GESTOPPT": "STOPPED", + "ABGEBROCHEN": "CANCELLED", + "LÄUFT": "RUNNING", + "VERARBEITUNG": "PROCESSING", + "ABGESCHLOSSEN": "COMPLETED", + "WARTEND": "PENDING", + "Unbekannte Größe": "Unknown Size", + "Hochgeladen": "Uploaded", + "KI-erstellt": "AI-created", + "Geteilt": "Shared", + "Datei vorschauen": "Preview file", + "Datei herunterladen": "Download file", + "Datei löschen": "Delete file", + "Klicken Sie erneut zum Bestätigen der Löschung": "Click again to confirm deletion", + "Zum Bestätigen klicken...": "Click to confirm...", + "Keine Dateien gefunden.": "No files found.", + "Keine mit Ihnen geteilten Dateien gefunden.": "No shared files found.", + "Keine von der KI erstellten Dateien gefunden.": "No AI-created files found.", + "Keine hochgeladenen Dateien gefunden.": "No uploaded files found.", + "Typ": "Type", + "Größe": "Size", + "Datum": "Date", + "Dateien auswählen": "Select files", + "Alle Dateien": "All Files", + "ausgewählt": "selected", + "Neue Datei hochladen": "Upload new file", + "Dateien werden geladen...": "Loading files...", + "Fehler beim 
Laden der Dateien:": "Error loading files:", + "Datei hochladen": "Upload file", + "Datei hier ablegen...": "Drop file here...", + "Lädt hoch...": "Uploading...", + "Dateien hierher ziehen": "Drag files here", + "oder": "or", + "Durchsuchen": "Browse", + "Ausgewählte Datei:": "Selected file:", + "Hochladen": "Upload", + "Wird hochgeladen...": "Uploading...", + "Datei erfolgreich hochgeladen!": "File uploaded successfully!", + "Beim Hochladen ist ein Fehler aufgetreten.": "An error occurred while uploading.", + "Beim Hochladen ist ein unerwarteter Fehler aufgetreten.": "An unexpected error occurred while uploading.", + "Dateien hier ablegen": "Drop files here", + "Dateien hochladen": "Upload files", + "Meine Uploads": "My Uploads", + "Erstellte Dateien": "Created Files", + "Geteilte Dateien": "Shared Files", + "Datei hinzufügen": "Add File", + "Dateiname": "File Name", + "MIME-Typ": "MIME Type", + "Dateigröße": "File Size", + "Erstellungsdatum": "Creation Date", + "Quelle": "Source", + "Bild": "Image", + "PDF": "PDF", + "Dokument": "Document", + "Tabelle": "Spreadsheet", + "Text": "Text", + "Video": "Video", + "Audio": "Audio", + "Vorschau": "Preview", + "Herunterladen": "Download", + "Sind Sie sicher, dass Sie die Datei \"{name}\" löschen möchten?": "Are you sure you want to delete the file \"{name}\"?", + "Dateivorschau": "File Preview", + "Vorschau für diesen Dateityp nicht verfügbar": "Preview not available for this file type", + "Fehler beim Laden der Vorschau": "Error loading preview", + "Textvorschau": "Text Preview", + "Diese Datei scheint beschädigt zu sein. Sie hat eine PDF-Erweiterung, enthält aber Textinhalte. Bitte laden Sie die Datei erneut hoch, falls möglich.": "This file appears to be corrupted. It has a PDF extension but contains text content. 
Please re-upload the file if possible.", + "Keine Workflows gefunden": "No workflows found", + "ID": "ID", + "Letzte Aktivität": "Last Activity", + "Läuft": "Running", + "Abgeschlossen": "Completed", + "Fehlgeschlagen": "Failed", + "Gestoppt": "Stopped", + "Wartend": "Pending", + "Workflow stoppen": "Stop workflow", + "Unbenannter Workflow": "Unnamed Workflow", + "Sind Sie sicher, dass Sie den Workflow \"{name}\" löschen möchten?": "Are you sure you want to delete workflow \"{name}\"?", + "Suchen...": "Search...", + "Daten aktualisieren": "Refresh data", + "Ja": "Yes", + "Nein": "No", + "Filter löschen": "Clear filter", + "{column} filtern": "Filter {column}", + "Aktionen": "Actions", + "Seite {page} von {total} ({count} Einträge)": "Page {page} of {total} ({count} items)", + "Einträge pro Seite:": "Items per page:", + "Erste Seite": "First page", + "Vorherige Seite": "Previous page", + "Nächste Seite": "Next page", + "Letzte Seite": "Last page", + "Alle Elemente auswählen": "Select all items", + "Dieses Element auswählen": "Select this item", + "Dieses Element kann nicht ausgewählt werden": "This item cannot be selected", + "Löschen ({count})": "Delete ({count})", + "Sind Sie sicher, dass Sie die {count} ausgewählten Elemente löschen möchten?": "Are you sure you want to delete the {count} selected items?", + "Prompts verwalten": "Manage your prompts", + "Prompts für Ihren KI-Assistenten erstellen und verwalten": "Create and manage prompts for your AI assistant", + "Prompt hinzufügen": "Add Prompt", + "Mandat-ID": "Mandate ID", + "Unbenannt": "Unnamed", + "Kopieren": "Copy", + "Keine Berechtigung zum Löschen des Prompts": "No permission to delete prompt", + "Sind Sie sicher, dass Sie \"{name}\" löschen möchten?": "Are you sure you want to delete \"{name}\"?", + "Sind Sie sicher, dass Sie {count} Prompts löschen möchten?": "Are you sure you want to delete {count} prompts?", + "Prompt-Name": "Prompt Name", + "Prompt-Inhalt": "Prompt Content", + "Prompt-Name darf 
nicht leer sein": "Prompt name cannot be empty", + "Prompt-Name darf 100 Zeichen nicht überschreiten": "Prompt name cannot exceed 100 characters", + "Prompt-Inhalt darf nicht leer sein": "Prompt content cannot be empty", + "Prompt-Inhalt darf 10.000 Zeichen nicht überschreiten": "Prompt content cannot exceed 10,000 characters", + "Fehler beim Laden der Prompts:": "Error loading prompts:", + "Prompt bearbeiten": "Edit Prompt", + "Prompt erfolgreich erstellt": "Prompt created successfully", + "Benutzer": "User", + "Berechtigung": "Privilege", + "Aktiviert": "Enabled", + "Auth-Anbieter": "Auth Authority", + "Passwort eingeben": "Enter password", + "Kein Benutzername": "No Username", + "Kein Name": "No Name", + "Keine E-Mail": "No Email", + "Keine Sprache": "No Language", + "Keine Berechtigung": "No Privilege", + "Kein Auth-Anbieter": "No Auth Authority", + "Betrachter": "Viewer", + "Administrator": "Admin", + "Systemadministrator": "Sysadmin", + "Benutzer bearbeiten": "Edit User", + "Benutzer hinzufügen": "Add User", + "Benutzer erstellen": "Create User", + "Benutzer löschen": "Delete User", + "Sind Sie sicher, dass Sie diesen Benutzer löschen möchten?": "Are you sure you want to delete this user?", + "Diese Aktion kann nicht rückgängig gemacht werden.": "This action cannot be undone.", + "Sind Sie sicher, dass Sie {count} Benutzer löschen möchten?": "Are you sure you want to delete {count} users?", + "Fehler beim Laden der Benutzer:": "Error loading users:", + "Team-Mitglieder": "Team Members", + "Team-Mitglieder verwalten": "Manage your team members", + "Team-Mitglieder verwalten, Berechtigungen festlegen und Zusammenarbeitseinstellungen konfigurieren": "Manage team members, set permissions, and configure collaboration settings", + "Mitglied hinzufügen": "Add Member", + "Passwort-Link senden": "Send password setup link", + "Passwort-Link gesendet!": "Password link sent!", + "Link konnte nicht gesendet werden": "Failed to send link", + "Neues Team-Mitglied 
erstellen": "Create New Team Member", + "Team-Mitglied erfolgreich erstellt": "Team member created successfully", + "Fehler beim Erstellen des Team-Mitglieds": "Error creating team member", + "SharePoint Dokumente": "SharePoint Documents", + "Fehler beim Laden der SharePoint Dokumente:": "Error loading SharePoint documents:", + "Verbindung testen": "Test Connection", + "Dokumente auflisten": "List Documents", + "Sites entdecken": "Discover Sites", + "Dokumentname": "Document Name", + "Pfad": "Path", + "Anzeigen": "View", + "Microsoft Verbindungen": "Microsoft Connections", + "Keine Microsoft-Verbindungen gefunden. Bitte erstellen Sie zuerst eine Verbindung.": "No Microsoft connections found. Please create a connection first.", + "Verbindungen werden geladen...": "Loading connections...", + "Entdeckte Sites": "Discovered Sites", + "Keine SharePoint-Sites gefunden": "No SharePoint sites found", + "Authentifizierungstoken abgelaufen oder ungültig. Bitte verbinden Sie Ihr Microsoft-Konto erneut.": "Authentication token expired or invalid. 
Please reconnect your Microsoft account.", + "Versuchen Sie, Ihr Microsoft-Konto auf der Verbindungsseite erneut zu verbinden.": "Try reconnecting your Microsoft account in the Connections page.", + "SharePoint Site URL": "SharePoint Site URL", + "Ordnerpfade": "Folder Paths", + "Sprach Integration": "Speech Integration", + "Unterstützt von": "Powered by", + "Virtual Assistant (VA)": "Virtual Assistant (VA)", + "Geben Sie Kunden einen schnellen und effizienten Selbstservice für Sprach- und Textanfragen, der 24/7 verfügbar ist.": "Give customers a fast and efficient self-service for voice and text queries that's available 24/7.", + "Speech Analytics (SA)": "Speech Analytics (SA)", + "Überwachen Sie automatisch 100% der Gespräche, um wertvolle Einblicke für Ihr Unternehmen zu erhalten.": "Automatically monitor 100% of conversations to get valuable insights for your business.", + "Voice Biometrics (VB)": "Voice Biometrics (VB)", + "Identifizieren und authentifizieren Sie Anrufer in Sekunden mit kontinuierlicher Verifizierung und Sicherheit.": "Identify and authenticate callers in seconds with continuous verification and security.", + "Knowledge Agent (KA)": "Knowledge Agent (KA)", + "Vereinheitlichen und liefern Sie Informationen an Ihre Kunden und Mitarbeiter, wann und wo sie sie benötigen.": "Unify and deliver info to your customers and staff wherever and whenever they need it.", + "Chat Platform (CP)": "Chat Platform (CP)", + "Bieten Sie Unterstützung im Live-Chat und setzen Sie intelligente Chatbots in allen Kanälen ein.": "Deliver assistance in live chat and deploy intelligent chatbots in all channels.", + "Agent Assist (AA)": "Agent Assist (AA)", + "Stellen Sie alles, was Ihre Agenten benötigen, in ihren Händen bereit, mit einem einheitlichen Agent-Desktop.": "Put everything your agents need at their fingertips, with a unified agent desktop.", + "Revolutionäre Telefonie-Integration mit Spitch.ai": "Revolutionary Telephony Integration with Spitch.ai", + "Erleben 
Sie die Zukunft der Mandantenkommunikation durch unsere strategische Partnerschaft mit Spitch.ai. Diese bahnbrechende Integration verwandelt Ihre PowerOn-Plattform in ein intelligentes Telefonie-System, das externe Mandanten nahtlos mit Unternehmen verbindet.": "Experience the future of client communication through our strategic partnership with Spitch.ai. This groundbreaking integration transforms your PowerOn platform into an intelligent telephony system that seamlessly connects external clients with companies.", + "Nahtloser Mandanten-Workflow:": "Seamless Client Workflow:", + "Von der Registrierung bis zur technischen Einrichtung - Ihr Mandant registriert sich bei PowerOn für Telefonie-Services, lädt Dokumente hoch und erhält automatisch eine technische SIP-Nummer von Spitch. Die Call-Weiterleitung kann jederzeit aktiviert oder deaktiviert werden, was maximale Flexibilität und BCM-Sicherheit gewährleistet.": "From registration to technical setup - your client registers with PowerOn for telephony services, uploads documents, and automatically receives a technical SIP number from Spitch. Call forwarding can be activated or deactivated at any time, ensuring maximum flexibility and BCM safety.", + "KI-gestützte Dokumentengenerierung:": "AI-Powered Document Generation:", + "Unsere bereits aktive Dokumenten-Extraktions-Engine generiert automatisch personalisierte Dokumente für Spitch, basierend auf Mandantenspezifischen Daten. Die KI nutzt FAQ-Datenbanken, Mitarbeiterinformationen und Service-Details, um jeden Anruf kontextuell und hochpersonalisiert zu gestalten.": "Our already active document extraction engine automatically generates personalized documents for Spitch based on client-specific data. 
The AI uses FAQ databases, employee information, and service details to make every call contextual and highly personalized.", + "Echtzeit-Datensynchronisation:": "Real-time Data Synchronization:", + "Spitch prüft vor jedem Anruf die Mandantenberechtigung bei PowerOn, während alle Datenänderungen zentral von PowerOn initiiert werden. Call-Transkripte werden in Echtzeit in Ihrer PowerOn-Datenbank gespeichert, mit vollständiger Mandantenisolation und Sicherheit. Bei Ausfällen werden Anrufe automatisch blockiert, um die Integrität zu gewährleisten.": "Spitch checks client authorization with PowerOn before each call, while all data changes are centrally initiated by PowerOn. Call transcripts are stored in real-time in your PowerOn database with complete client isolation and security. In case of failures, calls are automatically blocked to ensure integrity.", + "Kosteneinsparungen & Effizienz:": "Cost Savings & Efficiency:", + "Mandanten können jederzeit auf die technische SIP-Nummer umstellen und dabei erhebliche Telefoniekosten sparen. Die Integration funktioniert wie ein weiterer Connector (Outlook, SharePoint) und wird nahtlos in Ihren bestehenden Workflow integriert.": "Clients can switch to the technical SIP number at any time and save significant telephony costs. The integration works like another connector (Outlook, SharePoint) and is seamlessly integrated into your existing workflow.", + "Mehr erfahren": "Learn more", + "Zurück zur Sprach Integration": "Back to Speech Integration", + "Mandat erstellen": "Create Mandate", + "Unternehmensinformationen": "Company Information", + "Firmenname": "Company Name", + "Geben Sie Ihren Firmennamen ein": "Enter your company name", + "Branche": "Industry", + "z.B. Finanzdienstleistungen, Technologie, etc.": "e.g. 
Financial Services, Technology, etc.", + "Geschäftszeiten": "Business Hours", + "Zeitzone": "Timezone", + "Kontaktinformationen": "Contact Information", + "kontakt@firma.com": "contact@company.com", + "Telefonnummer": "Phone Number", + "+41 123 456 789": "+41 123 456 789", + "Straße": "Street", + "Postleitzahl": "Postal Code", + "Stadt": "City", + "Land": "Country", + "Kontakte einrichten": "Setup Contacts", + "Möchten Sie jetzt Kontakte für Ihr Mandat einrichten? Sie können dies auch später in den Einstellungen tun.": "Would you like to setup contacts for your mandate now? You can also do this later in settings.", + "Jetzt überspringen": "Skip for Now", + "Firmenname ist erforderlich": "Company name is required", + "Branche ist erforderlich": "Industry is required", + "E-Mail-Adresse ist erforderlich": "Email address is required", + "Bitte geben Sie eine gültige E-Mail-Adresse ein": "Please enter a valid email address", + "Telefonnummer ist erforderlich": "Phone number is required", + "Straße ist erforderlich": "Street is required", + "Postleitzahl ist erforderlich": "Postal code is required", + "Stadt ist erforderlich": "City is required", + "Land ist erforderlich": "Country is required", + "✓ Mandat eingereicht": "✓ Mandate Submitted", + "Neu starten": "Start Over", + "Mandat erfolgreich eingereicht!": "Mandate Submitted Successfully!", + "Vielen Dank für Ihr Interesse an unserer Sprach Integration powered by Spitch.ai. Wir haben Ihr Mandat erhalten und werden es in Kürze überprüfen.": "Thank you for your interest in our Speech Integration powered by Spitch.ai. 
We have received your mandate and will review it shortly.", + "Eingereichte Daten:": "Submitted Data:", + "Firma": "Company", + "Telefon": "Phone", + "Adresse": "Address", + "Was passiert als nächstes?": "What happens next?", + "E-Mail-Bestätigung": "Email Confirmation", + "Sie erhalten in den nächsten Minuten eine Bestätigungs-E-Mail.": "You will receive a confirmation email within the next few minutes.", + "Überprüfungsprozess": "Review Process", + "Unser Team wird Ihr Mandat innerhalb von 1-2 Werktagen überprüfen.": "Our team will review your mandate within 1-2 business days.", + "Einrichtungsanruf": "Setup Call", + "Bei Genehmigung planen wir einen Einrichtungsanruf zur Konfiguration Ihrer Integration.": "If approved, we'll schedule a setup call to configure your integration.", + "Fragen?": "Questions?", + "Falls Sie Fragen zu Ihrem Mandat oder dem Integrationsprozess haben, zögern Sie nicht, unser Support-Team zu kontaktieren.": "If you have any questions about your mandate or the integration process, please don't hesitate to contact our support team.", + "Sprach-Einstellungen": "Speech Settings", + "Neues Transkript": "New Transcript", + "Aktuelle Transkripte": "Recent Transcripts", + "Keine Transkripte vorhanden": "No transcripts available", + "Dauer": "Duration", + "Transkript": "Transcript", + "Transkript wird verarbeitet...": "Processing transcript...", + "Verarbeitung": "Processing", + "Zugriff verweigert": "Access Denied", + "Sie müssen sich zuerst für die Sprach-Integration anmelden, um auf die Transkriptverwaltung zuzugreifen.": "You must first sign up for speech integration to access transcript management.", + "Jetzt anmelden": "Sign Up Now", + "Betreff": "Subject", + "Startzeit": "Start Time", + "Endzeit": "End Time", + "Anrufer": "Caller", + "Empfänger": "Recipient", + "Tags": "Tags", + "Sprach-Integration Einstellungen": "Speech Integration Settings", + "Verwalten Sie Ihre Sprach-Integrations-Konfiguration und Einstellungen.": "Manage your speech 
integration configuration and preferences.", + "Geschäftszeiten & Zeitzone": "Business Hours & Timezone", + "Einstellungen erfolgreich gespeichert!": "Settings saved successfully!", + "Fehler beim Speichern der Einstellungen. Bitte versuchen Sie es erneut.": "Failed to save settings. Please try again.", + "Auf Standard zurücksetzen": "Reset to Default", + "Sind Sie sicher, dass Sie alle Sprach-Integrations-Einstellungen zurücksetzen möchten? Diese Aktion kann nicht rückgängig gemacht werden.": "Are you sure you want to reset all speech integration settings? This action cannot be undone.", + "Einstellungen wurden erfolgreich zurückgesetzt.": "Settings have been reset successfully.", + "Keine Sprach-Integrations-Daten gefunden. Bitte melden Sie sich zuerst an, um auf die Einstellungen zuzugreifen.": "No speech integration data found. Please sign up first to access settings.", + "Information": "Information", + "Ihre Anfrage wird verarbeitet...": "Processing your request...", + "Upload fehlgeschlagen. Bitte versuchen Sie es erneut.": "Upload failed. Please try again.", + "Datei bereits vorhanden": "File Already Exists", + "Die Datei \"{fileName}\" existiert bereits mit identischem Inhalt. Die vorhandene Datei wird wiederverwendet.": "The file \"{fileName}\" already exists with identical content. 
The existing file will be reused.", + "Automatisierungen": "Automations", + "Workflow-Automatisierungen verwalten": "Manage workflow automations", + "Geplante und automatisierte Workflows": "Scheduled and automated workflows", + "Neue Automatisierung": "New Automation", + "Ausführen": "Execute", + "Neue Automatisierung erstellen": "Create New Automation", + "Automatisierung erfolgreich erstellt": "Automation created successfully", + "Fehler beim Erstellen der Automatisierung": "Error creating automation", + "Basisdaten": "Base Data", + "Grundlegende Daten und Ressourcen": "Basic data and resources", + "Werkzeuge": "Utils", + "Werkzeuge und Hilfsmittel": "Utilities and tools", + "Verwaltungs- und Management-Tools": "Administration and management tools", + "Dieser Bereich enthält alle Verwaltungs- und Management-Tools für Ihren Arbeitsbereich.": "This section contains all administration and management tools for your workspace.", + "Verfügbare Tools": "Available Tools", + "Management-Tools umfassen:": "Management tools include:", + "Dateiverwaltung - Dokumente hochladen und organisieren": "File Management - Upload and organize documents", + "Benutzerverwaltung - Teammitglieder und Berechtigungen verwalten": "User Management - Manage team members and permissions", + "Systemeinstellungen - Arbeitsbereich-Einstellungen konfigurieren": "System Settings - Configure workspace settings", + "Datenverwaltung - Datenimporte und -exporte verwalten": "Data Management - Handle data imports and exports", + "Mandate": "Mandates", + "Mandate und Berechtigungen verwalten": "Manage mandates and permissions", + "Mandatsverwaltung": "Mandate management", + "Verwalten Sie Mandate und deren zugehörige Berechtigungen.": "Manage mandates and their associated permissions.", + "Mandat hinzufügen": "Add Mandate", + "Neues Mandat erstellen": "Create New Mandate", + "Mandat erfolgreich erstellt": "Mandate created successfully", + "Fehler beim Erstellen des Mandats": "Error creating mandate", + 
"RBAC-Regeln": "RBAC Rules", + "Rollenbasierte Zugriffssteuerungsregeln": "Role-Based Access Control rules", + "RBAC-Regelverwaltung": "RBAC rules management", + "Konfigurieren und verwalten Sie rollenbasierte Zugriffssteuerungsregeln.": "Configure and manage Role-Based Access Control rules.", + "RBAC-Regel hinzufügen": "Add RBAC Rule", + "Neue RBAC-Regel erstellen": "Create New RBAC Rule", + "RBAC-Regel erfolgreich erstellt": "RBAC rule created successfully", + "Fehler beim Erstellen der RBAC-Regel": "Error creating RBAC rule", + "RBAC-Rollen": "RBAC Roles", + "Rollenverwaltung": "Role management", + "RBAC-Rollenverwaltung": "RBAC role management", + "Erstellen und verwalten Sie RBAC-Rollen und deren Berechtigungen.": "Create and manage RBAC roles and their permissions.", + "Rolle hinzufügen": "Add Role", + "Neue Rolle erstellen": "Create New Role", + "Rolle erfolgreich erstellt": "Role created successfully", + "Fehler beim Erstellen der Rolle": "Error creating role", + "Admin-Einstellungen": "Admin Settings", + "Administrative Einstellungen": "Administrative settings", + "Konfigurieren Sie administrative Einstellungen und Systempräferenzen.": "Configure administrative settings and system preferences.", + "Start": "Start", + "Willkommen in Ihrem Arbeitsbereich": "Welcome to your workspace", + "Dies ist Ihr Ausgangspunkt für den Zugriff auf alle Arbeitsbereich-Features und -Tools.": "This is your starting point for accessing all workspace features and tools.", + "Schnellzugriff": "Quick Access", + "Beginnen Sie mit:": "Get started with:", + "Schnellzugriff - Springen Sie zu häufig verwendeten Features": "Quick Access - Jump to frequently used features", + "Letzte Aktivitäten - Sehen Sie Ihre neueste Arbeit": "Recent Activities - View your latest work", + "Übersicht - Sehen Sie den Arbeitsbereich-Status und Updates": "Overview - See workspace status and updates", + "Navigation - Erkunden Sie alle verfügbaren Tools": "Navigation - Explore all available tools", + 
"Projekte": "Projects", + "Projektverwaltung": "Project Management", + "Projektverwaltung und -organisation": "Project management and organization", + "Suchen Sie nach Standorten über Adresse oder Koordinaten, oder verwenden Sie natürliche Sprache, um Projekte zu erstellen und zu verwalten.": "Search for locations by address or coordinates, or use natural language to create and manage projects.", + "Befehl eingeben (z.B., \"Erstelle ein neues Projekt namens 'Hauptstrasse 42'\")": "Enter a command (e.g., \"Create a new project named 'Main Street 42'\")", + "Noch keine Befehle ausgeführt. Senden Sie einen Befehl, um Ergebnisse hier zu sehen.": "No commands executed yet. Send a command to see results here.", + "Datenverwaltung": "Data Management", + "Datenverwaltung mit Tabellen": "Data management with tables", + "Verwalten Sie Daten über Tabellen. Wählen Sie eine Tabelle aus oder verwenden Sie natürliche Sprache, um Befehle auszuführen.": "Manage data through tables. Select a table or use natural language to execute commands.", + "Sie können auch auf den Upload-Button klicken": "You can also click the upload button", + "Dateien werden verarbeitet...": "Processing files...", + "Fehler beim Verarbeiten der Dateien": "Error processing files", + "Treuhand": "Trustee", + "Treuhandverwaltung": "Trustee Management", + "Verwaltung von Treuhand-Organisationen, Verträgen und Buchungen": "Manage trustee organisations, contracts, and bookings", + "Organisationen": "Organisations", + "Trustee-Organisationen verwalten": "Manage trustee organisations", + "Verwaltung der Treuhand-Organisationen": "Management of trustee organisations", + "Neue Organisation": "New Organisation", + "z.B. treuhand-ag-zuerich": "e.g. trustee-ag-zurich", + "Bezeichnung": "Label", + "z.B. Treuhand AG Zürich": "e.g. 
Trustee AG Zurich", + "Neue Organisation erstellen": "Create New Organisation", + "Organisation erfolgreich erstellt": "Organisation created successfully", + "Fehler beim Erstellen der Organisation": "Error creating organisation", + "Rollen": "Roles", + "Trustee-Rollen verwalten": "Manage trustee roles", + "Verwaltung der Feature-spezifischen Rollen": "Management of feature-specific roles", + "Neue Rolle": "New Role", + "Rollen-ID": "Role ID", + "z.B. admin, operate, userreport": "e.g. admin, operate, userreport", + "Beschreibung": "Description", + "Beschreibung der Rolle": "Role description", + "Zugriff": "Access", + "Benutzer-Zugriff verwalten": "Manage user access", + "Verwaltung der Benutzerzugriffe auf Organisationen": "Management of user access to organisations", + "Neuer Zugriff": "New Access", + "Organisation": "Organisation", + "Rolle": "Role", + "Vertrag (optional)": "Contract (optional)", + "Leer = Zugriff auf alle Verträge": "Empty = Access to all contracts", + "Neuen Zugriff erstellen": "Create New Access", + "Zugriff erfolgreich erstellt": "Access created successfully", + "Fehler beim Erstellen des Zugriffs": "Error creating access", + "Verträge": "Contracts", + "Kundenverträge verwalten": "Manage customer contracts", + "Verwaltung der Kundenverträge": "Management of customer contracts", + "Neuer Vertrag": "New Contract", + "z.B. Muster AG 2026": "e.g. Muster AG 2026", + "Neuen Vertrag erstellen": "Create New Contract", + "Vertrag erfolgreich erstellt": "Contract created successfully", + "Fehler beim Erstellen des Vertrags": "Error creating contract", + "Dokumente": "Documents", + "Belege verwalten": "Manage receipts", + "Verwaltung der Dokumente und Belege": "Management of documents and receipts", + "Neues Dokument": "New Document", + "Vertrag": "Contract", + "z.B. Beleg.pdf": "e.g. 
Receipt.pdf", + "Dateityp": "File Type", + "Neues Dokument erstellen": "Create New Document", + "Dokument erfolgreich erstellt": "Document created successfully", + "Fehler beim Erstellen des Dokuments": "Error creating document", + "Positionen": "Positions", + "Buchungspositionen verwalten": "Manage booking positions", + "Verwaltung der Buchungspositionen (Speseneinträge)": "Management of booking positions (expense entries)", + "Neue Position": "New Position", + "Valutadatum": "Value Date", + "Name des Unternehmens": "Company name", + "Buchungswährung": "Booking Currency", + "Buchungsbetrag": "Booking Amount", + "Originalwährung": "Original Currency", + "Originalbetrag": "Original Amount", + "MwSt %": "VAT %", + "MwSt Betrag": "VAT Amount", + "Neue Position erstellen": "Create New Position", + "Position erfolgreich erstellt": "Position created successfully", + "Fehler beim Erstellen der Position": "Error creating position", + "UI-Sprachen": "UI languages", + "Globale Sprachsets verwalten (SysAdmin).": "Manage global UI language sets (SysAdmin).", + "Alle aktualisieren": "Update all", + "Alle Nicht-Standard-Sprachsets jetzt mit dem deutschen Master synchronisieren?": "Synchronize all non-default language sets with the German master now?", + "Neue Sprache": "New language", + "Anzeigename": "Display name", + "Hinzufügen": "Add", + "Keine Einträge": "No entries", + "Die Erstellung einer neuen Sprache kann AI-Guthaben auf Ihrem Mandats-Pool belasten. Fortfahren?": "Creating a new language may consume AI credits from your mandate pool. 
Continue?", + "Sprachset {code} wirklich löschen?": "Really delete language set {code}?", + "Fortfahren": "Continue" + }, + "status": "complete", + "isDefault": false + }, + { + "id": "fr", + "label": "Français", + "keys": { + "Zentrale": "Centre d'activité", + "Dateien": "Fichiers", + "Team-Bereich": "Espace équipe", + "Verbindungen": "Connexions", + "Workflows": "Workflows", + "Einstellungen": "Paramètres", + "SharePoint Test": "Test SharePoint", + "Sprache": "Langue", + "Transkriptverwaltung": "Gestion des Transcriptions", + "Darstellung": "Apparence", + "Über": "À propos", + "Version": "Version", + "Theme": "Thème", + "Wechseln Sie zwischen hellem und dunklem Modus": "Basculer entre le mode clair et sombre", + "Wählen Sie Ihre bevorzugte Sprache": "Choisissez votre langue préférée", + "Hell": "Clair", + "Dunkel": "Sombre", + "Zu hellem Modus wechseln": "Passer en mode clair", + "Zu dunklem Modus wechseln": "Passer en mode sombre", + "Benutzerinformationen": "Informations utilisateur", + "Verwalten Sie Ihre Kontoinformationen": "Gérez vos informations de compte", + "Benutzername": "Nom d'utilisateur", + "Vollständiger Name": "Nom complet", + "E-Mail-Adresse": "Adresse Email", + "Rufname am Telefon": "Nom au téléphone", + "Wie möchten Sie am Telefon genannt werden?": "Comment souhaitez-vous être appelé au téléphone ?", + "Berechtigungsstufe": "Niveau de privilège", + "Kontostatus": "Statut du compte", + "Authentifizierungsanbieter": "Fournisseur d'authentification", + "Aktiv": "Actif", + "Inaktiv": "Inactif", + "Benutzerinformationen werden geladen...": "Chargement des informations utilisateur...", + "Fehler beim Laden der Benutzerinformationen": "Erreur lors du chargement des informations utilisateur", + "Änderungen speichern": "Sauvegarder les Modifications", + "Speichern...": "Sauvegarde...", + "Benutzerinformationen erfolgreich aktualisiert": "Informations utilisateur mises à jour avec succès", + "Fehler beim Aktualisieren der Benutzerinformationen": "Erreur 
lors de la mise à jour des informations utilisateur", + "Verwaltet von {provider}": "Géré par {provider}", + "Dieses Feld wird von {provider} verwaltet und kann nicht geändert werden": "Ce champ est géré par {provider} et ne peut pas être modifié", + "Deutsch": "Deutsch", + "English": "English", + "Français": "Français", + "Laden...": "Chargement...", + "Fehler": "Erreur", + "Erfolgreich": "Succès", + "Abbrechen": "Annuler", + "Speichern": "Enregistrer", + "Löschen": "Supprimer", + "Bearbeiten": "Modifier", + "Schließen": "Fermer", + "Wiederholen": "Réessayer", + "Erstellen": "Créer", + "Erstellen...": "Création...", + "Anmelden": "Se connecter", + "Registrieren": "S'inscrire", + "Abmelden": "Se déconnecter", + "E-Mail": "Email", + "Passwort": "Mot de passe", + "Prompt Vorlage": "Modèle de prompt", + "Chatbereich": "Zone de chat", + "Workflow-Verlauf": "Historique des workflows", + "Log": "Journal", + "Workflow": "Workflow", + "Kein Workflow ausgewählt": "Aucun workflow sélectionné", + "Logs werden geladen...": "Chargement des logs...", + "Fehler beim Laden der Logs": "Erreur lors du chargement des logs", + "Keine Logs für diesen Workflow verfügbar": "Aucun log disponible pour ce workflow", + "Workflow läuft... Warte auf Logs...": "Workflow en cours... 
En attente des logs...", + "Logs konnten nicht geladen werden": "Échec du chargement des logs", + "INFO": "INFO", + "Workflow auswählen": "Sélectionner un workflow", + "Verfügbare Workflows": "Workflows disponibles", + "Keine Workflows verfügbar": "Aucun workflow disponible", + "Status": "Statut", + "Runden": "Tours", + "Nachrichten": "Messages", + "Token": "Jetons", + "Daten gesendet": "Données envoyées", + "Daten empfangen": "Données reçues", + "Erfolgsrate": "Taux de succès", + "Gestartet": "Démarré", + "Prompts werden geladen...": "Chargement des prompts...", + "Fehler beim Laden der Prompts": "Erreur lors du chargement des prompts", + "Erneut versuchen": "Réessayer", + "Neuer Prompt": "Nouveau prompt", + "Prompt": "Prompt", + "Prompts": "Prompts", + "Keine Prompts verfügbar": "Aucun prompt disponible", + "Erstellt": "Créé", + "Prompt ausführen": "Exécuter le prompt", + "Prompt teilen": "Partager le prompt", + "Prompt löschen": "Effacer le prompt", + "Klicken Sie erneut zum Bestätigen": "Cliquez à nouveau pour confirmer", + "Löschen...": "Suppression...", + "Zum Bestätigen klicken": "Cliquez pour confirmer", + "Fehler beim Löschen": "Erreur lors de la suppression", + "Prompt wird gelöscht...": "Suppression du prompt...", + "Verwalten Sie Ihre Service-Verbindungen": "Gérez vos connexions de service", + "Google verbinden": "Connecter Google", + "Microsoft verbinden": "Connecter Microsoft", + "Google-Verbindung hinzufügen": "Ajouter une connexion Google", + "Microsoft-Verbindung hinzufügen": "Ajouter une connexion Microsoft", + "Google-Verbindung erstellen": "Créer une connexion Google", + "Microsoft-Verbindung erstellen": "Créer une connexion Microsoft", + "{authority} Verbindung bearbeiten": "Modifier la connexion {authority}", + "Verbindung aktualisieren": "Mettre à jour la connexion", + "Service-Verbindungen": "Connexions de service", + "Verbindungsfehler": "Erreur de connexion", + "Trennungsfehler": "Erreur de déconnexion", + "Unbekannt": "Inconnu", + "Nicht 
verfügbar": "N/D", + "Ungültiges Datum": "Date invalide", + "Sind Sie sicher, dass Sie die {service} Verbindung löschen möchten?": "Êtes-vous sûr de vouloir supprimer la connexion {service} ?", + "Sind Sie sicher, dass Sie {count} Verbindungen löschen möchten?": "Êtes-vous sûr de vouloir supprimer {count} connexions ?", + "Service": "Service", + "Externer Benutzername": "Nom d'utilisateur externe", + "Externe E-Mail": "E-mail externe", + "Verbunden am": "Connecté le", + "Zuletzt geprüft": "Dernière vérification", + "Läuft ab am": "Expire le", + "Google": "Google", + "Microsoft": "Microsoft", + "Lokal": "Local", + "Externen Benutzernamen eingeben": "Entrez le nom d'utilisateur externe", + "Externe E-Mail-Adresse eingeben": "Entrez l'adresse e-mail externe", + "Aktualisieren": "Mettre à jour", + "Verbinden": "Connecter", + "Neuen Prompt erstellen": "Créer un nouveau prompt", + "Name ist erforderlich": "Le nom est requis", + "Inhalt ist erforderlich": "Le contenu est requis", + "Fehler beim Erstellen des Prompts": "Erreur lors de la création du prompt", + "Name": "Nom", + "Inhalt": "Contenu", + "Geben Sie einen Namen für den Prompt ein": "Entrez un nom pour le prompt", + "Geben Sie den Inhalt des Prompts ein": "Entrez le contenu du prompt", + "Prompt erstellen": "Créer le prompt", + "Benutzer auswählen": "Sélectionner les utilisateurs", + "Alle auswählen": "Tout sélectionner", + "Alle abwählen": "Tout désélectionner", + "Benutzer werden geladen...": "Chargement des utilisateurs...", + "Fehler beim Laden der Benutzer": "Erreur lors du chargement des utilisateurs", + "Keine Benutzer verfügbar": "Aucun utilisateur disponible", + "Bitte wählen Sie mindestens einen Benutzer aus": "Veuillez sélectionner au moins un utilisateur", + "1 Benutzer ausgewählt": "1 utilisateur sélectionné", + "{count} Benutzer ausgewählt": "{count} utilisateurs sélectionnés", + "Benutzerdefinierter Titel (optional)": "Titre personnalisé (facultatif)", + "Geben Sie einen benutzerdefinierten Titel 
ein": "Entrez un titre personnalisé", + "Nachricht (optional)": "Message (facultatif)", + "Fügen Sie eine Nachricht für die Empfänger hinzu": "Ajoutez un message pour les destinataires", + "Teilen": "Partager", + "Wird geteilt...": "Partage en cours...", + "Fehler beim Teilen des Prompts": "Erreur lors du partage du prompt", + "Prompt Einstellungen": "Paramètres de prompt", + "Einstellungen werden in zukünftigen Updates hinzugefügt.": "Le contenu des paramètres sera ajouté dans les futures mises à jour.", + "Gespräch fortsetzen...": "Continuer la conversation...", + "Nachricht eingeben...": "Entrez votre message...", + "Datei entfernen": "Supprimer le fichier", + "Datei anhängen": "Joindre un fichier", + "You": "Vous", + "Klicken Sie, um zu öffnen": "Cliquez pour ouvrir", + "Dokument vorschauen": "Aperçu du document", + "Dokument herunterladen": "Télécharger le document", + "Workflow fehlgeschlagen.": "Échec du workflow.", + "Nochmal versuchen": "Réessayer", + "Folgenachricht wird gesendet...": "Envoi du message de suivi...", + "Nachricht wird gesendet...": "Envoi du message...", + "Fehler:": "Erreur:", + "Fehler beim Laden der Nachrichten:": "Erreur lors du chargement des messages:", + "Workflow-Nachrichten werden geladen...": "Chargement des messages de workflow...", + "Beginne ein Gespräch, indem du eine Nachricht eingibst, eine Vorlage auswählst oder einen vorherigen Workflow fortsetzt …": "Commencez une conversation en entrant un message, en sélectionnant un modèle ou en continuant un workflow précédent...", + "Oder geben Sie Ihre Nachricht ein...": "Ou entrez votre message...", + "Workflow wird fortgesetzt": "Workflow en cours", + "Datei": "Fichier", + "angehängt": "attaché", + "Dateien anhängen": "Joindre des fichiers", + "Wird gesendet...": "Envoi...", + "Wird verarbeitet...": "Traitement...", + "Fortsetzen": "Continuer", + "Senden": "Envoyer", + "Stoppen": "Arrêter", + "Wird gestoppt...": "Arrêt...", + "Dateien hier ablegen zum Anhängen": "Déposez les 
fichiers ici pour les joindre", + "Datei-Ablage während Workflow deaktiviert": "Dépôt de fichiers désactivé pendant le workflow", + "Chat leeren...": "Nouveau Chat", + "Verwende Vorlage:": "Utilisation du modèle:", + "Prompt auswählen...": "Sélectionner un prompt...", + "Vorschau wird geladen...": "Chargement de l'aperçu...", + "Keine Vorschau verfügbar": "Aucun aperçu disponible", + "Vorschau schließen": "Fermer l'aperçu", + "Python": "Python", + "Workflows werden geladen...": "Chargement des workflows...", + "Fehler beim Laden der Workflows:": "Erreur lors du chargement des workflows:", + "Sind Sie sicher, dass Sie Workflow \"{id}...\" löschen möchten?": "Êtes-vous sûr de vouloir supprimer le workflow \"{id}...\"?", + "Kein Nachrichteninhalt verfügbar": "Aucun contenu de message disponible", + "Unbekanntes Datum": "Date inconnue", + "Gestartet:": "Démarré:", + "Letzte Aktivität:": "Dernière activité:", + "Runde": "Tour", + "Workflow fortsetzen": "Reprendre le workflow", + "Workflow löschen": "Supprimer le workflow", + "Workflow wird gelöscht...": "Suppression du workflow...", + "Noch keinen Workflow ausgewählt": "Aucun workflow sélectionné", + "Wähle einen Workflow aus der Liste aus oder starte einen neuen Workflow": "Sélectionnez un workflow dans la liste ou démarrez un nouveau workflow", + "Lade Fortschritt...": "Chargement du progrès...", + "Aufgaben": "Tâches", + "Workflow Fortschritt": "Progression du workflow", + "Analysiere Workflow...": "Analyse du workflow...", + "Nach unten scrollen": "Faire défiler vers le bas", + "FEHLER": "ERREUR", + "FEHLGESCHLAGEN": "ÉCHEC", + "GESTOPPT": "ARRÊTÉ", + "ABGEBROCHEN": "ANNULÉ", + "LÄUFT": "EN COURS", + "VERARBEITUNG": "TRAITEMENT", + "ABGESCHLOSSEN": "TERMINÉ", + "WARTEND": "EN ATTENTE", + "Unbekannte Größe": "Taille inconnue", + "Hochgeladen": "Téléchargés", + "KI-erstellt": "Créés par IA", + "Geteilt": "Partagés", + "Datei vorschauen": "Aperçu du fichier", + "Datei herunterladen": "Télécharger le fichier", + "Datei 
löschen": "Supprimer le fichier", + "Klicken Sie erneut zum Bestätigen der Löschung": "Cliquez à nouveau pour confirmer la suppression", + "Zum Bestätigen klicken...": "Cliquez pour confirmer...", + "Keine Dateien gefunden.": "Aucun fichier trouvé.", + "Keine mit Ihnen geteilten Dateien gefunden.": "Aucun fichier partagé trouvé.", + "Keine von der KI erstellten Dateien gefunden.": "Aucun fichier créé par IA trouvé.", + "Keine hochgeladenen Dateien gefunden.": "Aucun fichier téléchargé trouvé.", + "Typ": "Type", + "Größe": "Taille", + "Datum": "Date", + "Dateien auswählen": "Sélectionner des fichiers", + "Alle Dateien": "Tous les fichiers", + "ausgewählt": "sélectionné(s)", + "Neue Datei hochladen": "Télécharger un nouveau fichier", + "Dateien werden geladen...": "Chargement des fichiers...", + "Fehler beim Laden der Dateien:": "Erreur lors du chargement des fichiers:", + "Datei hochladen": "Télécharger un fichier", + "Datei hier ablegen...": "Déposer le fichier ici...", + "Lädt hoch...": "Téléchargement...", + "Dateien hierher ziehen": "Glisser les fichiers ici", + "oder": "ou", + "Durchsuchen": "Parcourir", + "Ausgewählte Datei:": "Fichier sélectionné:", + "Hochladen": "Télécharger", + "Wird hochgeladen...": "Téléchargement...", + "Datei erfolgreich hochgeladen!": "Fichier téléchargé avec succès !", + "Beim Hochladen ist ein Fehler aufgetreten.": "Une erreur s'est produite lors du téléchargement.", + "Beim Hochladen ist ein unerwarteter Fehler aufgetreten.": "Une erreur inattendue s'est produite lors du téléchargement.", + "Dateien hier ablegen": "Déposer les fichiers ici", + "Dateien hochladen": "Télécharger des fichiers", + "Meine Uploads": "Mes téléchargements", + "Erstellte Dateien": "Fichiers créés", + "Geteilte Dateien": "Fichiers partagés", + "Datei hinzufügen": "Ajouter un fichier", + "Dateiname": "Nom du fichier", + "MIME-Typ": "Type MIME", + "Dateigröße": "Taille du fichier", + "Erstellungsdatum": "Date de création", + "Quelle": "Source", + "Bild": 
"Image", + "PDF": "PDF", + "Dokument": "Document", + "Tabelle": "Feuille de calcul", + "Text": "Texte", + "Video": "Vidéo", + "Audio": "Audio", + "Vorschau": "Aperçu", + "Herunterladen": "Télécharger", + "Sind Sie sicher, dass Sie die Datei \"{name}\" löschen möchten?": "Êtes-vous sûr de vouloir supprimer le fichier \"{name}\"?", + "Dateivorschau": "Aperçu du fichier", + "Vorschau für diesen Dateityp nicht verfügbar": "Aperçu non disponible pour ce type de fichier", + "Fehler beim Laden der Vorschau": "Erreur lors du chargement de l'aperçu", + "Textvorschau": "Aperçu du texte", + "Diese Datei scheint beschädigt zu sein. Sie hat eine PDF-Erweiterung, enthält aber Textinhalte. Bitte laden Sie die Datei erneut hoch, falls möglich.": "Ce fichier semble être corrompu. Il a une extension PDF mais contient du contenu texte. Veuillez le télécharger à nouveau si possible.", + "Keine Workflows gefunden": "Aucun workflow trouvé", + "ID": "ID", + "Letzte Aktivität": "Dernière activité", + "Läuft": "En cours", + "Abgeschlossen": "Terminé", + "Fehlgeschlagen": "Échoué", + "Gestoppt": "Arrêté", + "Wartend": "En attente", + "Workflow stoppen": "Arrêter le workflow", + "Unbenannter Workflow": "Workflow sans nom", + "Sind Sie sicher, dass Sie den Workflow \"{name}\" löschen möchten?": "Êtes-vous sûr de vouloir supprimer le workflow \"{name}\"?", + "Suchen...": "Rechercher...", + "Daten aktualisieren": "Actualiser les données", + "Ja": "Oui", + "Nein": "Non", + "Filter löschen": "Effacer le filtre", + "{column} filtern": "Filtrer {column}", + "Aktionen": "Actions", + "Seite {page} von {total} ({count} Einträge)": "Page {page} sur {total} ({count} éléments)", + "Einträge pro Seite:": "Éléments par page:", + "Erste Seite": "Première page", + "Vorherige Seite": "Page précédente", + "Nächste Seite": "Page suivante", + "Letzte Seite": "Dernière page", + "Alle Elemente auswählen": "Sélectionner tous les éléments", + "Dieses Element auswählen": "Sélectionner cet élément", + "Dieses Element 
kann nicht ausgewählt werden": "Cet élément ne peut pas être sélectionné", + "Löschen ({count})": "Supprimer ({count})", + "Sind Sie sicher, dass Sie die {count} ausgewählten Elemente löschen möchten?": "Êtes-vous sûr de vouloir supprimer les {count} éléments sélectionnés ?", + "Prompts verwalten": "Gérer vos prompts", + "Prompts für Ihren KI-Assistenten erstellen und verwalten": "Créer et gérer des prompts pour votre assistant IA", + "Prompt hinzufügen": "Ajouter un prompt", + "Mandat-ID": "ID Mandat", + "Unbenannt": "Sans nom", + "Kopieren": "Copier", + "Keine Berechtigung zum Löschen des Prompts": "Aucune permission de supprimer l'invite", + "Sind Sie sicher, dass Sie \"{name}\" löschen möchten?": "Êtes-vous sûr de vouloir supprimer \"{name}\" ?", + "Sind Sie sicher, dass Sie {count} Prompts löschen möchten?": "Êtes-vous sûr de vouloir supprimer {count} prompts ?", + "Prompt-Name": "Nom du prompt", + "Prompt-Inhalt": "Contenu du prompt", + "Prompt-Name darf nicht leer sein": "Le nom du prompt ne peut pas être vide", + "Prompt-Name darf 100 Zeichen nicht überschreiten": "Le nom du prompt ne peut pas dépasser 100 caractères", + "Prompt-Inhalt darf nicht leer sein": "Le contenu du prompt ne peut pas être vide", + "Prompt-Inhalt darf 10.000 Zeichen nicht überschreiten": "Le contenu du prompt ne peut pas dépasser 10 000 caractères", + "Fehler beim Laden der Prompts:": "Erreur lors du chargement des prompts:", + "Prompt bearbeiten": "Modifier le prompt", + "Prompt erfolgreich erstellt": "Prompt créé avec succès", + "Benutzer": "Utilisateur", + "Berechtigung": "Privilège", + "Aktiviert": "Activé", + "Auth-Anbieter": "Autorité d'authentification", + "Passwort eingeben": "Entrez le mot de passe", + "Kein Benutzername": "Aucun nom d'utilisateur", + "Kein Name": "Aucun nom", + "Keine E-Mail": "Aucun e-mail", + "Keine Sprache": "Aucune langue", + "Keine Berechtigung": "Aucun privilège", + "Kein Auth-Anbieter": "Aucune autorité d'authentification", + "Betrachter": 
"Observateur", + "Administrator": "Administrateur", + "Systemadministrator": "Administrateur système", + "Benutzer bearbeiten": "Modifier l'utilisateur", + "Benutzer hinzufügen": "Ajouter un utilisateur", + "Benutzer erstellen": "Créer l'utilisateur", + "Benutzer löschen": "Supprimer l'utilisateur", + "Sind Sie sicher, dass Sie diesen Benutzer löschen möchten?": "Êtes-vous sûr de vouloir supprimer cet utilisateur ?", + "Diese Aktion kann nicht rückgängig gemacht werden.": "Cette action ne peut pas être annulée.", + "Sind Sie sicher, dass Sie {count} Benutzer löschen möchten?": "Êtes-vous sûr de vouloir supprimer {count} utilisateurs ?", + "Fehler beim Laden der Benutzer:": "Erreur lors du chargement des utilisateurs:", + "Team-Mitglieder": "Membres de l'équipe", + "Team-Mitglieder verwalten": "Gérer les membres de votre équipe", + "Team-Mitglieder verwalten, Berechtigungen festlegen und Zusammenarbeitseinstellungen konfigurieren": "Gérer les membres de l'équipe, définir les permissions et configurer les paramètres de collaboration", + "Mitglied hinzufügen": "Ajouter un membre", + "Passwort-Link senden": "Envoyer le lien de mot de passe", + "Passwort-Link gesendet!": "Lien de mot de passe envoyé!", + "Link konnte nicht gesendet werden": "Échec de l'envoi du lien", + "Neues Team-Mitglied erstellen": "Créer un nouveau membre de l'équipe", + "Team-Mitglied erfolgreich erstellt": "Membre de l'équipe créé avec succès", + "Fehler beim Erstellen des Team-Mitglieds": "Erreur lors de la création du membre de l'équipe", + "SharePoint Dokumente": "Documents SharePoint", + "Fehler beim Laden der SharePoint Dokumente:": "Erreur lors du chargement des documents SharePoint:", + "Verbindung testen": "Tester la connexion", + "Dokumente auflisten": "Lister les documents", + "Sites entdecken": "Découvrir les sites", + "Dokumentname": "Nom du document", + "Pfad": "Chemin", + "Anzeigen": "Voir", + "Microsoft Verbindungen": "Connexions Microsoft", + "Keine Microsoft-Verbindungen 
gefunden. Bitte erstellen Sie zuerst eine Verbindung.": "Aucune connexion Microsoft trouvée. Veuillez d'abord créer une connexion.", + "Verbindungen werden geladen...": "Chargement des connexions...", + "Entdeckte Sites": "Sites découverts", + "Keine SharePoint-Sites gefunden": "Aucun site SharePoint trouvé", + "Authentifizierungstoken abgelaufen oder ungültig. Bitte verbinden Sie Ihr Microsoft-Konto erneut.": "Token d'authentification expiré ou invalide. Veuillez reconnecter votre compte Microsoft.", + "Versuchen Sie, Ihr Microsoft-Konto auf der Verbindungsseite erneut zu verbinden.": "Essayez de reconnecter votre compte Microsoft dans la page Connexions.", + "SharePoint Site URL": "URL du site SharePoint", + "Ordnerpfade": "Chemins des dossiers", + "Sprach Integration": "Intégration Vocale", + "Unterstützt von": "Alimenté par", + "Virtual Assistant (VA)": "Assistant Virtuel (VA)", + "Geben Sie Kunden einen schnellen und effizienten Selbstservice für Sprach- und Textanfragen, der 24/7 verfügbar ist.": "Offrez aux clients un libre-service rapide et efficace pour les requêtes vocales et textuelles disponible 24h/24.", + "Speech Analytics (SA)": "Analyse Vocale (SA)", + "Überwachen Sie automatisch 100% der Gespräche, um wertvolle Einblicke für Ihr Unternehmen zu erhalten.": "Surveillez automatiquement 100% des conversations pour obtenir des insights précieux pour votre entreprise.", + "Voice Biometrics (VB)": "Biométrie Vocale (VB)", + "Identifizieren und authentifizieren Sie Anrufer in Sekunden mit kontinuierlicher Verifizierung und Sicherheit.": "Identifiez et authentifiez les appelants en quelques secondes avec une vérification et sécurité continues.", + "Knowledge Agent (KA)": "Agent de Connaissance (KA)", + "Vereinheitlichen und liefern Sie Informationen an Ihre Kunden und Mitarbeiter, wann und wo sie sie benötigen.": "Unifiez et livrez des informations à vos clients et employés où et quand ils en ont besoin.", + "Chat Platform (CP)": "Plateforme de Chat (CP)", 
+ "Bieten Sie Unterstützung im Live-Chat und setzen Sie intelligente Chatbots in allen Kanälen ein.": "Offrez une assistance en chat en direct et déployez des chatbots intelligents sur tous les canaux.", + "Agent Assist (AA)": "Assistance Agent (AA)", + "Stellen Sie alles, was Ihre Agenten benötigen, in ihren Händen bereit, mit einem einheitlichen Agent-Desktop.": "Mettez tout ce dont vos agents ont besoin à portée de main, avec un bureau d'agent unifié.", + "Revolutionäre Telefonie-Integration mit Spitch.ai": "Intégration Téléphonique Révolutionnaire avec Spitch.ai", + "Erleben Sie die Zukunft der Mandantenkommunikation durch unsere strategische Partnerschaft mit Spitch.ai. Diese bahnbrechende Integration verwandelt Ihre PowerOn-Plattform in ein intelligentes Telefonie-System, das externe Mandanten nahtlos mit Unternehmen verbindet.": "Découvrez l'avenir de la communication client grâce à notre partenariat stratégique avec Spitch.ai. Cette intégration révolutionnaire transforme votre plateforme PowerOn en un système téléphonique intelligent qui connecte de manière transparente les clients externes avec les entreprises.", + "Nahtloser Mandanten-Workflow:": "Workflow Client Transparent:", + "Von der Registrierung bis zur technischen Einrichtung - Ihr Mandant registriert sich bei PowerOn für Telefonie-Services, lädt Dokumente hoch und erhält automatisch eine technische SIP-Nummer von Spitch. Die Call-Weiterleitung kann jederzeit aktiviert oder deaktiviert werden, was maximale Flexibilität und BCM-Sicherheit gewährleistet.": "De l'inscription à la configuration technique - votre client s'inscrit auprès de PowerOn pour les services téléphoniques, télécharge des documents et reçoit automatiquement un numéro SIP technique de Spitch. 
Le transfert d'appel peut être activé ou désactivé à tout moment, garantissant une flexibilité maximale et la sécurité BCM.", + "KI-gestützte Dokumentengenerierung:": "Génération de Documents alimentée par l'IA:", + "Unsere bereits aktive Dokumenten-Extraktions-Engine generiert automatisch personalisierte Dokumente für Spitch, basierend auf Mandantenspezifischen Daten. Die KI nutzt FAQ-Datenbanken, Mitarbeiterinformationen und Service-Details, um jeden Anruf kontextuell und hochpersonalisiert zu gestalten.": "Notre moteur d'extraction de documents déjà actif génère automatiquement des documents personnalisés pour Spitch basés sur les données spécifiques au client. L'IA utilise les bases de données FAQ, les informations employés et les détails de service pour rendre chaque appel contextuel et hautement personnalisé.", + "Echtzeit-Datensynchronisation:": "Synchronisation de Données en Temps Réel:", + "Spitch prüft vor jedem Anruf die Mandantenberechtigung bei PowerOn, während alle Datenänderungen zentral von PowerOn initiiert werden. Call-Transkripte werden in Echtzeit in Ihrer PowerOn-Datenbank gespeichert, mit vollständiger Mandantenisolation und Sicherheit. Bei Ausfällen werden Anrufe automatisch blockiert, um die Integrität zu gewährleisten.": "Spitch vérifie l'autorisation client avec PowerOn avant chaque appel, tandis que tous les changements de données sont initiés centralement par PowerOn. Les transcriptions d'appels sont stockées en temps réel dans votre base de données PowerOn avec une isolation complète du client et la sécurité. En cas de panne, les appels sont automatiquement bloqués pour assurer l'intégrité.", + "Kosteneinsparungen & Effizienz:": "Économies de Coûts & Efficacité:", + "Mandanten können jederzeit auf die technische SIP-Nummer umstellen und dabei erhebliche Telefoniekosten sparen. 
Die Integration funktioniert wie ein weiterer Connector (Outlook, SharePoint) und wird nahtlos in Ihren bestehenden Workflow integriert.": "Les clients peuvent basculer sur le numéro SIP technique à tout moment et économiser des coûts téléphoniques significatifs. L'intégration fonctionne comme un autre connecteur (Outlook, SharePoint) et est intégrée de manière transparente dans votre workflow existant.", + "Mehr erfahren": "En savoir plus", + "Zurück zur Sprach Integration": "Retour à l'Intégration Vocale", + "Mandat erstellen": "Créer le Mandat", + "Unternehmensinformationen": "Informations de l'Entreprise", + "Firmenname": "Nom de l'Entreprise", + "Geben Sie Ihren Firmennamen ein": "Entrez le nom de votre entreprise", + "Branche": "Secteur", + "z.B. Finanzdienstleistungen, Technologie, etc.": "ex. Services Financiers, Technologie, etc.", + "Geschäftszeiten": "Heures d'Ouverture", + "Zeitzone": "Fuseau Horaire", + "Kontaktinformationen": "Informations de Contact", + "kontakt@firma.com": "contact@entreprise.com", + "Telefonnummer": "Numéro de Téléphone", + "+41 123 456 789": "+41 123 456 789", + "Straße": "Rue", + "Postleitzahl": "Code Postal", + "Stadt": "Ville", + "Land": "Pays", + "Kontakte einrichten": "Configurer les Contacts", + "Möchten Sie jetzt Kontakte für Ihr Mandat einrichten? Sie können dies auch später in den Einstellungen tun.": "Souhaitez-vous configurer les contacts pour votre mandat maintenant ? 
Vous pouvez également le faire plus tard dans les paramètres.", + "Jetzt überspringen": "Ignorer pour l'Instant", + "Firmenname ist erforderlich": "Le nom de l'entreprise est requis", + "Branche ist erforderlich": "Le secteur d'activité est requis", + "E-Mail-Adresse ist erforderlich": "L'adresse email est requise", + "Bitte geben Sie eine gültige E-Mail-Adresse ein": "Veuillez entrer une adresse email valide", + "Telefonnummer ist erforderlich": "Le numéro de téléphone est requis", + "Straße ist erforderlich": "La rue est requise", + "Postleitzahl ist erforderlich": "Le code postal est requis", + "Stadt ist erforderlich": "La ville est requise", + "Land ist erforderlich": "Le pays est requis", + "✓ Mandat eingereicht": "✓ Mandat Soumis", + "Neu starten": "Recommencer", + "Mandat erfolgreich eingereicht!": "Mandat Soumis avec Succès !", + "Vielen Dank für Ihr Interesse an unserer Sprach Integration powered by Spitch.ai. Wir haben Ihr Mandat erhalten und werden es in Kürze überprüfen.": "Merci pour votre intérêt pour notre Intégration Vocale powered by Spitch.ai. 
Nous avons reçu votre mandat et l'examinerons sous peu.", + "Eingereichte Daten:": "Données Soumises :", + "Firma": "Entreprise", + "Telefon": "Téléphone", + "Adresse": "Adresse", + "Was passiert als nächstes?": "Que se passe-t-il ensuite ?", + "E-Mail-Bestätigung": "Confirmation par Email", + "Sie erhalten in den nächsten Minuten eine Bestätigungs-E-Mail.": "Vous recevrez un email de confirmation dans les prochaines minutes.", + "Überprüfungsprozess": "Processus de Révision", + "Unser Team wird Ihr Mandat innerhalb von 1-2 Werktagen überprüfen.": "Notre équipe examinera votre mandat dans les 1-2 jours ouvrables.", + "Einrichtungsanruf": "Appel de Configuration", + "Bei Genehmigung planen wir einen Einrichtungsanruf zur Konfiguration Ihrer Integration.": "Si approuvé, nous planifierons un appel de configuration pour configurer votre intégration.", + "Fragen?": "Questions ?", + "Falls Sie Fragen zu Ihrem Mandat oder dem Integrationsprozess haben, zögern Sie nicht, unser Support-Team zu kontaktieren.": "Si vous avez des questions sur votre mandat ou le processus d'intégration, n'hésitez pas à contacter notre équipe de support.", + "Sprach-Einstellungen": "Paramètres Vocaux", + "Neues Transkript": "Nouvelle Transcription", + "Aktuelle Transkripte": "Transcriptions Récentes", + "Keine Transkripte vorhanden": "Aucune transcription disponible", + "Dauer": "Durée", + "Transkript": "Transcription", + "Transkript wird verarbeitet...": "Traitement de la transcription...", + "Verarbeitung": "En cours", + "Zugriff verweigert": "Accès Refusé", + "Sie müssen sich zuerst für die Sprach-Integration anmelden, um auf die Transkriptverwaltung zuzugreifen.": "Vous devez d'abord vous inscrire à l'intégration vocale pour accéder à la gestion des transcriptions.", + "Jetzt anmelden": "S'inscrire Maintenant", + "Betreff": "Sujet", + "Startzeit": "Heure de Début", + "Endzeit": "Heure de Fin", + "Anrufer": "Appelant", + "Empfänger": "Destinataire", + "Tags": "Étiquettes", + 
"Sprach-Integration Einstellungen": "Paramètres d'Intégration Vocale", + "Verwalten Sie Ihre Sprach-Integrations-Konfiguration und Einstellungen.": "Gérez votre configuration et vos préférences d'intégration vocale.", + "Geschäftszeiten & Zeitzone": "Heures d'Ouverture et Fuseau Horaire", + "Einstellungen erfolgreich gespeichert!": "Paramètres sauvegardés avec succès !", + "Fehler beim Speichern der Einstellungen. Bitte versuchen Sie es erneut.": "Échec de la sauvegarde des paramètres. Veuillez réessayer.", + "Auf Standard zurücksetzen": "Réinitialiser par Défaut", + "Sind Sie sicher, dass Sie alle Sprach-Integrations-Einstellungen zurücksetzen möchten? Diese Aktion kann nicht rückgängig gemacht werden.": "Êtes-vous sûr de vouloir réinitialiser tous les paramètres d'intégration vocale ? Cette action ne peut pas être annulée.", + "Einstellungen wurden erfolgreich zurückgesetzt.": "Les paramètres ont été réinitialisés avec succès.", + "Keine Sprach-Integrations-Daten gefunden. Bitte melden Sie sich zuerst an, um auf die Einstellungen zuzugreifen.": "Aucune donnée d'intégration vocale trouvée. Veuillez d'abord vous inscrire pour accéder aux paramètres.", + "Information": "Information", + "Ihre Anfrage wird verarbeitet...": "Traitement de votre demande...", + "Upload fehlgeschlagen. Bitte versuchen Sie es erneut.": "Échec du téléchargement. Veuillez réessayer.", + "Datei bereits vorhanden": "Fichier Déjà Existant", + "Die Datei \"{fileName}\" existiert bereits mit identischem Inhalt. Die vorhandene Datei wird wiederverwendet.": "Le fichier \"{fileName}\" existe déjà avec un contenu identique. 
Le fichier existant sera réutilisé.", + "Automatisierungen": "Automatisations", + "Workflow-Automatisierungen verwalten": "Gérer les automatisations de workflow", + "Geplante und automatisierte Workflows": "Workflows planifiés et automatisés", + "Neue Automatisierung": "Nouvelle Automatisation", + "Ausführen": "Exécuter", + "Neue Automatisierung erstellen": "Créer une Nouvelle Automatisation", + "Automatisierung erfolgreich erstellt": "Automatisation créée avec succès", + "Fehler beim Erstellen der Automatisierung": "Erreur lors de la création de l'automatisation", + "Basisdaten": "Données de Base", + "Grundlegende Daten und Ressourcen": "Données et ressources de base", + "Werkzeuge": "Outils", + "Werkzeuge und Hilfsmittel": "Outils et utilitaires", + "Verwaltungs- und Management-Tools": "Outils d'administration et de gestion", + "Dieser Bereich enthält alle Verwaltungs- und Management-Tools für Ihren Arbeitsbereich.": "Cette section contient tous les outils d'administration et de gestion pour votre espace de travail.", + "Verfügbare Tools": "Outils Disponibles", + "Management-Tools umfassen:": "Les outils de gestion incluent:", + "Dateiverwaltung - Dokumente hochladen und organisieren": "Gestion des Fichiers - Télécharger et organiser les documents", + "Benutzerverwaltung - Teammitglieder und Berechtigungen verwalten": "Gestion des Utilisateurs - Gérer les membres de l'équipe et les permissions", + "Systemeinstellungen - Arbeitsbereich-Einstellungen konfigurieren": "Paramètres Système - Configurer les paramètres de l'espace de travail", + "Datenverwaltung - Datenimporte und -exporte verwalten": "Gestion des Données - Gérer les imports et exports de données", + "Mandate": "Mandats", + "Mandate und Berechtigungen verwalten": "Gérer les mandats et les permissions", + "Mandatsverwaltung": "Gestion des mandats", + "Verwalten Sie Mandate und deren zugehörige Berechtigungen.": "Gérez les mandats et leurs permissions associées.", + "Mandat hinzufügen": "Ajouter un 
mandat", + "Neues Mandat erstellen": "Créer un nouveau mandat", + "Mandat erfolgreich erstellt": "Mandat créé avec succès", + "Fehler beim Erstellen des Mandats": "Erreur lors de la création du mandat", + "RBAC-Regeln": "Règles RBAC", + "Rollenbasierte Zugriffssteuerungsregeln": "Règles de contrôle d'accès basé sur les rôles", + "RBAC-Regelverwaltung": "Gestion des règles RBAC", + "Konfigurieren und verwalten Sie rollenbasierte Zugriffssteuerungsregeln.": "Configurez et gérez les règles de contrôle d'accès basé sur les rôles.", + "RBAC-Regel hinzufügen": "Ajouter une règle RBAC", + "Neue RBAC-Regel erstellen": "Créer une nouvelle règle RBAC", + "RBAC-Regel erfolgreich erstellt": "Règle RBAC créée avec succès", + "Fehler beim Erstellen der RBAC-Regel": "Erreur lors de la création de la règle RBAC", + "RBAC-Rollen": "Rôles RBAC", + "Rollenverwaltung": "Gestion des rôles", + "RBAC-Rollenverwaltung": "Gestion des rôles RBAC", + "Erstellen und verwalten Sie RBAC-Rollen und deren Berechtigungen.": "Créez et gérez les rôles RBAC et leurs permissions.", + "Rolle hinzufügen": "Ajouter un rôle", + "Neue Rolle erstellen": "Créer un nouveau rôle", + "Rolle erfolgreich erstellt": "Rôle créé avec succès", + "Fehler beim Erstellen der Rolle": "Erreur lors de la création du rôle", + "Admin-Einstellungen": "Paramètres Admin", + "Administrative Einstellungen": "Paramètres administratifs", + "Konfigurieren Sie administrative Einstellungen und Systempräferenzen.": "Configurez les paramètres administratifs et les préférences système.", + "Start": "Démarrage", + "Willkommen in Ihrem Arbeitsbereich": "Bienvenue dans votre espace de travail", + "Dies ist Ihr Ausgangspunkt für den Zugriff auf alle Arbeitsbereich-Features und -Tools.": "Ceci est votre point de départ pour accéder à toutes les fonctionnalités et outils de votre espace de travail.", + "Schnellzugriff": "Accès Rapide", + "Beginnen Sie mit:": "Commencez avec :", + "Schnellzugriff - Springen Sie zu häufig verwendeten Features": 
"Accès Rapide - Accédez rapidement aux fonctionnalités fréquemment utilisées", + "Letzte Aktivitäten - Sehen Sie Ihre neueste Arbeit": "Activités Récentes - Consultez votre travail le plus récent", + "Übersicht - Sehen Sie den Arbeitsbereich-Status und Updates": "Aperçu - Consultez le statut et les mises à jour de l'espace de travail", + "Navigation - Erkunden Sie alle verfügbaren Tools": "Navigation - Explorez tous les outils disponibles", + "Projekte": "Projets", + "Projektverwaltung": "Gestion de projets", + "Projektverwaltung und -organisation": "Gestion et organisation de projets", + "Suchen Sie nach Standorten über Adresse oder Koordinaten, oder verwenden Sie natürliche Sprache, um Projekte zu erstellen und zu verwalten.": "Recherchez des emplacements par adresse ou coordonnées, ou utilisez le langage naturel pour créer et gérer des projets.", + "Befehl eingeben (z.B., \"Erstelle ein neues Projekt namens 'Hauptstrasse 42'\")": "Entrez une commande (par exemple, \"Créer un nouveau projet nommé 'Rue Principale 42'\")", + "Noch keine Befehle ausgeführt. Senden Sie einen Befehl, um Ergebnisse hier zu sehen.": "Aucune commande exécutée pour le moment. Envoyez une commande pour voir les résultats ici.", + "Datenverwaltung": "Gestion des données", + "Datenverwaltung mit Tabellen": "Gestion des données avec des tableaux", + "Verwalten Sie Daten über Tabellen. Wählen Sie eine Tabelle aus oder verwenden Sie natürliche Sprache, um Befehle auszuführen.": "Gérez les données via des tableaux. 
Sélectionnez un tableau ou utilisez le langage naturel pour exécuter des commandes.", + "Sie können auch auf den Upload-Button klicken": "Vous pouvez aussi cliquer sur le bouton de téléchargement", + "Dateien werden verarbeitet...": "Traitement des fichiers...", + "Fehler beim Verarbeiten der Dateien": "Erreur lors du traitement des fichiers", + "Treuhand": "Fiduciaire", + "Treuhandverwaltung": "Gestion Fiduciaire", + "Verwaltung von Treuhand-Organisationen, Verträgen und Buchungen": "Gestion des organisations fiduciaires, contrats et réservations", + "Organisationen": "Organisations", + "Trustee-Organisationen verwalten": "Gérer les organisations fiduciaires", + "Verwaltung der Treuhand-Organisationen": "Gestion des organisations fiduciaires", + "Neue Organisation": "Nouvelle Organisation", + "z.B. treuhand-ag-zuerich": "ex. fiduciaire-ag-zurich", + "Bezeichnung": "Libellé", + "z.B. Treuhand AG Zürich": "ex. Fiduciaire AG Zurich", + "Neue Organisation erstellen": "Créer une nouvelle organisation", + "Organisation erfolgreich erstellt": "Organisation créée avec succès", + "Fehler beim Erstellen der Organisation": "Erreur lors de la création de l'organisation", + "Rollen": "Rôles", + "Trustee-Rollen verwalten": "Gérer les rôles fiduciaires", + "Verwaltung der Feature-spezifischen Rollen": "Gestion des rôles spécifiques à la fonctionnalité", + "Neue Rolle": "Nouveau Rôle", + "Rollen-ID": "ID du rôle", + "z.B. admin, operate, userreport": "ex. 
admin, operate, userreport", + "Beschreibung": "Description", + "Beschreibung der Rolle": "Description du rôle", + "Zugriff": "Accès", + "Benutzer-Zugriff verwalten": "Gérer les accès utilisateurs", + "Verwaltung der Benutzerzugriffe auf Organisationen": "Gestion des accès utilisateurs aux organisations", + "Neuer Zugriff": "Nouvel Accès", + "Organisation": "Organisation", + "Rolle": "Rôle", + "Vertrag (optional)": "Contrat (optionnel)", + "Leer = Zugriff auf alle Verträge": "Vide = Accès à tous les contrats", + "Neuen Zugriff erstellen": "Créer un nouvel accès", + "Zugriff erfolgreich erstellt": "Accès créé avec succès", + "Fehler beim Erstellen des Zugriffs": "Erreur lors de la création de l'accès", + "Verträge": "Contrats", + "Kundenverträge verwalten": "Gérer les contrats clients", + "Verwaltung der Kundenverträge": "Gestion des contrats clients", + "Neuer Vertrag": "Nouveau Contrat", + "z.B. Muster AG 2026": "ex. Muster AG 2026", + "Neuen Vertrag erstellen": "Créer un nouveau contrat", + "Vertrag erfolgreich erstellt": "Contrat créé avec succès", + "Fehler beim Erstellen des Vertrags": "Erreur lors de la création du contrat", + "Dokumente": "Documents", + "Belege verwalten": "Gérer les pièces justificatives", + "Verwaltung der Dokumente und Belege": "Gestion des documents et pièces justificatives", + "Neues Dokument": "Nouveau Document", + "Vertrag": "Contrat", + "z.B. Beleg.pdf": "ex. 
Justificatif.pdf", + "Dateityp": "Type de fichier", + "Neues Dokument erstellen": "Créer un nouveau document", + "Dokument erfolgreich erstellt": "Document créé avec succès", + "Fehler beim Erstellen des Dokuments": "Erreur lors de la création du document", + "Positionen": "Positions", + "Buchungspositionen verwalten": "Gérer les positions de réservation", + "Verwaltung der Buchungspositionen (Speseneinträge)": "Gestion des positions de réservation (entrées de dépenses)", + "Neue Position": "Nouvelle Position", + "Valutadatum": "Date de valeur", + "Name des Unternehmens": "Nom de l'entreprise", + "Buchungswährung": "Devise de comptabilisation", + "Buchungsbetrag": "Montant de comptabilisation", + "Originalwährung": "Devise d'origine", + "Originalbetrag": "Montant d'origine", + "MwSt %": "TVA %", + "MwSt Betrag": "Montant TVA", + "Neue Position erstellen": "Créer une nouvelle position", + "Position erfolgreich erstellt": "Position créée avec succès", + "Fehler beim Erstellen der Position": "Erreur lors de la création de la position", + "UI-Sprachen": "Langues de l’UI", + "Globale Sprachsets verwalten (SysAdmin).": "Gérer les jeux de langue globaux (SysAdmin).", + "Alle aktualisieren": "Tout mettre à jour", + "Alle Nicht-Standard-Sprachsets jetzt mit dem deutschen Master synchronisieren?": "Synchroniser maintenant tous les jeux (sauf défaut) avec l’allemand ?", + "Neue Sprache": "Nouvelle langue", + "Anzeigename": "Nom d’affichage", + "Hinzufügen": "Ajouter", + "Keine Einträge": "Aucune entrée", + "Die Erstellung einer neuen Sprache kann AI-Guthaben auf Ihrem Mandats-Pool belasten. Fortfahren?": "Créer une nouvelle langue peut consommer des crédits IA sur le pool du mandat. 
Continuer ?", + "Sprachset {code} wirklich löschen?": "Supprimer vraiment le jeu de langue {code} ?", + "Fortfahren": "Continuer" + }, + "status": "complete", + "isDefault": false + } +] \ No newline at end of file diff --git a/modules/routes/routeBilling.py b/modules/routes/routeBilling.py index 5029e485..110f563c 100644 --- a/modules/routes/routeBilling.py +++ b/modules/routes/routeBilling.py @@ -179,47 +179,6 @@ def _isMemberOfMandate(ctx: RequestContext, targetMandateId: str) -> bool: return False -def _filterTransactionsByScope(transactions: list, scope: BillingDataScope) -> list: - """ - Filter a list of transaction dicts based on the user's BillingDataScope. - - Rules: - - SysAdmin: no filter - - Mandate-Admin: all transactions in their admin mandates - - Feature-Instance-Admin: transactions for their admin feature instances - - Regular user: only transactions where createdByUserId/userId matches - """ - if scope.isGlobalAdmin: - return transactions - - adminMandateSet = set(scope.adminMandateIds) - adminFiSet = set(scope.adminFeatureInstanceIds) - memberMandateSet = set(scope.memberMandateIds) - - result = [] - for t in transactions: - mandateId = t.get("mandateId") - fiId = t.get("featureInstanceId") - txUserId = t.get("createdByUserId") or t.get("userId") - - # Mandate admin → sees all transactions in their mandate - if mandateId and mandateId in adminMandateSet: - result.append(t) - continue - - # Feature instance admin → sees all transactions for their instances - if fiId and fiId in adminFiSet: - result.append(t) - continue - - # Regular member → only own transactions - if mandateId and mandateId in memberMandateSet: - if txUserId and txUserId == scope.userId: - result.append(t) - continue - - return result - # ============================================================================= # Request/Response Models @@ -1429,32 +1388,20 @@ def _enrichTransactionRows(transactions) -> List[Dict[str, Any]]: return result -def _buildTransactionsList(ctx: 
RequestContext, targetMandateId: str) -> List[Dict[str, Any]]: - """Build the full enriched transactions list for a mandate.""" +def _buildTransactionsList(ctx: RequestContext, targetMandateId: str, paginationParams: Optional[PaginationParams] = None) -> tuple: + """Build enriched transactions for a mandate. Returns (items, paginatedResult|None).""" billingInterface = getBillingInterface(ctx.user, targetMandateId) - transactions = billingInterface.getTransactionsByMandate(targetMandateId, limit=5000) - result = [] - for t in transactions: - row = TransactionResponse( - id=t.get("id"), - accountId=t.get("accountId"), - transactionType=TransactionTypeEnum(t.get("transactionType", "DEBIT")), - amount=t.get("amount", 0.0), - description=t.get("description", ""), - referenceType=ReferenceTypeEnum(t["referenceType"]) if t.get("referenceType") else None, - workflowId=t.get("workflowId"), - featureCode=t.get("featureCode"), - featureInstanceId=t.get("featureInstanceId"), - aicoreProvider=t.get("aicoreProvider"), - aicoreModel=t.get("aicoreModel"), - createdByUserId=t.get("createdByUserId"), - createdAt=t.get("sysCreatedAt") - ) - result.append(row.model_dump()) + if paginationParams: + paginatedResult = billingInterface.getTransactionsByMandate(targetMandateId, pagination=paginationParams) + transactions = paginatedResult.items if hasattr(paginatedResult, 'items') else paginatedResult.get("items", []) + else: + defaultPagination = PaginationParams(page=1, pageSize=200, sort=[{"field": "sysCreatedAt", "direction": "desc"}]) + paginatedResult = billingInterface.getTransactionsByMandate(targetMandateId, pagination=defaultPagination) + transactions = paginatedResult.items if hasattr(paginatedResult, 'items') else paginatedResult.get("items", []) - _attachCreatedByUserNamesToTransactionRows(result) - return result + result = _enrichTransactionRows(transactions) + return result, paginatedResult @router.get("/admin/transactions/{targetMandateId}") @@ -1463,7 +1410,6 @@ def 
getTransactionsAdmin( request: Request, targetMandateId: str = Path(..., description="Mandate ID"), pagination: Optional[str] = Query(None, description="JSON-encoded PaginationParams"), - limit: int = Query(default=100, ge=1, le=1000), ctx: RequestContext = Depends(getRequestContext), ): """Get all transactions for a mandate with pagination support.""" @@ -1480,26 +1426,22 @@ def getTransactionsAdmin( except (json.JSONDecodeError, ValueError) as e: raise HTTPException(status_code=400, detail=f"Invalid pagination parameter: {str(e)}") - if paginationParams: - # DB-level pagination — enrich only the returned page - billingInterface = getBillingInterface(ctx.user, targetMandateId) - result = billingInterface.getTransactionsByMandate(targetMandateId, pagination=paginationParams) - transactions = result.items if hasattr(result, 'items') else result - enrichedItems = _enrichTransactionRows(transactions) - return { - "items": enrichedItems, - "pagination": PaginationMetadata( - currentPage=paginationParams.page, - pageSize=paginationParams.pageSize, - totalItems=result.totalItems if hasattr(result, 'totalItems') else len(enrichedItems), - totalPages=result.totalPages if hasattr(result, 'totalPages') else 0, - sort=paginationParams.sort, - filters=paginationParams.filters, - ).model_dump(), - } + enriched, paginatedResult = _buildTransactionsList(ctx, targetMandateId, paginationParams) + totalItems = getattr(paginatedResult, 'totalItems', len(enriched)) if paginatedResult else len(enriched) + totalPages = getattr(paginatedResult, 'totalPages', 0) if paginatedResult else 0 - enriched = _buildTransactionsList(ctx, targetMandateId) - return {"items": enriched, "pagination": None} + paginationMeta = None + if paginationParams: + paginationMeta = PaginationMetadata( + currentPage=paginationParams.page, + pageSize=paginationParams.pageSize, + totalItems=totalItems, + totalPages=totalPages, + sort=paginationParams.sort, + filters=paginationParams.filters, + ).model_dump() + + 
return {"items": enriched, "pagination": paginationMeta} except HTTPException: raise @@ -1535,16 +1477,15 @@ def getTransactionFilterValues( except (json.JSONDecodeError, ValueError): pass - # Try SQL DISTINCT for native DB columns; fallback to in-memory for enriched columns (e.g. userName) try: - rootBillingInterface = _getRootInterface() - recordFilter = {"mandateId": targetMandateId} - values = rootBillingInterface.db.getDistinctColumnValues( - BillingTransaction, column, crossFilterParams, recordFilter + billingInterface = getBillingInterface(ctx.user, targetMandateId) + return billingInterface.getTransactionDistinctValues( + mandateIds=[targetMandateId], + column=column, + pagination=crossFilterParams, ) - return sorted(values, key=lambda v: str(v).lower()) except Exception: - enriched = _buildTransactionsList(ctx, targetMandateId) + enriched, _ = _buildTransactionsList(ctx, targetMandateId) crossFiltered = _applyFiltersAndSort(enriched, crossFilterParams) return _extractDistinctValues(crossFiltered, column) except Exception as e: @@ -1703,20 +1644,16 @@ def getUserViewStatistics( - period='day': returns daily time series for the given month/year """ try: - from datetime import timedelta - if year is None: year = datetime.now().year - + if period == "day" and not month: month = datetime.now().month - + billingInterface = getBillingInterface(ctx.user, ctx.mandateId) - - # Evaluate RBAC scope + rbacScope = _getBillingDataScope(ctx.user) - - # Determine mandate IDs for data loading + if rbacScope.isGlobalAdmin: loadMandateIds = None else: @@ -1724,151 +1661,75 @@ def getUserViewStatistics( if not loadMandateIds: logger.warning("No mandate IDs found for user") return ViewStatisticsResponse() - - # Scope=mandate: restrict to specific mandate + if scope == "mandate" and mandateId: loadMandateIds = [mandateId] - - # Get all transactions - allTransactions = billingInterface.getUserTransactionsForMandates(loadMandateIds, limit=10000) - - # Apply RBAC filter (respects 
admin/user roles) - allTransactions = _filterTransactionsByScope(allTransactions, rbacScope) - - # Scope=personal: further filter to only own transactions - if scope == "personal": - userId = str(ctx.user.id) - allTransactions = [ - t for t in allTransactions - if (t.get("createdByUserId") or t.get("userId")) == userId - ] - - logger.info(f"View statistics: {len(allTransactions)} RBAC-filtered transactions for period={period}, year={year}, month={month}") - - # Calculate date range + + personalUserId = str(ctx.user.id) if scope == "personal" else None + if period == "day": startDate = date(year, month, 1) - if month == 12: - endDate = date(year + 1, 1, 1) - else: - endDate = date(year, month + 1, 1) + endDate = date(year + 1, 1, 1) if month == 12 else date(year, month + 1, 1) else: startDate = date(year, 1, 1) endDate = date(year + 1, 1, 1) - - # Filter by date range and only DEBIT transactions - debits = [] - skippedNoDate = 0 - skippedDateRange = 0 - skippedNotDebit = 0 - - for t in allTransactions: - createdAt = t.get("sysCreatedAt") - if not createdAt: - skippedNoDate += 1 - continue - - # Parse date from various formats (DB stores as DOUBLE PRECISION / Unix timestamp) - txDate = None - if isinstance(createdAt, (int, float)): - txDate = datetime.fromtimestamp(createdAt).date() - elif isinstance(createdAt, datetime): - txDate = createdAt.date() - elif isinstance(createdAt, date) and not isinstance(createdAt, datetime): - txDate = createdAt - elif isinstance(createdAt, str): - try: - # Try as float string first (Unix timestamp) - txDate = datetime.fromtimestamp(float(createdAt)).date() - except (ValueError, TypeError): - try: - txDate = datetime.fromisoformat(createdAt.replace("Z", "+00:00")).date() - except (ValueError, TypeError): - skippedNoDate += 1 - continue - else: - skippedNoDate += 1 - continue - - if txDate < startDate or txDate >= endDate: - skippedDateRange += 1 - continue - - # Compare transactionType - handle both string and enum - txType = 
t.get("transactionType") - txTypeStr = str(txType) if txType is not None else "" - if txTypeStr != "DEBIT" and txTypeStr != "TransactionTypeEnum.DEBIT": - # Also check .value for enum objects - txTypeValue = getattr(txType, 'value', txTypeStr) - if txTypeValue != "DEBIT": - skippedNotDebit += 1 - continue - - t["_txDate"] = txDate - debits.append(t) - - logger.info(f"View statistics: {len(debits)} DEBIT transactions after filter. " - f"Skipped: noDate={skippedNoDate}, dateRange={skippedDateRange}, notDebit={skippedNotDebit}") - - # Aggregate totals - totalCost = sum(t.get("amount", 0) for t in debits) - - costByProvider: Dict[str, float] = {} - costByModel: Dict[str, float] = {} - costByFeature: Dict[str, float] = {} + + startTs = datetime.combine(startDate, datetime.min.time()).timestamp() + endTs = datetime.combine(endDate, datetime.min.time()).timestamp() + + agg = billingInterface.getTransactionStatisticsAggregated( + mandateIds=loadMandateIds, + scope=scope, + userId=personalUserId, + startTs=startTs, + endTs=endTs, + period=period, + ) + + logger.info( + f"View statistics (SQL-aggregated): totalCost={agg['totalCost']}, " + f"count={agg['transactionCount']}, period={period}, year={year}, month={month}" + ) + + allAccounts = agg.get("_allAccounts", []) + accountToMandate: Dict[str, str] = {} + for acc in allAccounts: + accountToMandate[acc.get("id", "")] = acc.get("mandateId", "") + + from modules.interfaces.interfaceDbApp import getInterface as getAppInterface + mandateIdsForLookup = list(set(accountToMandate.values())) + mandateMap: Dict[str, str] = {} + if mandateIdsForLookup: + rootIface = getAppInterface(ctx.user) + mandatesById = rootIface.getMandatesByIds(mandateIdsForLookup) + for mid, m in mandatesById.items(): + mandateMap[mid] = getattr(m, "name", mid) or mid + + def _mandateName(accountId: str) -> str: + mid = accountToMandate.get(accountId, "") + return mandateMap.get(mid, mid or "unknown") + costByMandate: Dict[str, float] = {} - - for t in 
debits: - provider = t.get("aicoreProvider") or "unknown" - costByProvider[provider] = costByProvider.get(provider, 0) + t.get("amount", 0) - - model = t.get("aicoreModel") or "unknown" - costByModel[model] = costByModel.get(model, 0) + t.get("amount", 0) - - mandate = t.get("mandateName") or t.get("mandateId") or "unknown" - featureCode = t.get("featureCode") or "unknown" - featureKey = f"{mandate} / {featureCode}" - costByFeature[featureKey] = costByFeature.get(featureKey, 0) + t.get("amount", 0) - - mandate = t.get("mandateName") or t.get("mandateId") or "unknown" - costByMandate[mandate] = costByMandate.get(mandate, 0) + t.get("amount", 0) - - # Build time series (raw data only, no display logic) - timeSeries = [] - if period == "day": - numDays = (endDate - startDate).days - for day in range(numDays): - d = startDate + timedelta(days=day) - dayCost = sum(t.get("amount", 0) for t in debits if t["_txDate"] == d) - dayCount = sum(1 for t in debits if t["_txDate"] == d) - if dayCost > 0 or dayCount > 0: - timeSeries.append({ - "date": d.isoformat(), - "cost": round(dayCost, 4), - "count": dayCount - }) - else: - for m in range(1, 13): - mStart = date(year, m, 1) - mEnd = date(year, m + 1, 1) if m < 12 else date(year + 1, 1, 1) - monthCost = sum(t.get("amount", 0) for t in debits if mStart <= t["_txDate"] < mEnd) - monthCount = sum(1 for t in debits if mStart <= t["_txDate"] < mEnd) - timeSeries.append({ - "date": f"{year}-{m:02d}", - "cost": round(monthCost, 4), - "count": monthCount - }) - + for accId, total in agg.get("costByAccountId", {}).items(): + name = _mandateName(accId) + costByMandate[name] = costByMandate.get(name, 0) + total + + costByFeature: Dict[str, float] = {} + for entry in agg.get("costByAccountFeature", []): + name = _mandateName(entry["accountId"]) + key = f"{name} / {entry['featureCode']}" + costByFeature[key] = costByFeature.get(key, 0) + entry["total"] + return ViewStatisticsResponse( - totalCost=round(totalCost, 4), - 
transactionCount=len(debits), - costByProvider=costByProvider, - costByModel=costByModel, + totalCost=agg["totalCost"], + transactionCount=agg["transactionCount"], + costByProvider=agg.get("costByProvider", {}), + costByModel=agg.get("costByModel", {}), costByFeature=costByFeature, costByMandate=costByMandate, - timeSeries=timeSeries + timeSeries=agg.get("timeSeries", []), ) - + except Exception as e: logger.error(f"Error getting view statistics: {e}", exc_info=True) raise HTTPException(status_code=500, detail=str(e)) @@ -1879,77 +1740,61 @@ def getUserViewStatistics( def getUserViewTransactions( request: Request, pagination: Optional[str] = Query(None, description="JSON-encoded PaginationParams object"), + scope: str = Query(default="all", description="Scope: 'personal' (own costs only), 'mandate' (filter by mandateId), 'all' (RBAC-filtered)"), + mandateId: Optional[str] = Query(None, description="Mandate ID filter (used with scope='mandate')"), ctx: RequestContext = Depends(getRequestContext) ) -> PaginatedResponse[UserTransactionResponse]: """ Get user-level transactions with pagination support. 
- RBAC filtering: - - SysAdmin: sees all user transactions across all mandates - - Mandate-Admin: sees all user transactions for mandates they administrate - - Feature-Instance-Admin: sees transactions for their feature instances - - Regular user: sees only their own transactions + Scope (same contract as /view/statistics): + - personal: only the current user's own transactions (ignores admin role) + - mandate: transactions for a specific mandate (requires mandateId parameter) + - all: RBAC-filtered (SysAdmin sees everything, admin sees mandate, user sees own) Query Parameters: - pagination: JSON-encoded PaginationParams object, or None for no pagination + - scope: 'personal', 'mandate', or 'all' + - mandateId: required when scope='mandate' """ try: billingInterface = getBillingInterface(ctx.user, ctx.mandateId) - - # Parse pagination params + paginationParams = None if pagination: import json paginationDict = json.loads(pagination) paginationDict = normalize_pagination_dict(paginationDict) paginationParams = PaginationParams(**paginationDict) - - # Evaluate RBAC scope - scope = _getBillingDataScope(ctx.user) - - # Determine mandate IDs for data loading - if scope.isGlobalAdmin: - mandateIds = None # Load all + + rbacScope = _getBillingDataScope(ctx.user) + + if rbacScope.isGlobalAdmin: + loadMandateIds = None else: - # Load data for all mandates the user belongs to (admin + member) - mandateIds = scope.adminMandateIds + scope.memberMandateIds - if not mandateIds: + loadMandateIds = rbacScope.adminMandateIds + rbacScope.memberMandateIds + if not loadMandateIds: return PaginatedResponse(items=[], pagination=None) - - allTransactions = billingInterface.getUserTransactionsForMandates(mandateIds, limit=10000) - - # Apply RBAC filter - allTransactions = _filterTransactionsByScope(allTransactions, scope) - - logger.debug(f"RBAC-filtered {len(allTransactions)} transactions for user {ctx.user.id}") - - # Convert to response objects as dicts for filtering/sorting - 
transactionDicts = [] - for t in allTransactions: - transactionDicts.append({ - "id": t.get("id"), - "accountId": t.get("accountId"), - "transactionType": t.get("transactionType", "DEBIT"), - "amount": t.get("amount", 0.0), - "description": t.get("description", ""), - "referenceType": t.get("referenceType"), - "workflowId": t.get("workflowId"), - "featureCode": t.get("featureCode"), - "featureInstanceId": t.get("featureInstanceId"), - "aicoreProvider": t.get("aicoreProvider"), - "aicoreModel": t.get("aicoreModel"), - "createdByUserId": t.get("createdByUserId"), - "createdAt": t.get("sysCreatedAt"), - "mandateId": t.get("mandateId"), - "mandateName": t.get("mandateName"), - "userId": t.get("userId"), - "userName": t.get("userName"), - }) - - # Apply filters and sorting - filteredDicts = _applyFiltersAndSort(transactionDicts, paginationParams) - - # Convert to response models + + if scope == "mandate" and mandateId: + loadMandateIds = [mandateId] + + effectiveScope = scope + personalUserId = str(ctx.user.id) if scope == "personal" else None + + if not paginationParams: + paginationParams = PaginationParams(page=1, pageSize=50) + + result = billingInterface.getTransactionsForMandatesPaginated( + mandateIds=loadMandateIds, + pagination=paginationParams, + scope=effectiveScope, + userId=personalUserId, + ) + + logger.debug(f"SQL-paginated {result.totalItems} transactions for user {ctx.user.id} " + f"(scope={scope}, mandateId={mandateId}, page={paginationParams.page})") + def _toResponse(d): return UserTransactionResponse( id=d.get("id"), @@ -1964,38 +1809,25 @@ def getUserViewTransactions( aicoreProvider=d.get("aicoreProvider"), aicoreModel=d.get("aicoreModel"), createdByUserId=d.get("createdByUserId"), - createdAt=d.get("createdAt"), + createdAt=d.get("sysCreatedAt") or d.get("createdAt"), mandateId=d.get("mandateId"), mandateName=d.get("mandateName"), userId=d.get("userId"), userName=d.get("userName") ) - - if paginationParams: - import math - totalItems = 
len(filteredDicts) - totalPages = math.ceil(totalItems / paginationParams.pageSize) if totalItems > 0 else 0 - startIdx = (paginationParams.page - 1) * paginationParams.pageSize - endIdx = startIdx + paginationParams.pageSize - paginatedDicts = filteredDicts[startIdx:endIdx] - - return PaginatedResponse( - items=[_toResponse(d) for d in paginatedDicts], - pagination=PaginationMetadata( - currentPage=paginationParams.page, - pageSize=paginationParams.pageSize, - totalItems=totalItems, - totalPages=totalPages, - sort=paginationParams.sort, - filters=paginationParams.filters - ) + + return PaginatedResponse( + items=[_toResponse(d) for d in result.items], + pagination=PaginationMetadata( + currentPage=paginationParams.page, + pageSize=paginationParams.pageSize, + totalItems=result.totalItems, + totalPages=result.totalPages, + sort=paginationParams.sort, + filters=paginationParams.filters, ) - else: - return PaginatedResponse( - items=[_toResponse(d) for d in filteredDicts], - pagination=None - ) - + ) + except Exception as e: logger.error(f"Error getting user view transactions: {e}") raise HTTPException(status_code=500, detail=str(e)) @@ -2007,42 +1839,49 @@ def getUserViewTransactionsFilterValues( request: Request, column: str = Query(..., description="Column key"), pagination: Optional[str] = Query(None, description="JSON-encoded current filters"), + scope: str = Query(default="all", description="Scope: 'personal', 'mandate', 'all'"), + mandateId: Optional[str] = Query(None, description="Mandate ID filter (used with scope='mandate')"), ctx: RequestContext = Depends(getRequestContext) ): - """Return distinct filter values for a column in user transactions.""" + """Return distinct filter values for a column in user transactions (SQL DISTINCT).""" try: billingInterface = getBillingInterface(ctx.user, ctx.mandateId) - scope = _getBillingDataScope(ctx.user) - if scope.isGlobalAdmin: - mandateIds = None + rbacScope = _getBillingDataScope(ctx.user) + + if 
rbacScope.isGlobalAdmin: + loadMandateIds = None else: - mandateIds = scope.adminMandateIds + scope.memberMandateIds - if not mandateIds: + loadMandateIds = rbacScope.adminMandateIds + rbacScope.memberMandateIds + if not loadMandateIds: return [] - allTransactions = billingInterface.getUserTransactionsForMandates(mandateIds, limit=10000) - allTransactions = _filterTransactionsByScope(allTransactions, scope) - transactionDicts = [] - for t in allTransactions: - transactionDicts.append({ - "id": t.get("id"), - "accountId": t.get("accountId"), - "transactionType": t.get("transactionType", "DEBIT"), - "amount": t.get("amount", 0.0), - "description": t.get("description", ""), - "referenceType": t.get("referenceType"), - "workflowId": t.get("workflowId"), - "featureCode": t.get("featureCode"), - "featureInstanceId": t.get("featureInstanceId"), - "aicoreProvider": t.get("aicoreProvider"), - "aicoreModel": t.get("aicoreModel"), - "createdByUserId": t.get("createdByUserId"), - "createdAt": t.get("sysCreatedAt"), - "mandateId": t.get("mandateId"), - "mandateName": t.get("mandateName"), - "userId": t.get("userId"), - "userName": t.get("userName"), - }) - return _handleFilterValuesRequest(transactionDicts, column, pagination) + + if scope == "mandate" and mandateId: + loadMandateIds = [mandateId] + + crossFilterParams = None + if pagination: + try: + import json + paginationDict = json.loads(pagination) + if paginationDict: + paginationDict = normalize_pagination_dict(paginationDict) + filters = paginationDict.get("filters", {}) + filters.pop(column, None) + paginationDict["filters"] = filters + paginationDict.pop("sort", None) + crossFilterParams = PaginationParams(**paginationDict) + except (json.JSONDecodeError, ValueError): + pass + + personalUserId = str(ctx.user.id) if scope == "personal" else None + + return billingInterface.getTransactionDistinctValues( + mandateIds=loadMandateIds, + column=column, + pagination=crossFilterParams, + scope=scope, + userId=personalUserId, 
+ ) except Exception as e: logger.error(f"Error getting filter values for user transactions: {e}") raise HTTPException(status_code=500, detail=str(e)) diff --git a/modules/routes/routeDataUsers.py b/modules/routes/routeDataUsers.py index 7cce66ca..23cd508f 100644 --- a/modules/routes/routeDataUsers.py +++ b/modules/routes/routeDataUsers.py @@ -423,23 +423,18 @@ def get_users( detail="No admin access to any mandate" ) - # Aggregate users across all admin mandates (deduplicate by user ID) - seenUserIds = set() - allUsers = [] - for mid in adminMandateIds: - mandateUsers = rootInterface.getUsersByMandate(mid) - if isinstance(mandateUsers, list): - users = mandateUsers - elif hasattr(mandateUsers, 'items'): - users = mandateUsers.items - else: - users = [] - for u in users: - uid = u.get("id") if isinstance(u, dict) else getattr(u, "id", None) - if uid and uid not in seenUserIds: - seenUserIds.add(uid) - userData = u if isinstance(u, dict) else u.model_dump() if hasattr(u, 'model_dump') else vars(u) - allUsers.append(userData) + from modules.datamodels.datamodelMembership import UserMandate as UserMandateModel + allUM = rootInterface.db.getRecordset(UserMandateModel, recordFilter={"mandateId": adminMandateIds}) + uniqueUserIds = list({ + (um.get("userId") if isinstance(um, dict) else getattr(um, "userId", None)) + for um in (allUM or []) + if (um.get("userId") if isinstance(um, dict) else getattr(um, "userId", None)) + }) + batchUsers = rootInterface.getUsersByIds(uniqueUserIds) if uniqueUserIds else {} + allUsers = [ + u.model_dump() if hasattr(u, 'model_dump') else vars(u) + for u in batchUsers.values() + ] # Apply server-side filtering and sorting filteredUsers = _applyFiltersAndSort(allUsers, paginationParams) @@ -541,17 +536,15 @@ def get_user_filter_values( break if not adminMandateIds: return [] - seenUserIds = set() - users = [] - for mid in adminMandateIds: - mandateUsers = rootInterface.getUsersByMandate(mid) - uList = mandateUsers if isinstance(mandateUsers, 
list) else (mandateUsers.items if hasattr(mandateUsers, 'items') else []) - for u in uList: - uid = u.get("id") if isinstance(u, dict) else getattr(u, "id", None) - if uid and uid not in seenUserIds: - seenUserIds.add(uid) - users.append(u) - items = [u.model_dump() if hasattr(u, 'model_dump') else u for u in users] + from modules.datamodels.datamodelMembership import UserMandate as UserMandateModel + allUM = rootInterface.db.getRecordset(UserMandateModel, recordFilter={"mandateId": adminMandateIds}) + uniqueUserIds = list({ + (um.get("userId") if isinstance(um, dict) else getattr(um, "userId", None)) + for um in (allUM or []) + if (um.get("userId") if isinstance(um, dict) else getattr(um, "userId", None)) + }) + batchUsers = rootInterface.getUsersByIds(uniqueUserIds) if uniqueUserIds else {} + items = [u.model_dump() if hasattr(u, 'model_dump') else vars(u) for u in batchUsers.values()] return _handleFilterValuesRequest(items, column, pagination) except HTTPException: raise diff --git a/modules/routes/routeI18n.py b/modules/routes/routeI18n.py new file mode 100644 index 00000000..1557b278 --- /dev/null +++ b/modules/routes/routeI18n.py @@ -0,0 +1,711 @@ +# Copyright (c) 2025 Patrick Motsch +# All rights reserved. +""" +Public and authenticated routes for UI language sets (DB-backed i18n). 
+ +AI translation pipeline: +- create_language_set → background job translates all keys via AiObjects +- update_language_set → synchronous AI pass for added keys +- update_all → iterates non-de sets +""" + +from __future__ import annotations + +import asyncio +import json +import logging +import math +import re +from pathlib import Path +from typing import Any, Dict, List, Optional, Set + +from fastapi import APIRouter, BackgroundTasks, Depends, File, HTTPException, Request, UploadFile, status +from fastapi.responses import Response +from pydantic import BaseModel, Field + +from modules.auth import getCurrentUser, requireSysAdminRole +from modules.connectors.connectorDbPostgre import _get_cached_connector +from modules.datamodels.datamodelAi import ( + AiCallOptions, + AiCallRequest, + AiCallResponse, + OperationTypeEnum, + PriorityEnum, +) +from modules.datamodels.datamodelUiLanguage import UiLanguageSet +from modules.datamodels.datamodelUam import User +from modules.datamodels.datamodelNotification import NotificationType +from modules.interfaces.interfaceDbManagement import getInterface as getMgmtInterface +from modules.routes.routeNotifications import _createNotification +from modules.shared.configuration import APP_CONFIG +from modules.shared.timeUtils import getUtcTimestamp + +logger = logging.getLogger(__name__) + +router = APIRouter( + prefix="/api/i18n", + tags=["i18n"], + responses={404: {"description": "Not found"}}, +) + +_MIN_AI_BILLING_ESTIMATE_CHF = 0.01 +_TRANSLATE_BATCH_SIZE = 80 + + +def _publicMgmtDb(): + return _get_cached_connector( + dbHost=APP_CONFIG.get("DB_HOST", "localhost"), + dbDatabase="poweron_management", + dbUser=APP_CONFIG.get("DB_USER"), + dbPassword=APP_CONFIG.get("DB_PASSWORD_SECRET"), + dbPort=int(APP_CONFIG.get("DB_PORT", 5432)), + userId="__i18n_public__", + ) + + +def _row_to_public(row: dict) -> dict: + keys = row.get("keys") or {} + return { + "code": row["id"], + "label": row.get("label"), + "status": row.get("status"), + 
"keys": keys if isinstance(keys, dict) else {}, + } + + +def _load_master_de_keys(db) -> Dict[str, str]: + rows = db.getRecordset(UiLanguageSet, recordFilter={"id": "de"}) + if not rows: + return {} + keys = rows[0].get("keys") or {} + return dict(keys) if isinstance(keys, dict) else {} + + +def _userMemberMandateIds(currentUser: User) -> List[str]: + from modules.interfaces.interfaceDbApp import getRootInterface + + root = getRootInterface() + memberships = root.getUserMandates(str(currentUser.id)) + out = [] + for um in memberships: + mid = getattr(um, "mandateId", None) or ( + um.get("mandateId") if isinstance(um, dict) else None + ) + if mid: + out.append(str(mid)) + return list(dict.fromkeys(out)) + + +def _mandatePassesAiPoolBilling(currentUser: User, mandateId: str, userId: str) -> bool: + from modules.interfaces.interfaceDbBilling import getInterface as getBillingInterface + + bi = getBillingInterface(currentUser, mandateId) + res = bi.checkBalance(mandateId, userId, _MIN_AI_BILLING_ESTIMATE_CHF) + return bool(res.allowed) + + +# --------------------------------------------------------------------------- +# AI Translation helpers +# --------------------------------------------------------------------------- + +_aiObjectsSingleton = None + + +async def _getAiObjects(): + """Lazy singleton — same pattern as routeFeatureWorkspace.""" + global _aiObjectsSingleton + if _aiObjectsSingleton is None: + from modules.interfaces.interfaceAiObjects import AiObjects + _aiObjectsSingleton = await AiObjects.create() + return _aiObjectsSingleton + + +def _makeBillingCallback(currentUser: User, mandateId: str): + """Return a billing callback that records each AI response cost.""" + from modules.serviceCenter.services.serviceBilling.mainServiceBilling import getService as getBillingService + + billingService = getBillingService(currentUser, mandateId) + + def _cb(response: AiCallResponse) -> None: + if not response or getattr(response, "errorCount", 0) > 0: + return + 
basePriceCHF = getattr(response, "priceCHF", 0.0) + if not basePriceCHF or basePriceCHF <= 0: + return + provider = getattr(response, "provider", None) or "unknown" + modelName = getattr(response, "modelName", None) or "unknown" + try: + billingService.recordUsage( + priceCHF=basePriceCHF, + aicoreProvider=provider, + aicoreModel=modelName, + description=f"i18n translation ({modelName})", + processingTime=getattr(response, "processingTime", None), + bytesSent=getattr(response, "bytesSent", None), + bytesReceived=getattr(response, "bytesReceived", None), + ) + except Exception as e: + logger.error("i18n billing callback failed: %s", e) + + return _cb + + +async def _translateBatch( + keysToTranslate: Dict[str, str], + targetLanguageLabel: str, + targetCode: str, + billingCallback=None, +) -> Dict[str, str]: + """Translate a batch of German-key → German-value pairs into *targetLanguageLabel*. + + Returns dict { germanKey: translatedValue }. + Splits into sub-batches of _TRANSLATE_BATCH_SIZE to stay within token limits. + """ + if not keysToTranslate: + return {} + + aiObjects = await _getAiObjects() + allKeys = list(keysToTranslate.items()) + totalBatches = math.ceil(len(allKeys) / _TRANSLATE_BATCH_SIZE) + result: Dict[str, str] = {} + + for batchIdx in range(totalBatches): + chunk = allKeys[batchIdx * _TRANSLATE_BATCH_SIZE : (batchIdx + 1) * _TRANSLATE_BATCH_SIZE] + payload = {k: v for k, v in chunk} + jsonPayload = json.dumps(payload, ensure_ascii=False) + + systemPrompt = ( + f"Du bist ein professioneller Übersetzer für Software-UI-Texte. " + f"Übersetze die folgenden deutschen UI-Labels ins {targetLanguageLabel} (ISO {targetCode}). " + f"Behalte Platzhalter wie {{variable}} exakt bei. " + f"Antworte NUR mit einem JSON-Objekt — gleiche Keys, übersetzte Values. Kein Markdown, kein Kommentar." 
+ ) + + request = AiCallRequest( + prompt=f"Übersetze diese UI-Labels:\n{jsonPayload}", + context=systemPrompt, + options=AiCallOptions( + operationType=OperationTypeEnum.DATA_GENERATE, + priority=PriorityEnum.BALANCED, + compressPrompt=False, + compressContext=False, + resultFormat="json", + temperature=0.2, + ), + ) + + if billingCallback: + aiObjects.billingCallback = billingCallback + + try: + response = await aiObjects.callWithTextContext(request) + if response and response.content: + raw = response.content.strip() + if raw.startswith("```"): + raw = re.sub(r"^```[a-z]*\n?", "", raw) + raw = re.sub(r"\n?```$", "", raw) + parsed = json.loads(raw) + if isinstance(parsed, dict): + result.update(parsed) + else: + logger.warning("i18n AI batch %d/%d returned non-dict", batchIdx + 1, totalBatches) + else: + logger.warning("i18n AI batch %d/%d empty response", batchIdx + 1, totalBatches) + except json.JSONDecodeError as je: + logger.error("i18n AI batch %d/%d JSON parse error: %s", batchIdx + 1, totalBatches, je) + except Exception as e: + logger.error("i18n AI batch %d/%d failed: %s", batchIdx + 1, totalBatches, e) + finally: + aiObjects.billingCallback = None + + return result + + +def _resolveMandateIdForAiI18n(request: Request, currentUser: User) -> str: + userId = str(currentUser.id) + memberIds = _userMemberMandateIds(currentUser) + if not memberIds: + raise HTTPException( + status_code=status.HTTP_400_BAD_REQUEST, + detail="Mindestens eine Mandats-Mitgliedschaft ist für die AI-Nutzung erforderlich.", + ) + + headerRaw = ( + request.headers.get("X-Mandate-Id") or request.headers.get("x-mandate-id") or "" + ).strip() + if headerRaw: + if headerRaw not in memberIds: + raise HTTPException( + status_code=status.HTTP_403_FORBIDDEN, + detail="X-Mandate-Id ist kein Mandat Ihrer Mitgliedschaft.", + ) + if _mandatePassesAiPoolBilling(currentUser, headerRaw, userId): + return headerRaw + for mid in memberIds: + if _mandatePassesAiPoolBilling(currentUser, mid, userId): + 
return mid + raise HTTPException( + status_code=status.HTTP_402_PAYMENT_REQUIRED, + detail="Nicht genügend AI-Guthaben (Mandats-Pool) für diese Aktion.", + ) + + +# --------------------------------------------------------------------------- +# de-Master sync from frontend codebase +# --------------------------------------------------------------------------- + +_REPO_ROOT = Path(__file__).resolve().parents[3] +_FRONTEND_SRC = _REPO_ROOT / "frontend_nyla" / "src" + +_T_CALL_RE = re.compile( + r"""\bt\(\s*'((?:\\.|[^'])+)'\s*(?:,|\))""" +) + + +def _scanCodebaseKeys() -> Set[str]: + """Scan all .tsx/.ts files under frontend_nyla/src for t('...') calls. + + Returns the set of German plaintext keys found in the codebase. + """ + keys: Set[str] = set() + if not _FRONTEND_SRC.is_dir(): + logger.warning("i18n codebase scan: %s not found", _FRONTEND_SRC) + return keys + + for ext in ("*.tsx", "*.ts"): + for filepath in _FRONTEND_SRC.rglob(ext): + try: + content = filepath.read_text(encoding="utf-8", errors="replace") + except OSError: + continue + for m in _T_CALL_RE.finditer(content): + raw = m.group(1) + raw = raw.replace("\\'", "'") + if raw: + keys.add(raw) + return keys + + +def _syncDeMasterFromCodebase(db, userId: Optional[str]) -> Dict[str, Any]: + """Synchronise the de master set with t()-keys found in the frontend codebase. + + - Keys in codebase but not in DB → add (key = value = German plaintext) + - Keys in DB but not in codebase → remove (orphaned) + Returns summary dict. 
+ """ + codebaseKeys = _scanCodebaseKeys() + if not codebaseKeys: + logger.warning("i18n de-sync: codebase scan returned 0 keys — aborting") + return {"added": [], "removed": [], "keysCount": 0, "error": "Codebase scan returned 0 keys"} + + rows = db.getRecordset(UiLanguageSet, recordFilter={"id": "de"}) + if not rows: + raise HTTPException(status_code=503, detail="Deutsch-Master nicht in DB vorhanden.") + + row = dict(rows[0]) + cur: Dict[str, str] = dict(row.get("keys") or {}) + dbKeys = set(cur.keys()) + + added = sorted(codebaseKeys - dbKeys) + removed = sorted(dbKeys - codebaseKeys) + + for k in removed: + del cur[k] + for k in added: + cur[k] = k + + if not added and not removed: + return {"added": [], "removed": [], "keysCount": len(cur)} + + now = getUtcTimestamp() + row["keys"] = cur + row["sysModifiedAt"] = now + row["sysModifiedBy"] = userId + db.recordModify(UiLanguageSet, "de", row) + + logger.info("i18n de-master sync: +%d added, -%d removed, total=%d", len(added), len(removed), len(cur)) + return {"added": added, "removed": removed, "keysCount": len(cur)} + + +# --- Public ----------------------------------------------------------------- + + +@router.get("/codes") +async def list_language_codes(): + db = _publicMgmtDb() + rows = db.getRecordset(UiLanguageSet) + out = [] + for r in rows: + keys = r.get("keys") or {} + out.append( + { + "code": r["id"], + "label": r.get("label"), + "status": r.get("status"), + "isDefault": bool(r.get("isDefault")), + "keysCount": len(keys) if isinstance(keys, dict) else 0, + } + ) + return sorted(out, key=lambda x: (not x.get("isDefault"), x["code"])) + + +@router.get("/sets/{code}") +async def get_language_set(code: str): + db = _publicMgmtDb() + rows = db.getRecordset(UiLanguageSet, recordFilter={"id": code}) + if not rows: + raise HTTPException(status_code=404, detail="Sprachset nicht gefunden") + return _row_to_public(rows[0]) + + +# --- Auth user -------------------------------------------------------------- + + 
+class CreateLanguageBody(BaseModel): + code: str = Field(..., min_length=2, max_length=10) + label: str = Field(..., min_length=1, max_length=80) + + +def _validate_iso2_code(code: str) -> str: + c = code.strip().lower() + if not re.fullmatch(r"[a-z]{2}", c): + raise HTTPException( + status_code=400, detail="Nur ISO-639-1 Zwei-Buchstaben-Codes erlaubt." + ) + return c + + +def _run_create_language_job(userId: str, code: str, label: str, currentUser: User, mandateId: str) -> None: + """Background job: translate all German master keys via AI, persist, notify user.""" + loop = asyncio.new_event_loop() + try: + loop.run_until_complete(_run_create_language_job_async(userId, code, label, currentUser, mandateId)) + finally: + loop.close() + + +async def _run_create_language_job_async(userId: str, code: str, label: str, currentUser: User, mandateId: str) -> None: + try: + db = _publicMgmtDb() + rows = db.getRecordset(UiLanguageSet, recordFilter={"id": code}) + if not rows: + return + deKeys = _load_master_de_keys(db) + if not deKeys: + logger.error("i18n create job: no de master keys found") + return + + billingCb = _makeBillingCallback(currentUser, mandateId) + translated = await _translateBatch(deKeys, label, code, billingCallback=billingCb) + + finalKeys: Dict[str, str] = {} + for k in deKeys: + finalKeys[k] = translated.get(k, f"[{k}]") + + missingCount = sum(1 for k in deKeys if k not in translated) + finalStatus = "complete" if missingCount == 0 else "incomplete" + + now = getUtcTimestamp() + merged = dict(rows[0]) + merged["keys"] = finalKeys + merged["status"] = finalStatus + merged["label"] = label + merged["sysModifiedAt"] = now + merged["sysModifiedBy"] = userId + db.recordModify(UiLanguageSet, code, merged) + + statusHint = "" if finalStatus == "complete" else f" ({missingCount} Keys ohne Übersetzung)" + _createNotification( + userId, + NotificationType.SYSTEM, + title="Sprachset erstellt", + message=f"Die Sprache «{label}» ({code}) wurde per KI 
übersetzt{statusHint}.", + ) + logger.info("i18n create job done: code=%s, translated=%d/%d", code, len(translated), len(deKeys)) + except Exception as e: + logger.exception("create language job failed: %s", e) + _createNotification( + userId, + NotificationType.SYSTEM, + title="Sprachset fehlgeschlagen", + message=f"Fehler bei «{code}»: {e}", + ) + + +@router.post("/sets") +async def create_language_set( + request: Request, + body: CreateLanguageBody, + background: BackgroundTasks, + currentUser: User = Depends(getCurrentUser), +): + mandateId = _resolveMandateIdForAiI18n(request, currentUser) + code = _validate_iso2_code(body.code) + if code == "de": + raise HTTPException(status_code=400, detail="Das Standard-Set «de» kann nicht erneut angelegt werden.") + + db = _publicMgmtDb() + existing = db.getRecordset(UiLanguageSet, recordFilter={"id": code}) + if existing: + raise HTTPException(status_code=409, detail="Dieses Sprachset existiert bereits.") + + deKeys = _load_master_de_keys(db) + if not deKeys: + raise HTTPException(status_code=503, detail="Deutsch-Master nicht geseedet.") + + now = getUtcTimestamp() + uid = str(currentUser.id) + rec: dict = { + "id": code, + "label": body.label.strip(), + "keys": {}, + "status": "generating", + "isDefault": False, + "sysCreatedAt": now, + "sysCreatedBy": uid, + "sysModifiedAt": now, + "sysModifiedBy": uid, + } + db.recordCreate(UiLanguageSet, rec) + + background.add_task(_run_create_language_job, uid, code, body.label.strip(), currentUser, mandateId) + _createNotification( + uid, + NotificationType.SYSTEM, + title="Sprachset wird erzeugt", + message=f"Die Sprache «{code}» wird im Hintergrund per KI übersetzt.", + ) + return {"status": "accepted", "code": code} + + +async def _sync_non_de_set_with_de(db, code: str, userId: Optional[str], adminUser: Optional[User] = None) -> dict: + if code == "de": + raise HTTPException(status_code=400, detail="Das de-Set wird nicht per Update synchronisiert.") + rows = 
db.getRecordset(UiLanguageSet, recordFilter={"id": code}) + if not rows: + raise HTTPException(status_code=404, detail="Sprachset nicht gefunden") + deKeys = _load_master_de_keys(db) + row = dict(rows[0]) + cur: Dict[str, str] = dict(row.get("keys") or {}) + masterKeys = set(deKeys.keys()) + currentKeys = set(cur.keys()) + removed = list(currentKeys - masterKeys) + added = list(masterKeys - currentKeys) + for k in removed: + del cur[k] + + translatedCount = 0 + if added: + toTranslate = {k: deKeys[k] for k in added} + langLabel = row.get("label") or code + billingCb = None + if adminUser: + memberIds = _userMemberMandateIds(adminUser) + if memberIds: + billingCb = _makeBillingCallback(adminUser, memberIds[0]) + try: + translated = await _translateBatch(toTranslate, langLabel, code, billingCallback=billingCb) + for k in added: + cur[k] = translated.get(k, f"[{k}]") + translatedCount = sum(1 for k in added if k in translated) + except Exception as e: + logger.error("AI translation during sync failed for %s: %s", code, e) + for k in added: + cur[k] = f"[{k}]" + + now = getUtcTimestamp() + row["keys"] = cur + untranslated = len(added) - translatedCount + row["status"] = "complete" if untranslated == 0 else "incomplete" + row["sysModifiedAt"] = now + row["sysModifiedBy"] = userId + db.recordModify(UiLanguageSet, code, row) + return {"code": code, "added": added, "removed": removed, "translated": translatedCount, "keysCount": len(cur)} + + +@router.put("/sets/sync-de") +async def sync_de_master_from_codebase( + adminUser: User = Depends(requireSysAdminRole), +): + """Scan frontend codebase for t() keys and synchronise the de master set. + + Adds new keys (key=value=German plaintext), removes orphaned keys. 
+ """ + db = getMgmtInterface(adminUser, mandateId=None).db + return _syncDeMasterFromCodebase(db, str(adminUser.id)) + + +@router.put("/sets/update-all") +async def update_all_language_sets( + adminUser: User = Depends(requireSysAdminRole), +): + """Sync de-master from codebase, then update all non-de sets via AI.""" + db = getMgmtInterface(adminUser, mandateId=None).db + + deSync = _syncDeMasterFromCodebase(db, str(adminUser.id)) + + rows = db.getRecordset(UiLanguageSet) + results = [] + for r in rows: + cid = r["id"] + if cid == "de": + continue + res = await _sync_non_de_set_with_de(db, cid, str(adminUser.id), adminUser=adminUser) + results.append(res) + return {"deSync": deSync, "updated": results} + + +@router.put("/sets/{code}") +async def update_language_set( + code: str, + adminUser: User = Depends(requireSysAdminRole), +): + c = code.strip().lower() + if c in ("update-all", "sync-de"): + raise HTTPException(status_code=400, detail="Ungültiger Sprachcode.") + db = getMgmtInterface(adminUser, mandateId=None).db + + deSync = _syncDeMasterFromCodebase(db, str(adminUser.id)) + + langResult = await _sync_non_de_set_with_de(db, c, str(adminUser.id), adminUser=adminUser) + langResult["deSync"] = deSync + return langResult + + +@router.delete("/sets/{code}") +async def delete_language_set( + code: str, + adminUser: User = Depends(requireSysAdminRole), +): + c = code.strip().lower() + if c == "de": + raise HTTPException(status_code=400, detail="Das Standard-Set «de» darf nicht gelöscht werden.") + db = getMgmtInterface(adminUser, mandateId=None).db + ok = db.recordDelete(UiLanguageSet, c) + if not ok: + raise HTTPException(status_code=404, detail="Sprachset nicht gefunden") + return {"deleted": c} + + +@router.get("/sets/{code}/download", dependencies=[Depends(getCurrentUser)]) +async def download_language_set( + code: str, + currentUser: User = Depends(getCurrentUser), +): + db = _publicMgmtDb() + rows = db.getRecordset(UiLanguageSet, recordFilter={"id": 
code.strip().lower()}) + if not rows: + raise HTTPException(status_code=404, detail="Sprachset nicht gefunden") + payload = _row_to_public(rows[0]) + raw = json.dumps(payload.get("keys", {}), ensure_ascii=False, indent=2) + return Response( + content=raw, + media_type="application/json", + headers={ + "Content-Disposition": f'attachment; filename="ui-language-{code}.json"' + }, + ) + + +# --- Export / Import (full DB) ----------------------------------------------- + + +@router.get("/export") +async def export_all_language_sets( + adminUser: User = Depends(requireSysAdminRole), +): + """Export the complete language database as a JSON array (all sets with full metadata).""" + db = getMgmtInterface(adminUser, mandateId=None).db + rows = db.getRecordset(UiLanguageSet) + payload = [] + for r in rows: + payload.append({ + "id": r["id"], + "label": r.get("label", ""), + "keys": dict(r.get("keys") or {}), + "status": r.get("status", "complete"), + "isDefault": bool(r.get("isDefault", False)), + }) + payload.sort(key=lambda x: (not x.get("isDefault"), x["id"])) + raw = json.dumps(payload, ensure_ascii=False, indent=2) + return Response( + content=raw, + media_type="application/json", + headers={ + "Content-Disposition": 'attachment; filename="ui-languages-export.json"' + }, + ) + + +@router.post("/import") +async def import_language_sets( + file: UploadFile = File(...), + adminUser: User = Depends(requireSysAdminRole), +): + """Import a previously exported language database JSON. + + Behaviour per set in the uploaded array: + - If the set already exists in DB → overwrite keys, label, status, isDefault + - If the set does not exist → create it + Existing sets NOT present in the upload are left untouched (no deletion). 
+ """ + if not file.filename or not file.filename.endswith(".json"): + raise HTTPException(status_code=400, detail="Nur .json-Dateien erlaubt.") + + try: + raw = await file.read() + data = json.loads(raw.decode("utf-8")) + except (json.JSONDecodeError, UnicodeDecodeError) as e: + raise HTTPException(status_code=400, detail=f"Ungültiges JSON: {e}") + + if not isinstance(data, list): + raise HTTPException(status_code=400, detail="JSON muss ein Array von Sprachsets sein.") + + db = getMgmtInterface(adminUser, mandateId=None).db + now = getUtcTimestamp() + uid = str(adminUser.id) + created = [] + updated = [] + + for entry in data: + if not isinstance(entry, dict): + continue + code = str(entry.get("id", "")).strip().lower() + if not code or len(code) < 2: + continue + keys = entry.get("keys") + if not isinstance(keys, dict): + continue + + label = str(entry.get("label", code)) + entryStatus = str(entry.get("status", "complete")) + isDefault = bool(entry.get("isDefault", False)) + + existing = db.getRecordset(UiLanguageSet, recordFilter={"id": code}) + if existing: + row = dict(existing[0]) + row["keys"] = keys + row["label"] = label + row["status"] = entryStatus + row["isDefault"] = isDefault + row["sysModifiedAt"] = now + row["sysModifiedBy"] = uid + db.recordModify(UiLanguageSet, code, row) + updated.append(code) + else: + rec = { + "id": code, + "label": label, + "keys": keys, + "status": entryStatus, + "isDefault": isDefault, + "sysCreatedAt": now, + "sysCreatedBy": uid, + "sysModifiedAt": now, + "sysModifiedBy": uid, + } + db.recordCreate(UiLanguageSet, rec) + created.append(code) + + logger.info("i18n import: created=%s, updated=%s", created, updated) + return {"created": created, "updated": updated, "totalProcessed": len(created) + len(updated)} diff --git a/modules/routes/routeSubscription.py b/modules/routes/routeSubscription.py index 97a7f23b..3e25ec39 100644 --- a/modules/routes/routeSubscription.py +++ b/modules/routes/routeSubscription.py @@ -357,6 
+357,34 @@ def _buildEnrichedSubscriptions() -> List[Dict[str, Any]]: operativeValues = {s.value for s in OPERATIVE_STATUSES} + operativeMandateIds = list({ + sub.get("mandateId") for sub in allSubs + if sub.get("mandateId") and sub.get("status") in operativeValues + }) + + userCountMap: Dict[str, int] = {} + instanceCountMap: Dict[str, int] = {} + if operativeMandateIds: + try: + from modules.datamodels.datamodelMembership import UserMandate + from modules.datamodels.datamodelFeatures import FeatureInstance + from modules.security.rootAccess import getRootDbAppConnector + appDb = getRootDbAppConnector() + allUM = appDb.getRecordset(UserMandate, recordFilter={"mandateId": operativeMandateIds}) + for um in (allUM or []): + mid = um.get("mandateId") if isinstance(um, dict) else getattr(um, "mandateId", None) + if mid: + userCountMap[mid] = userCountMap.get(mid, 0) + 1 + allFI = appDb.getRecordset(FeatureInstance, recordFilter={"mandateId": operativeMandateIds}) + for fi in (allFI or []): + fid = fi if isinstance(fi, dict) else fi.__dict__ + if fid.get("enabled"): + mid = fid.get("mandateId") + if mid: + instanceCountMap[mid] = instanceCountMap.get(mid, 0) + 1 + except Exception as e: + logger.warning("Batch count for subscriptions failed: %s", e) + enriched = [] for sub in allSubs: mid = sub.get("mandateId", "") @@ -369,12 +397,8 @@ def _buildEnrichedSubscriptions() -> List[Dict[str, Any]]: if sub.get("status") in operativeValues: userPrice = sub.get("snapshotPricePerUserCHF", 0) or 0 instPrice = sub.get("snapshotPricePerInstanceCHF", 0) or 0 - try: - userCount = subInterface.countActiveUsers(mid) - instanceCount = subInterface.countActiveFeatureInstances(mid) - except Exception: - userCount = 0 - instanceCount = 0 + userCount = userCountMap.get(mid, 0) + instanceCount = instanceCountMap.get(mid, 0) sub["monthlyRevenueCHF"] = round(userPrice * userCount + instPrice * instanceCount, 2) sub["activeUsers"] = userCount sub["activeInstances"] = instanceCount @@ -492,13 
+516,13 @@ def _getDataVolumeUsage( mgmtDb = getMgmtInterface().db totalFileBytes = 0 - for instId in instIds: - files = mgmtDb.getRecordset(FileItem, recordFilter={"featureInstanceId": instId}) - for f in files: + if instIds: + files = mgmtDb.getRecordset(FileItem, recordFilter={"featureInstanceId": instIds}) + for f in (files or []): size = f.get("fileSize") if isinstance(f, dict) else getattr(f, "fileSize", 0) totalFileBytes += (size or 0) mandateFiles = mgmtDb.getRecordset(FileItem, recordFilter={"mandateId": mandateId}) - for f in mandateFiles: + for f in (mandateFiles or []): size = f.get("fileSize") if isinstance(f, dict) else getattr(f, "fileSize", 0) totalFileBytes += (size or 0) filesMB = round(totalFileBytes / (1024 * 1024), 2) diff --git a/modules/routes/routeSystem.py b/modules/routes/routeSystem.py index 7e452be3..03c58a18 100644 --- a/modules/routes/routeSystem.py +++ b/modules/routes/routeSystem.py @@ -187,8 +187,6 @@ def _buildDynamicBlock( # Convert Pydantic model to dict if needed if hasattr(featureLabel, 'model_dump'): featureLabel = featureLabel.model_dump() - elif hasattr(featureLabel, 'dict'): - featureLabel = featureLabel.dict() elif not isinstance(featureLabel, dict): # Fallback: try to access as attributes featureLabel = {"de": getattr(featureLabel, 'de', instance.featureCode), "en": getattr(featureLabel, 'en', instance.featureCode)} @@ -392,8 +390,10 @@ def _buildStaticBlocks( if section.get("adminOnly") and not isSysAdmin: continue - # Handle sections with subgroups - if "subgroups" in section: + hasSubgroups = "subgroups" in section + hasItems = "items" in section and len(section["items"]) > 0 + + if hasSubgroups: filteredSubgroups = [] for subgroup in section["subgroups"]: subItems = _filterItems( @@ -406,24 +406,29 @@ def _buildStaticBlocks( "order": subgroup.get("order", 50), "items": subItems, }) - + filteredSubgroups.sort(key=lambda s: s["order"]) - - if filteredSubgroups: + + topLevelItems = [] + if hasItems: + topLevelItems = 
_filterItems( + section["items"], language, isSysAdmin, roleIds, hasGlobalPermission + ) + + if filteredSubgroups or topLevelItems: blocks.append({ "type": "static", "id": section["id"], "title": section["title"].get(language, section["title"].get("en", section["id"])), "order": section.get("order", 50), - "items": [], + "items": topLevelItems, "subgroups": filteredSubgroups, }) else: - # Standard flat section filteredItems = _filterItems( section.get("items", []), language, isSysAdmin, roleIds, hasGlobalPermission ) - + if filteredItems: blocks.append({ "type": "static", diff --git a/modules/routes/routeWorkflowDashboard.py b/modules/routes/routeWorkflowDashboard.py new file mode 100644 index 00000000..687f4206 --- /dev/null +++ b/modules/routes/routeWorkflowDashboard.py @@ -0,0 +1,267 @@ +# Copyright (c) 2025 Patrick Motsch +# All rights reserved. +""" +System-level Workflow Runs Dashboard API. + +Provides cross-feature, cross-mandate access to workflow runs +with RBAC scoping: user sees own runs, mandate admin sees mandate runs, +sysadmin sees all runs. 
+""" + +import logging +import math +from typing import Optional +from fastapi import APIRouter, Depends, Request, Query, Path, HTTPException +from slowapi import Limiter +from slowapi.util import get_remote_address + +from modules.auth.authentication import getRequestContext, RequestContext +from modules.interfaces.interfaceDbApp import getRootInterface +from modules.connectors.connectorDbPostgre import DatabaseConnector +from modules.shared.configuration import APP_CONFIG +from modules.datamodels.datamodelPagination import PaginationParams +from modules.features.graphicalEditor.datamodelFeatureGraphicalEditor import ( + AutoRun, AutoStepLog, AutoWorkflow, AutoTask, +) + +logger = logging.getLogger(__name__) +limiter = Limiter(key_func=get_remote_address) + +router = APIRouter(prefix="/api/system/workflow-runs", tags=["WorkflowDashboard"]) + +_GREENFIELD_DB = "poweron_graphicaleditor" + + +def _getDb() -> DatabaseConnector: + return DatabaseConnector( + dbHost=APP_CONFIG.get("DB_HOST", "localhost"), + dbDatabase=_GREENFIELD_DB, + dbUser=APP_CONFIG.get("DB_USER"), + dbPassword=APP_CONFIG.get("DB_PASSWORD_SECRET") or APP_CONFIG.get("DB_PASSWORD"), + dbPort=int(APP_CONFIG.get("DB_PORT", 5432)), + userId=None, + ) + + +def _getUserMandateIds(userId: str) -> list[str]: + """Get mandate IDs the user is a member of.""" + rootIface = getRootInterface() + memberships = rootIface.getUserMandates(userId) + return [um.mandateId for um in memberships if um.mandateId and um.enabled] + + +def _getAdminMandateIds(userId: str, mandateIds: list) -> list: + """Batch-check which mandates the user is admin for (2 SQL queries total).""" + if not mandateIds: + return [] + rootIface = getRootInterface() + from modules.datamodels.datamodelMembership import UserMandateRole + allRoles = rootIface.db.getRecordset(UserMandateRole, recordFilter={ + "userId": userId, "mandateId": mandateIds, + }) + if not allRoles: + return [] + + roleIds = set() + roleToMandate: dict = {} + for r in allRoles: 
+ row = r if isinstance(r, dict) else r.__dict__ + rid = row.get("roleId") + mid = row.get("mandateId") + if rid: + roleIds.add(rid) + roleToMandate.setdefault(rid, set()).add(mid) + + if not roleIds: + return [] + + from modules.datamodels.datamodelRbac import MandateRole + roleRecords = rootIface.db.getRecordset(MandateRole, recordFilter={"id": list(roleIds)}) + adminMandates: set = set() + for role in (roleRecords or []): + row = role if isinstance(role, dict) else role.__dict__ + if row.get("isAdmin"): + rid = row.get("id") + if rid and rid in roleToMandate: + adminMandates.update(roleToMandate[rid]) + + return [mid for mid in mandateIds if mid in adminMandates] + + +def _scopedRunFilter(context: RequestContext) -> Optional[dict]: + """ + Build a DB filter dict based on RBAC: + - sysadmin: None (no filter) + - mandate admin: mandateId IN user's mandates + - normal user: ownerId = userId + """ + if context.hasSysAdminRole: + return None + + userId = str(context.user.id) if context.user else None + if not userId: + return {"ownerId": "__impossible__"} + + mandateIds = _getUserMandateIds(userId) + adminMandateIds = _getAdminMandateIds(userId, mandateIds) + + if adminMandateIds: + return {"mandateId": adminMandateIds} + + return {"ownerId": userId} + + +@router.get("") +@limiter.limit("60/minute") +def get_workflow_runs( + request: Request, + limit: int = Query(50, ge=1, le=200), + offset: int = Query(0, ge=0), + status: Optional[str] = Query(None, description="Filter by status"), + mandateId: Optional[str] = Query(None, description="Filter by mandate"), + context: RequestContext = Depends(getRequestContext), +) -> dict: + """List workflow runs with RBAC scoping (SQL-paginated).""" + db = _getDb() + if not db._ensureTableExists(AutoRun): + return {"runs": [], "total": 0, "limit": limit, "offset": offset} + + baseFilter = _scopedRunFilter(context) + recordFilter = dict(baseFilter) if baseFilter else {} + + if status: + recordFilter["status"] = status + if mandateId: 
+ recordFilter["mandateId"] = mandateId + + page = (offset // limit) + 1 if limit > 0 else 1 + pagination = PaginationParams( + page=page, + pageSize=limit, + sort=[{"field": "sysCreatedAt", "direction": "desc"}], + ) + + result = db.getRecordsetPaginated( + AutoRun, + pagination=pagination, + recordFilter=recordFilter if recordFilter else None, + ) + pageRuns = result.get("items", []) if isinstance(result, dict) else result.items + total = result.get("totalItems", 0) if isinstance(result, dict) else result.totalItems + + wfIds = list({r.get("workflowId") for r in pageRuns if r.get("workflowId")}) + wfLabelMap = {} + if wfIds and db._ensureTableExists(AutoWorkflow): + wfs = db.getRecordset(AutoWorkflow, recordFilter={"id": wfIds}) + for wf in (wfs or []): + wfLabelMap[wf.get("id")] = wf.get("label") or wf.get("id") + + runs = [] + for r in pageRuns: + row = dict(r) + row["workflowLabel"] = wfLabelMap.get(row.get("workflowId"), row.get("workflowId") or "—") + runs.append(row) + + return {"runs": runs, "total": total, "limit": limit, "offset": offset} + + +@router.get("/metrics") +@limiter.limit("60/minute") +def get_workflow_metrics( + request: Request, + context: RequestContext = Depends(getRequestContext), +) -> dict: + """Aggregated metrics across all accessible workflow runs (SQL COUNT).""" + db = _getDb() + if not db._ensureTableExists(AutoRun): + return {"totalRuns": 0, "runsByStatus": {}, "totalTokens": 0, "totalCredits": 0} + + baseFilter = _scopedRunFilter(context) + + countPagination = PaginationParams(page=1, pageSize=1) + countResult = db.getRecordsetPaginated(AutoRun, pagination=countPagination, recordFilter=baseFilter) + totalRuns = countResult.get("totalItems", 0) if isinstance(countResult, dict) else countResult.totalItems + + statusValues = db.getDistinctColumnValues(AutoRun, "status", recordFilter=baseFilter) + + runsByStatus = {} + for sv in statusValues: + statusFilter = dict(baseFilter) if baseFilter else {} + statusFilter["status"] = sv + sr = 
db.getRecordsetPaginated(AutoRun, pagination=PaginationParams(page=1, pageSize=1), recordFilter=statusFilter) + runsByStatus[sv] = sr.get("totalItems", 0) if isinstance(sr, dict) else sr.totalItems + + totalTokens = 0 + totalCredits = 0.0 + if totalRuns > 0 and totalRuns <= 10000: + allRuns = db.getRecordset(AutoRun, recordFilter=baseFilter, fieldFilter=["costTokens", "costCredits"]) or [] + for r in allRuns: + totalTokens += r.get("costTokens", 0) or 0 + totalCredits += r.get("costCredits", 0.0) or 0.0 + + workflowCount = 0 + activeWorkflows = 0 + if db._ensureTableExists(AutoWorkflow): + wfFilter: dict = {"isTemplate": False} + if not context.hasSysAdminRole: + userId = str(context.user.id) if context.user else None + mandateIds = _getUserMandateIds(userId) if userId else [] + if mandateIds: + wfFilter["mandateId"] = mandateIds + else: + wfFilter["mandateId"] = "__impossible__" + + wfCount = db.getRecordsetPaginated(AutoWorkflow, pagination=PaginationParams(page=1, pageSize=1), recordFilter=wfFilter) + workflowCount = wfCount.get("totalItems", 0) if isinstance(wfCount, dict) else wfCount.totalItems + + activeFilter = dict(wfFilter) + activeFilter["active"] = True + activeCount = db.getRecordsetPaginated(AutoWorkflow, pagination=PaginationParams(page=1, pageSize=1), recordFilter=activeFilter) + activeWorkflows = activeCount.get("totalItems", 0) if isinstance(activeCount, dict) else activeCount.totalItems + + return { + "totalRuns": totalRuns, + "runsByStatus": runsByStatus, + "totalTokens": totalTokens, + "totalCredits": round(totalCredits, 4), + "workflowCount": workflowCount, + "activeWorkflows": activeWorkflows, + } + + +@router.get("/{runId}/steps") +@limiter.limit("60/minute") +def get_run_steps( + request: Request, + runId: str = Path(..., description="Run ID"), + context: RequestContext = Depends(getRequestContext), +) -> dict: + """Get step logs for a specific run (with access check).""" + db = _getDb() + if not db._ensureTableExists(AutoRun): + raise 
HTTPException(status_code=404, detail="Run not found") + + runs = db.getRecordset(AutoRun, recordFilter={"id": runId}) + if not runs: + raise HTTPException(status_code=404, detail="Run not found") + run = dict(runs[0]) + + if not context.hasSysAdminRole: + userId = str(context.user.id) if context.user else None + runOwner = run.get("ownerId") + runMandate = run.get("mandateId") + + if runOwner == userId: + pass + elif runMandate and userId and runMandate in _getAdminMandateIds(userId, [runMandate]): + pass + else: + raise HTTPException(status_code=403, detail="Access denied") + + if not db._ensureTableExists(AutoStepLog): + return {"steps": []} + + records = db.getRecordset(AutoStepLog, recordFilter={"runId": runId}) + steps = [dict(r) for r in records] if records else [] + steps.sort(key=lambda s: s.get("startedAt") or 0) + return {"steps": steps} diff --git a/modules/serviceCenter/services/serviceAgent/coreTools/_documentTools.py b/modules/serviceCenter/services/serviceAgent/coreTools/_documentTools.py index b9c113b1..dac3308b 100644 --- a/modules/serviceCenter/services/serviceAgent/coreTools/_documentTools.py +++ b/modules/serviceCenter/services/serviceAgent/coreTools/_documentTools.py @@ -304,11 +304,40 @@ def _registerDocumentTools(registry: ToolRegistry, services): imageData = fileContent.get("data", "") mimeType = fileMimeType + # 4) PDF page rendering: render the requested page as an image via PyMuPDF if not imageData: chatService = services.chat fileInfo = chatService.getFileInfo(fileId) if hasattr(chatService, "getFileInfo") else None - fileName = fileInfo.get("fileName", fileId) if fileInfo else fileId - fileMime = fileInfo.get("mimeType", "unknown") if fileInfo else "unknown" + fileMime = (fileInfo.get("mimeType", "") if fileInfo else "").lower() + if fileMime == "application/pdf" or (fileInfo and (fileInfo.get("fileName", "") or "").lower().endswith(".pdf")): + try: + import fitz as _fitz + rawContent = chatService.getFileContent(fileId) if not fileContent else 
fileContent + rawData = rawContent.get("data", "") if rawContent else "" + if isinstance(rawData, str) and len(rawData) > 100: + pdfBytes = _b64.b64decode(rawData) + elif isinstance(rawData, bytes): + pdfBytes = rawData + else: + pdfBytes = None + if pdfBytes: + doc = _fitz.open(stream=pdfBytes, filetype="pdf") + targetPage = pageIndex if pageIndex is not None else 0 + if 0 <= targetPage < len(doc): + page = doc[targetPage] + pix = page.get_pixmap(dpi=200) + imageData = _b64.b64encode(pix.tobytes("png")).decode("ascii") + mimeType = "image/png" + logger.info("describeImage: rendered PDF page %d as image (%dx%d)", targetPage, pix.width, pix.height) + doc.close() + except Exception as pdfErr: + logger.warning("describeImage: PDF page rendering failed: %s", pdfErr) + + if not imageData: + chatService = services.chat + _errFileInfo = chatService.getFileInfo(fileId) if hasattr(chatService, "getFileInfo") else None + fileName = _errFileInfo.get("fileName", fileId) if _errFileInfo else fileId + fileMime = _errFileInfo.get("mimeType", "unknown") if _errFileInfo else "unknown" return ToolResult(toolCallId="", toolName="describeImage", success=False, error=f"No image data found in '{fileName}' (type: {fileMime}). " f"This file likely contains text, not images. Use readFile(fileId=\"{fileId}\") to access its text content.") diff --git a/modules/serviceCenter/services/serviceAgent/coreTools/_featureSubAgentTools.py b/modules/serviceCenter/services/serviceAgent/coreTools/_featureSubAgentTools.py index 756079ad..04e32ad8 100644 --- a/modules/serviceCenter/services/serviceAgent/coreTools/_featureSubAgentTools.py +++ b/modules/serviceCenter/services/serviceAgent/coreTools/_featureSubAgentTools.py @@ -49,6 +49,19 @@ def _getOrCreateFeatureDbConnector(featureDbName: str, userId: str): return conn +def clearFeatureQueryCache(featureInstanceId: Optional[str] = None) -> int: + """Clear the feature data query cache. 
If featureInstanceId given, only for that instance.""" + if featureInstanceId: + prefix = f"{featureInstanceId}:" + keys = [k for k in _featureQueryCache if k.startswith(prefix)] + else: + keys = list(_featureQueryCache.keys()) + for k in keys: + del _featureQueryCache[k] + logger.info(f"Feature query cache cleared: {len(keys)} entries removed (instance={featureInstanceId or 'all'})") + return len(keys) + + def _registerFeatureSubAgentTools(registry: ToolRegistry, services): """Auto-extracted from registerCoreTools.""" # ---- Feature Data Sub-Agent tool ---- diff --git a/modules/serviceCenter/services/serviceAgent/featureDataAgent.py b/modules/serviceCenter/services/serviceAgent/featureDataAgent.py index e74f4b55..9db820ca 100644 --- a/modules/serviceCenter/services/serviceAgent/featureDataAgent.py +++ b/modules/serviceCenter/services/serviceAgent/featureDataAgent.py @@ -283,18 +283,8 @@ def _buildSchemaContext( selectedTables: List[Dict[str, Any]], ) -> str: """Build a system-level context block describing available tables.""" - parts = [ - f"You are a data query assistant for the '{featureCode}' feature", - ] - if instanceLabel: - parts[0] += f' (instance: "{instanceLabel}")' - parts[0] += "." - parts.append( - "You have access to the following data tables. " - "Use browseTable to list rows, queryTable to filter/search, " - "and aggregateTable for SUM/COUNT/AVG/MIN/MAX with optional GROUP BY." 
- ) - parts.append("") + tableNames = [] + tableBlocks = [] for obj in selectedTables: meta = obj.get("meta", {}) @@ -302,11 +292,38 @@ def _buildSchemaContext( fields = meta.get("fields", []) label = obj.get("label", {}) labelStr = label.get("en") or label.get("de") or tbl - parts.append(f"Table: {tbl} ({labelStr})") + tableNames.append(tbl) + block = f" Table: {tbl} ({labelStr})" if fields: - parts.append(f" Fields: {', '.join(fields)}") - parts.append("") + block += f"\n Fields: {', '.join(fields)}" + tableBlocks.append(block) + parts = [ + f"You are a data query assistant for the '{featureCode}' feature", + ] + if instanceLabel: + parts[0] += f' (instance: "{instanceLabel}")' + parts[0] += "." + + parts.append("") + parts.append("AVAILABLE TABLES (use EXACTLY these names as tableName parameter):") + parts.extend(tableBlocks) + parts.append("") + parts.append( + "IMPORTANT RULES:\n" + f"- The ONLY valid tableName values are: {tableNames}\n" + "- Do NOT invent table names, do NOT use UUIDs or IDs as table names.\n" + "- Field names are plain column names (e.g. 'accountNumber', 'periodYear').\n" + " Do NOT prefix field names with UUIDs, table names, or dots.\n" + "- If unsure about column names, call browseTable with only tableName (no fields)\n" + " to see actual columns first." + ) + parts.append("") + parts.append( + "Tools: browseTable (list rows), queryTable (filter/search), " + "aggregateTable (SUM/COUNT/AVG/MIN/MAX with optional GROUP BY)." + ) + parts.append("") parts.append( "Answer the user's question using the data from these tables. " "Be precise, cite row counts, and format data clearly." 
diff --git a/modules/serviceCenter/services/serviceAgent/featureDataProvider.py b/modules/serviceCenter/services/serviceAgent/featureDataProvider.py index 5b68d3ee..a44dcd07 100644 --- a/modules/serviceCenter/services/serviceAgent/featureDataProvider.py +++ b/modules/serviceCenter/services/serviceAgent/featureDataProvider.py @@ -78,6 +78,15 @@ class FeatureDataProvider: """ _validateTableName(tableName) conn = self._db.connection + + if fields: + invalid = [f for f in fields if not _isValidIdentifier(f)] + if invalid: + return { + "rows": [], "total": 0, "limit": limit, "offset": offset, + "error": f"Invalid field name(s): {', '.join(invalid)}. Use getActualColumns to discover valid column names.", + } + scopeFilter = _buildScopeFilter(tableName, featureInstanceId, mandateId, dbConnection=conn) extraWhere, extraParams = _buildFilterClauses(extraFilters) @@ -105,6 +114,10 @@ class FeatureDataProvider: return {"rows": rows, "total": total, "limit": limit, "offset": offset} except Exception as e: logger.error(f"browseTable({tableName}) failed: {e}") + try: + conn.rollback() + except Exception: + pass return {"rows": [], "total": 0, "limit": limit, "offset": offset, "error": str(e)} def aggregateTable( @@ -164,6 +177,10 @@ class FeatureDataProvider: } except Exception as e: logger.error(f"aggregateTable({tableName}, {aggregate}({field})) failed: {e}") + try: + conn.rollback() + except Exception: + pass return {"rows": [], "error": str(e), "aggregate": aggregate, "field": field, "groupBy": groupBy} def queryTable( @@ -185,6 +202,15 @@ class FeatureDataProvider: """ _validateTableName(tableName) conn = self._db.connection + + if fields: + invalid = [f for f in fields if not _isValidIdentifier(f)] + if invalid: + return { + "rows": [], "total": 0, "limit": limit, "offset": offset, + "error": f"Invalid field name(s): {', '.join(invalid)}. 
Use getActualColumns to discover valid column names.", + } + scopeFilter = _buildScopeFilter(tableName, featureInstanceId, mandateId, dbConnection=conn) combinedFilters = list(filters or []) + list(extraFilters or []) @@ -214,6 +240,10 @@ class FeatureDataProvider: return {"rows": rows, "total": total, "limit": limit, "offset": offset} except Exception as e: logger.error(f"queryTable({tableName}) failed: {e}") + try: + conn.rollback() + except Exception: + pass return {"rows": [], "total": 0, "limit": limit, "offset": offset, "error": str(e)} diff --git a/modules/serviceCenter/services/serviceAgent/mainServiceAgent.py b/modules/serviceCenter/services/serviceAgent/mainServiceAgent.py index b371e01e..0d5f3178 100644 --- a/modules/serviceCenter/services/serviceAgent/mainServiceAgent.py +++ b/modules/serviceCenter/services/serviceAgent/mainServiceAgent.py @@ -312,9 +312,13 @@ class AgentService: if tb.id == "workflow": try: from modules.serviceCenter.services.serviceAgent.workflowTools import getWorkflowToolDefinitions + from modules.serviceCenter.services.serviceAgent.datamodelAgent import ToolDefinition wfDefs = getWorkflowToolDefinitions() - for toolDef in wfDefs: - registry.registerFromDefinition(toolDef, toolDef._handler if hasattr(toolDef, "_handler") else None) + for rawDef in wfDefs: + handler = rawDef.get("handler") + defFields = {k: v for k, v in rawDef.items() if k != "handler"} + toolDef = ToolDefinition(**defFields) + registry.registerFromDefinition(toolDef, handler) logger.info("Registered %d workflow tools from toolbox", len(wfDefs)) except Exception as e: logger.warning("Could not register workflow tools: %s", e) diff --git a/modules/serviceCenter/services/serviceAi/subContentExtraction.py b/modules/serviceCenter/services/serviceAi/subContentExtraction.py index 1d1236da..e050bb67 100644 --- a/modules/serviceCenter/services/serviceAi/subContentExtraction.py +++ b/modules/serviceCenter/services/serviceAi/subContentExtraction.py @@ -430,7 +430,7 @@ class 
ContentExtractor: # Debug-Log (harmonisiert) self.services.utils.writeDebugFile( - json.dumps([part.dict() for part in allContentParts], indent=2, default=str), + json.dumps([part.model_dump() for part in allContentParts], indent=2, default=str), "content_extraction_result" ) diff --git a/modules/serviceCenter/services/serviceAi/subDocumentIntents.py b/modules/serviceCenter/services/serviceAi/subDocumentIntents.py index 42dfef14..7a462177 100644 --- a/modules/serviceCenter/services/serviceAi/subDocumentIntents.py +++ b/modules/serviceCenter/services/serviceAi/subDocumentIntents.py @@ -105,7 +105,7 @@ class DocumentIntentAnalyzer: # Debug-Log (harmonisiert) self.services.utils.writeDebugFile( - json.dumps([intent.dict() for intent in documentIntents], indent=2), + json.dumps([intent.model_dump() for intent in documentIntents], indent=2), "document_intent_analysis_result" ) diff --git a/modules/serviceCenter/services/serviceGeneration/mainServiceGeneration.py b/modules/serviceCenter/services/serviceGeneration/mainServiceGeneration.py index 99da173e..b9377404 100644 --- a/modules/serviceCenter/services/serviceGeneration/mainServiceGeneration.py +++ b/modules/serviceCenter/services/serviceGeneration/mainServiceGeneration.py @@ -99,11 +99,7 @@ class GenerationService: if mime_type == "application/json": # Erstelle ActionDocument-Format mit validationMetadata und documentData if hasattr(document_data, 'model_dump'): - # Pydantic v2 document_data_dict = document_data.model_dump() - elif hasattr(document_data, 'dict'): - # Pydantic v1 - document_data_dict = document_data.dict() elif isinstance(document_data, dict): document_data_dict = document_data elif isinstance(document_data, str): diff --git a/modules/serviceCenter/services/serviceGeneration/subStructureGenerator.py b/modules/serviceCenter/services/serviceGeneration/subStructureGenerator.py index 62e72c69..c2438fc0 100644 --- a/modules/serviceCenter/services/serviceGeneration/subStructureGenerator.py +++ 
b/modules/serviceCenter/services/serviceGeneration/subStructureGenerator.py @@ -130,8 +130,8 @@ class StructureGenerator: # Convert ContentParts to dict format for JSON serialization contentPartsList = [] for part in contentParts: - if hasattr(part, 'dict'): - partDict = part.dict() + if hasattr(part, 'model_dump'): + partDict = part.model_dump() elif isinstance(part, dict): partDict = part else: diff --git a/modules/shared/frontendTypes.py b/modules/shared/frontendTypes.py index 9b6d04e7..ab3e6939 100644 --- a/modules/shared/frontendTypes.py +++ b/modules/shared/frontendTypes.py @@ -56,19 +56,48 @@ class FrontendType(str, Enum): SHAREPOINT_FOLDER = "sharepointFolder" """SharePoint folder selector - requires connectionReference parameter in same action to load folders""" - - # Additional custom types can be added here as needed - # Examples: - # OUTLOOK_FOLDER = "outlookFolder" - # JIRA_PROJECT = "jiraProject" + + SHAREPOINT_FILE = "sharepointFile" + """SharePoint file selector - requires connectionReference parameter""" + + CLICKUP_LIST = "clickupList" + """ClickUp list selector - requires connectionReference parameter""" + + CLICKUP_TASK = "clickupTask" + """ClickUp task selector - requires connectionReference parameter""" + + # Complex Structure Types (for graph editor node configs) + CASE_LIST = "caseList" + """Case list editor for flow.switch cases""" + + FIELD_BUILDER = "fieldBuilder" + """Field builder for input.form field definitions""" + + KEY_VALUE_ROWS = "keyValueRows" + """Key-value row editor for task update entries""" + + CRON = "cron" + """Cron expression builder""" + + CONDITION = "condition" + """Structured condition builder for flow.ifElse""" + + MAPPING_TABLE = "mappingTable" + """Mapping table editor for data.transform""" + + FILTER_EXPRESSION = "filterExpression" + """Filter expression builder for data.filter""" # Mapping of custom types to their API endpoint for dynamic options CUSTOM_TYPE_OPTIONS_API: Dict[FrontendType, str] = { 
FrontendType.USER_CONNECTION: "user.connection", - FrontendType.DOCUMENT_REFERENCE: "workflow.documentReference", # To be implemented - FrontendType.WORKFLOW_ACTION: "workflow.action", # To be implemented - FrontendType.SHAREPOINT_FOLDER: "sharepoint.folder", # Dynamic - requires connectionReference + FrontendType.DOCUMENT_REFERENCE: "workflow.documentReference", + FrontendType.WORKFLOW_ACTION: "workflow.action", + FrontendType.SHAREPOINT_FOLDER: "sharepoint.folder", + FrontendType.SHAREPOINT_FILE: "sharepoint.file", + FrontendType.CLICKUP_LIST: "clickup.list", + FrontendType.CLICKUP_TASK: "clickup.task", } # Mapping of custom types to their description @@ -93,6 +122,21 @@ CUSTOM_TYPE_DESCRIPTIONS: Dict[FrontendType, Dict[str, str]] = { "fr": "Dossier SharePoint", "de": "SharePoint-Ordner" }, + FrontendType.SHAREPOINT_FILE: { + "en": "SharePoint File", + "fr": "Fichier SharePoint", + "de": "SharePoint-Datei" + }, + FrontendType.CLICKUP_LIST: { + "en": "ClickUp List", + "fr": "Liste ClickUp", + "de": "ClickUp-Liste" + }, + FrontendType.CLICKUP_TASK: { + "en": "ClickUp Task", + "fr": "Tâche ClickUp", + "de": "ClickUp-Aufgabe" + }, } diff --git a/modules/system/mainSystem.py b/modules/system/mainSystem.py index 44467114..9d65ef5b 100644 --- a/modules/system/mainSystem.py +++ b/modules/system/mainSystem.py @@ -35,9 +35,10 @@ FEATURE_ICON = "mdi-cog" # icon: Wird intern gehalten aber NICHT in der API Response zurückgegeben NAVIGATION_SECTIONS = [ + # ─── Meine Sicht (with top-level item + subgroups) ─── { "id": "system", - "title": {"en": "SYSTEM", "de": "SYSTEM", "fr": "SYSTÈME"}, + "title": {"en": "MY VIEW", "de": "MEINE SICHT", "fr": "MA VUE"}, "order": 10, "items": [ { @@ -49,75 +50,93 @@ NAVIGATION_SECTIONS = [ "order": 10, "public": True, }, - { - "id": "store", - "objectKey": "ui.system.store", - "label": {"en": "Store", "de": "Store", "fr": "Store"}, - "icon": "FaStore", - "path": "/store", - "order": 15, - "public": True, - }, - { - "id": "settings", - 
"objectKey": "ui.system.settings", - "label": {"en": "Settings", "de": "Einstellungen", "fr": "Paramètres"}, - "icon": "FaCog", - "path": "/settings", - "order": 20, - "public": True, - }, ], - }, - { - "id": "basedata", - "title": {"en": "BASE DATA", "de": "BASISDATEN", "fr": "DONNÉES DE BASE"}, - "order": 30, - "items": [ + "subgroups": [ + # ── Basisdaten ── { - "id": "prompts", - "objectKey": "ui.system.prompts", - "label": {"en": "Prompts", "de": "Prompts", "fr": "Prompts"}, - "icon": "FaLightbulb", - "path": "/basedata/prompts", - "order": 10, - }, - { - "id": "files", - "objectKey": "ui.system.files", - "label": {"en": "Files", "de": "Dateien", "fr": "Fichiers"}, - "icon": "FaRegFileAlt", - "path": "/basedata/files", + "id": "system-basedata", + "title": {"en": "Base Data", "de": "Basisdaten", "fr": "Données de base"}, "order": 20, + "items": [ + { + "id": "connections", + "objectKey": "ui.system.connections", + "label": {"en": "Connections", "de": "Verbindungen", "fr": "Connexions"}, + "icon": "FaLink", + "path": "/basedata/connections", + "order": 10, + }, + { + "id": "files", + "objectKey": "ui.system.files", + "label": {"en": "Files", "de": "Dateien", "fr": "Fichiers"}, + "icon": "FaRegFileAlt", + "path": "/basedata/files", + "order": 20, + }, + { + "id": "prompts", + "objectKey": "ui.system.prompts", + "label": {"en": "Prompts", "de": "Prompts", "fr": "Prompts"}, + "icon": "FaLightbulb", + "path": "/basedata/prompts", + "order": 30, + }, + ], }, + # ── Nutzung ── { - "id": "connections", - "objectKey": "ui.system.connections", - "label": {"en": "Connections", "de": "Verbindungen", "fr": "Connexions"}, - "icon": "FaLink", - "path": "/basedata/connections", + "id": "system-usage", + "title": {"en": "Usage", "de": "Nutzung", "fr": "Utilisation"}, "order": 30, - }, - ], - }, - { - "id": "billing", - "title": {"en": "BILLING", "de": "BILLING", "fr": "FACTURATION"}, - "order": 35, - "items": [ - { - "id": "billing-transactions", - "objectKey": 
"ui.billing.transactions", - "label": {"en": "Billing", "de": "Billing", "fr": "Facturation"}, - "icon": "FaWallet", - "path": "/billing/transactions", - "order": 10, + "items": [ + { + "id": "billing-admin", + "objectKey": "ui.system.billingAdmin", + "label": {"en": "Billing", "de": "Abrechnung", "fr": "Facturation"}, + "icon": "FaMoneyBillAlt", + "path": "/billing/admin", + "order": 10, + }, + { + "id": "statistics", + "objectKey": "ui.system.statistics", + "label": {"en": "Statistics", "de": "Statistiken", "fr": "Statistiques"}, + "icon": "FaChartBar", + "path": "/billing/transactions", + "order": 20, + }, + { + "id": "automations", + "objectKey": "ui.system.automations", + "label": {"en": "Automations", "de": "Automations", "fr": "Automations"}, + "icon": "FaRobot", + "path": "/automations", + "order": 30, + }, + { + "id": "store", + "objectKey": "ui.system.store", + "label": {"en": "Store", "de": "Store", "fr": "Store"}, + "icon": "FaStore", + "path": "/store", + "order": 40, + "public": True, + }, + { + "id": "settings", + "objectKey": "ui.system.settings", + "label": {"en": "Settings", "de": "Einstellungen", "fr": "Paramètres"}, + "icon": "FaCog", + "path": "/settings", + "order": 50, + "public": True, + }, + ], }, ], }, # ─── Administration (with subgroups) ─── - # Access control is at item level, NOT section level. - # Groups auto-hide if 0 visible pages for the user. 
{ "id": "admin", "title": {"en": "ADMINISTRATION", "de": "ADMINISTRATION", "fr": "ADMINISTRATION"}, @@ -182,22 +201,13 @@ NAVIGATION_SECTIONS = [ "order": 30, "adminOnly": True, }, - { - "id": "admin-billing", - "objectKey": "ui.admin.billing", - "label": {"en": "Billing Administration", "de": "Billing-Verwaltung", "fr": "Administration de facturation"}, - "icon": "FaMoneyBillAlt", - "path": "/admin/billing", - "order": 40, - "adminOnly": True, - }, { "id": "admin-subscriptions", "objectKey": "ui.admin.subscriptions", "label": {"en": "Subscriptions", "de": "Abonnements", "fr": "Abonnements"}, "icon": "FaFileContract", "path": "/admin/subscriptions", - "order": 50, + "order": 40, "adminOnly": True, }, ], @@ -282,6 +292,16 @@ NAVIGATION_SECTIONS = [ "adminOnly": True, "sysAdminOnly": True, }, + { + "id": "admin-languages", + "objectKey": "ui.admin.languages", + "label": {"en": "UI Languages", "de": "UI-Sprachen", "fr": "Langues UI"}, + "icon": "FaGlobe", + "path": "/admin/languages", + "order": 95, + "adminOnly": True, + "sysAdminOnly": True, + }, ], }, ], diff --git a/modules/workflows/automation2/executionEngine.py b/modules/workflows/automation2/executionEngine.py index 40055b11..e7a6645f 100644 --- a/modules/workflows/automation2/executionEngine.py +++ b/modules/workflows/automation2/executionEngine.py @@ -22,9 +22,11 @@ from modules.workflows.automation2.executors import ( FlowExecutor, ActionNodeExecutor, InputExecutor, + DataExecutor, PauseForHumanTaskError, PauseForEmailWaitError, ) +from modules.features.graphicalEditor.portTypes import _normalizeToSchema from modules.features.graphicalEditor.nodeDefinitions import STATIC_NODE_TYPES from modules.workflows.automation2.runEnvelope import normalize_run_envelope @@ -43,10 +45,8 @@ def _is_node_on_active_path( ) -> bool: """ Return True if this node receives input only from active branches. - - flow.ifElse: only one output (0=yes, 1=no) is active; uses "branch". - - flow.switch: only one output (0, 1, 2, ...) 
is active; uses "match". - Nodes connected to inactive outputs must be skipped. - Also skip when a predecessor was skipped (not in nodeOutputs). + Transit envelopes: routing metadata is in out["_meta"] (branch/match). + Legacy format: branch/match directly on out. """ for src, source_output, _ in connectionMap.get(nodeId, []): out = nodeOutputs.get(src) @@ -54,14 +54,18 @@ def _is_node_on_active_path( return False if not isinstance(out, dict): continue - branch = out.get("branch") - match = out.get("match") + + # Transit envelope: metadata in _meta + meta = out.get("_meta", {}) if out.get("_transit") else out + branch = meta.get("branch") + match = meta.get("match") + active_output = None if branch is not None: active_output = branch elif match is not None: if match < 0: - return False # switch: no case matched, skip all downstream + return False active_output = match if active_output is not None and source_output != active_output: return False @@ -78,7 +82,11 @@ def _getExecutor( return TriggerExecutor() if nodeType.startswith("flow."): return FlowExecutor() - if nodeType.startswith("ai.") or nodeType.startswith("email.") or nodeType.startswith("sharepoint.") or nodeType.startswith("clickup.") or nodeType.startswith("file."): + if nodeType.startswith("data."): + return DataExecutor() + if (nodeType.startswith("ai.") or nodeType.startswith("email.") + or nodeType.startswith("sharepoint.") or nodeType.startswith("clickup.") + or nodeType.startswith("file.") or nodeType.startswith("trustee.")): return ActionNodeExecutor(services) if nodeType.startswith("input.") and automation2_interface: return InputExecutor(automation2_interface) @@ -88,6 +96,11 @@ def _getExecutor( _stepMeta: Dict[str, Dict[str, str]] = {} +def _serializableOutputs(nodeOutputs: Dict[str, Any]) -> Dict[str, Any]: + """Return a shallow copy of nodeOutputs without the circular _context reference.""" + return {k: v for k, v in nodeOutputs.items() if k != "_context"} + + def _emitStepEvent(runId: str, 
stepData: Dict[str, Any]) -> None: """Emit a step-log SSE event to any listening client for this run.""" try: @@ -283,9 +296,12 @@ async def executeGraph( "_orderedNodes": ordered, "runEnvelope": env_for_run, } + # _context key in nodeOutputs for system variable resolution + nodeOutputs["_context"] = context skip_until_passed = bool(startAfterNodeId) processed_in_loop: Set[str] = set() + _aggregateAccumulators: Dict[str, list] = {} # Check for loop resume: run was paused inside a loop, we're resuming for next iteration run = automation2_interface.getRun(runId) if (runId and automation2_interface) else None @@ -323,6 +339,11 @@ async def executeGraph( _rStepId = _createStepLog(automation2_interface, runId, bnid, body_node.get("type", ""), "running", _rInputSnap) try: result, _rRetry = await _executeWithRetry(executor, body_node, context) + if body_node.get("type") == "data.aggregate": + if bnid not in _aggregateAccumulators: + _aggregateAccumulators[bnid] = [] + accItems = result.get("items", [result]) if isinstance(result, dict) else [result] + _aggregateAccumulators[bnid].extend(accItems) nodeOutputs[bnid] = result _rDur = int((time.time() - _rStepStart) * 1000) _updateStepLog(automation2_interface, _rStepId, "completed", @@ -335,8 +356,8 @@ async def executeGraph( if automation2_interface: run_ctx = dict(run.get("context") or {}) run_ctx["_loopState"] = {"loopNodeId": loop_node_id, "currentIndex": next_index, "items": items} - automation2_interface.updateRun(e.runId, status="paused", nodeOutputs=dict(nodeOutputs), currentNodeId=e.nodeId, context=run_ctx) - return {"success": False, "paused": True, "taskId": e.taskId, "runId": e.runId, "nodeId": e.nodeId, "nodeOutputs": dict(nodeOutputs)} + automation2_interface.updateRun(e.runId, status="paused", nodeOutputs=_serializableOutputs(nodeOutputs), currentNodeId=e.nodeId, context=run_ctx) + return {"success": False, "paused": True, "taskId": e.taskId, "runId": e.runId, "nodeId": e.nodeId, "nodeOutputs": 
_serializableOutputs(nodeOutputs)} except PauseForEmailWaitError as e: _updateStepLog(automation2_interface, _rStepId, "completed", durationMs=int((time.time() - _rStepStart) * 1000)) @@ -347,11 +368,14 @@ async def executeGraph( logger.exception("executeGraph loop body node %s FAILED: %s", bnid, ex) nodeOutputs[bnid] = {"error": str(ex), "success": False} if runId and automation2_interface: - automation2_interface.updateRun(runId, status="failed", nodeOutputs=nodeOutputs) - return {"success": False, "error": str(ex), "nodeOutputs": nodeOutputs, "failedNode": bnid, "runId": runId} + automation2_interface.updateRun(runId, status="failed", nodeOutputs=_serializableOutputs(nodeOutputs)) + return {"success": False, "error": str(ex), "nodeOutputs": _serializableOutputs(nodeOutputs), "failedNode": bnid, "runId": runId} next_index += 1 if loop_node_id: nodeOutputs[loop_node_id] = {"items": items, "count": len(items)} + for aggId, accItems in _aggregateAccumulators.items(): + nodeOutputs[aggId] = {"items": accItems, "count": len(accItems), "_success": True} + _aggregateAccumulators.clear() processed_in_loop = set(body_ids) | {loop_node_id} for i, node in enumerate(ordered): @@ -425,6 +449,12 @@ async def executeGraph( _bStepId = _createStepLog(automation2_interface, runId, bnid, body_node.get("type", ""), "running", _bInputSnap) try: bres, _bRetry = await _executeWithRetry(bexec, body_node, context) + # data.aggregate: accumulate instead of overwrite + if body_node.get("type") == "data.aggregate": + if bnid not in _aggregateAccumulators: + _aggregateAccumulators[bnid] = [] + accItems = bres.get("items", [bres]) if isinstance(bres, dict) else [bres] + _aggregateAccumulators[bnid].extend(accItems) nodeOutputs[bnid] = bres _bDur = int((time.time() - _bStepStart) * 1000) _updateStepLog(automation2_interface, _bStepId, "completed", @@ -438,8 +468,8 @@ async def executeGraph( run = automation2_interface.getRun(runId) or {} run_ctx = dict(run.get("context") or {}) 
run_ctx["_loopState"] = {"loopNodeId": nodeId, "currentIndex": idx, "items": items} - automation2_interface.updateRun(e.runId, status="paused", nodeOutputs=dict(nodeOutputs), currentNodeId=e.nodeId, context=run_ctx) - return {"success": False, "paused": True, "taskId": e.taskId, "runId": e.runId, "nodeId": e.nodeId, "nodeOutputs": dict(nodeOutputs)} + automation2_interface.updateRun(e.runId, status="paused", nodeOutputs=_serializableOutputs(nodeOutputs), currentNodeId=e.nodeId, context=run_ctx) + return {"success": False, "paused": True, "taskId": e.taskId, "runId": e.runId, "nodeId": e.nodeId, "nodeOutputs": _serializableOutputs(nodeOutputs)} except PauseForEmailWaitError as e: _updateStepLog(automation2_interface, _bStepId, "completed", durationMs=int((time.time() - _bStepStart) * 1000)) @@ -450,9 +480,13 @@ async def executeGraph( logger.exception("executeGraph loop body node %s FAILED: %s", bnid, ex) nodeOutputs[bnid] = {"error": str(ex), "success": False} if runId and automation2_interface: - automation2_interface.updateRun(runId, status="failed", nodeOutputs=nodeOutputs) - return {"success": False, "error": str(ex), "nodeOutputs": nodeOutputs, "failedNode": bnid, "runId": runId} + automation2_interface.updateRun(runId, status="failed", nodeOutputs=_serializableOutputs(nodeOutputs)) + return {"success": False, "error": str(ex), "nodeOutputs": _serializableOutputs(nodeOutputs), "failedNode": bnid, "runId": runId} nodeOutputs[nodeId] = {"items": items, "count": len(items)} + # Finalize aggregate accumulators after loop + for aggId, accItems in _aggregateAccumulators.items(): + nodeOutputs[aggId] = {"items": accItems, "count": len(accItems), "_success": True} + _aggregateAccumulators.clear() _updateStepLog(automation2_interface, _stepId, "completed", output={"iterationCount": len(items), "items": len(items)}, durationMs=int((time.time() - _stepStartMs) * 1000)) @@ -489,13 +523,12 @@ async def executeGraph( "taskId": e.taskId, "runId": e.runId, "nodeId": e.nodeId, 
- "nodeOutputs": dict(nodeOutputs), + "nodeOutputs": _serializableOutputs(nodeOutputs), } except PauseForEmailWaitError as e: _updateStepLog(automation2_interface, _stepId, "completed", durationMs=int((time.time() - _stepStartMs) * 1000)) logger.info("executeGraph paused for email wait (run %s, node %s)", e.runId, e.nodeId) - # Start email poller on-demand (only runs while workflows wait for email) try: from modules.interfaces.interfaceDbApp import getRootInterface from modules.features.graphicalEditor.emailPoller import ensureRunning @@ -521,7 +554,7 @@ async def executeGraph( automation2_interface.updateRun( e.runId, status="paused", - nodeOutputs=dict(nodeOutputs), + nodeOutputs=_serializableOutputs(nodeOutputs), currentNodeId=e.nodeId, context=run_ctx, ) @@ -531,7 +564,7 @@ async def executeGraph( "waitReason": "email", "runId": e.runId, "nodeId": e.nodeId, - "nodeOutputs": dict(nodeOutputs), + "nodeOutputs": _serializableOutputs(nodeOutputs), } except Exception as e: logger.exception("executeGraph node %s (%s) FAILED: %s", nodeId, nodeType, e) @@ -539,7 +572,7 @@ async def executeGraph( _durMs = int((time.time() - _stepStartMs) * 1000) _updateStepLog(automation2_interface, _stepId, "failed", error=str(e), durationMs=_durMs) if runId and automation2_interface: - automation2_interface.updateRun(runId, status="failed", nodeOutputs=nodeOutputs) + automation2_interface.updateRun(runId, status="failed", nodeOutputs=_serializableOutputs(nodeOutputs)) if runId: _emitStepEvent(runId, {"type": "run_failed", "runId": runId, "status": "failed", "error": str(e), "failedNode": nodeId}) try: @@ -560,13 +593,15 @@ async def executeGraph( return { "success": False, "error": str(e), - "nodeOutputs": nodeOutputs, + "nodeOutputs": _serializableOutputs(nodeOutputs), "failedNode": nodeId, "runId": runId, } + _safeOutputs = _serializableOutputs(nodeOutputs) + if runId and automation2_interface: - automation2_interface.updateRun(runId, status="completed", nodeOutputs=nodeOutputs) + 
automation2_interface.updateRun(runId, status="completed", nodeOutputs=_safeOutputs) if runId: _emitStepEvent(runId, {"type": "run_complete", "runId": runId, "status": "completed"}) logger.info( @@ -576,7 +611,7 @@ async def executeGraph( ) return { "success": True, - "nodeOutputs": nodeOutputs, + "nodeOutputs": _safeOutputs, "stopped": context.get("_stopped", False), "runId": runId, } diff --git a/modules/workflows/automation2/executors/__init__.py b/modules/workflows/automation2/executors/__init__.py index 2b6768df..4d2180c3 100644 --- a/modules/workflows/automation2/executors/__init__.py +++ b/modules/workflows/automation2/executors/__init__.py @@ -5,12 +5,14 @@ from .triggerExecutor import TriggerExecutor from .flowExecutor import FlowExecutor from .actionNodeExecutor import ActionNodeExecutor from .inputExecutor import InputExecutor, PauseForHumanTaskError, PauseForEmailWaitError +from .dataExecutor import DataExecutor __all__ = [ "TriggerExecutor", "FlowExecutor", "ActionNodeExecutor", "InputExecutor", + "DataExecutor", "PauseForHumanTaskError", "PauseForEmailWaitError", ] diff --git a/modules/workflows/automation2/executors/actionNodeExecutor.py b/modules/workflows/automation2/executors/actionNodeExecutor.py index f2456803..c0d7d0bb 100644 --- a/modules/workflows/automation2/executors/actionNodeExecutor.py +++ b/modules/workflows/automation2/executors/actionNodeExecutor.py @@ -1,26 +1,30 @@ # Copyright (c) 2025 Patrick Motsch -# Action node executor - maps ai.*, email.*, sharepoint.*, clickup.* to method actions via ActionExecutor. +# Action node executor - maps ai.*, email.*, sharepoint.*, clickup.*, file.*, trustee.* to method actions. 
# -# Unified handover format for all nodes: -# - Node output: { success, error?, documents, documentList, data } – documents and documentList are identical -# - Input merge: downstream receives documents via _getDocumentsFromUpstream(inp) – reads documents or documentList -# - Incoming email handover: (context, documentList, reply_to, subject) via _formatEmailOutputAsContext / _unpackIncomingEmail +# Typed Port System: no heuristic merging. Uses INPUT_EXTRACTORS for wire-handover, +# DataRef for explicit parameter mapping, and _normalizeToSchema for output normalization. import json import logging import re from typing import Dict, Any, List, Optional +from modules.features.graphicalEditor.portTypes import ( + INPUT_EXTRACTORS, + _normalizeToSchema, + _normalizeError, + _unwrapTransit, +) + logger = logging.getLogger(__name__) -# UserConnection.id (UUID) when connectionId could not be mapped to connection:authority:username _USER_CONNECTION_ID_RE = re.compile( r"^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$", re.IGNORECASE, ) -def _is_user_connection_id(val: Any) -> bool: +def _isUserConnectionId(val: Any) -> bool: if val is None or isinstance(val, (dict, list)): return False s = str(val).strip() @@ -28,7 +32,7 @@ def _is_user_connection_id(val: Any) -> bool: def _getNodeDefinition(nodeType: str) -> Optional[Dict[str, Any]]: - """Get node definition by type id for _method, _action, _paramMap.""" + """Get node definition by type id.""" from modules.features.graphicalEditor.nodeDefinitions import STATIC_NODE_TYPES for node in STATIC_NODE_TYPES: if node.get("id") == nodeType: @@ -40,14 +44,11 @@ def _resolveConnectionIdToReference(chatService, connectionId: str, services=Non """ Resolve connectionId (UserConnection.id) to connectionReference format. connectionReference format: connection:{authority}:{externalUsername} - Falls back to interfaceDbApp.getUserConnectionById when chatService resolution fails. 
""" if not connectionId: return None - # Already in reference format if isinstance(connectionId, str) and connectionId.startswith("connection:"): return connectionId - # Try chatService first if chatService: try: connections = chatService.getUserConnections() @@ -61,7 +62,6 @@ def _resolveConnectionIdToReference(chatService, connectionId: str, services=Non return f"connection:{authority}:{username}" except Exception as e: logger.debug("_resolveConnectionIdToReference chatService: %s", e) - # Fallback: interfaceDbApp.getUserConnectionById (automation2 may not have chat.getUserConnections) app = getattr(services, "interfaceDbApp", None) if services else None if app and hasattr(app, "getUserConnectionById"): try: @@ -79,344 +79,8 @@ def _resolveConnectionIdToReference(chatService, connectionId: str, services=Non return None -def _extractEmailContentFromUpstream(inp: Any) -> Optional[Dict[str, Any]]: - """ - Extract {subject, body, to} from upstream node output (e.g. AI node returning JSON). - Expects JSON like {"subject": "...", "body": "...", "to": "..."} in documentData. - Uses unified handover: documents/documentList. - """ - if not inp: - return None - import json - docs = _getDocumentsFromUpstream(inp) - if not docs: - return None - doc = docs[0] if isinstance(docs, list) else docs - raw = getattr(doc, "documentData", None) if hasattr(doc, "documentData") else (doc.get("documentData") if isinstance(doc, dict) else None) - if not raw: - return None - try: - data = json.loads(raw) if isinstance(raw, str) else raw - if isinstance(data, dict) and data.get("subject") and data.get("body"): - return { - "subject": str(data.get("subject", "")), - "body": str(data.get("body", "")), - "to": data.get("to"), - } - except (json.JSONDecodeError, TypeError): - pass - return None - - -def _extractContextFromUpstream(inp: Any) -> Optional[str]: - """ - Extract plain text context from upstream node output (e.g. AI node returning txt). 
- Use when _extractEmailContentFromUpstream returns None – the generated document content - (email body, summary, etc.) should be passed as context to email.draftEmail. - Uses unified handover: documents/documentList. - """ - if not inp: - return None - docs = _getDocumentsFromUpstream(inp) - if not docs: - return None - doc = docs[0] if docs else None - if not doc: - return None - raw = getattr(doc, "documentData", None) if hasattr(doc, "documentData") else (doc.get("documentData") or doc.get("content") if isinstance(doc, dict) else None) - if not raw: - return None - if isinstance(raw, bytes): - return raw.decode("utf-8", errors="replace").strip() - s = str(raw).strip() - return s if s else None - - -def _payloadToContext(payload: Any) -> Optional[str]: - """Convert payload (e.g. from form) to readable text for document context.""" - if payload is None: - return None - if isinstance(payload, str) and payload.strip(): - return payload.strip() - if isinstance(payload, dict): - try: - import json - return json.dumps(payload, ensure_ascii=False, indent=2) - except (TypeError, ValueError): - lines = [f"{k}: {v}" for k, v in payload.items()] - return "\n".join(lines) if lines else None - return str(payload).strip() if str(payload).strip() else None - - -def _getContextFromUpstream(out: Any) -> Optional[str]: - """ - Get context from upstream node output. Prefers explicit 'context' field; - falls back to documents/documentList (first doc's documentData), then payload. - Handles: AI (context), form (payload or top-level field dict), upload (document refs). 
- """ - if not out or not isinstance(out, dict): - return None - ctx = out.get("context") - if isinstance(ctx, str) and ctx.strip(): - return ctx.strip() - doc_ctx = _extractContextFromUpstream(out) - if doc_ctx: - return doc_ctx - payload = out.get("payload") - if payload is not None: - return _payloadToContext(payload) - if "documents" not in out and "documentList" not in out and "success" not in out: - return _payloadToContext(out) - return None - - -def _extractContextFromResult(result: Any) -> Optional[str]: - """ - Extract plain text context from ActionResult (ActionExecutor result). - Used to populate 'context' in unified output for AI nodes. - """ - if not result or not hasattr(result, "documents"): - return None - docs = result.documents or [] - if not docs: - return None - doc = docs[0] - raw = getattr(doc, "documentData", None) if hasattr(doc, "documentData") else (doc.get("documentData") if isinstance(doc, dict) else None) - if not raw: - return None - if isinstance(raw, bytes): - return raw.decode("utf-8", errors="replace").strip() - return str(raw).strip() if str(raw).strip() else None - - -def _gatherAttachmentDocumentsFromUpstream( - nodeId: str, - inputSources: Dict[str, Dict[int, tuple]], - nodeOutputs: Dict[str, Any], - orderedNodes: List[Dict], - visited: Optional[set] = None, -) -> List[Any]: - """ - Walk upstream from nodeId through AI nodes to collect file documents (e.g. from sharepoint.downloadFile). - Used when email.draftEmail has AI upstream – attachments come from file nodes, not AI output. 
- """ - visited = visited or set() - if nodeId in visited: - return [] - visited.add(nodeId) - docs = [] - src = inputSources.get(nodeId, {}).get(0) - if not src: - return [] - srcId, _ = src - srcNode = next((n for n in (orderedNodes or []) if n.get("id") == srcId), None) - srcType = (srcNode or {}).get("type", "") - out = nodeOutputs.get(srcId) - - if srcType in ("sharepoint.downloadFile", "sharepoint.readFile"): - if isinstance(out, dict): - for d in _getDocumentsFromUpstream(out): - if isinstance(d, dict) and (d.get("documentData") or (d.get("validationMetadata") or {}).get("fileId")): - docs.append(d) - elif hasattr(d, "documentData") or (getattr(d, "validationMetadata", None) or {}).get("fileId"): - docs.append(d.model_dump() if hasattr(d, "model_dump") else d) - elif srcType.startswith("ai."): - docs.extend( - _gatherAttachmentDocumentsFromUpstream(srcId, inputSources, nodeOutputs, orderedNodes, visited) - ) - return docs - - -def _getDocumentsFromUpstream(out: Any) -> list: - """Unified: extract documents list from any node output. - Supports: documents, documentList, data.documents. - Also: input.upload result format { file, files, fileIds } - converts to doc refs with validationMetadata.fileId. - """ - if not out or not isinstance(out, dict): - return [] - docs = out.get("documents") or out.get("documentList") - if not docs and isinstance(out.get("data"), dict): - docs = out.get("data", {}).get("documents") or out.get("data", {}).get("documentList") - if not docs: - # input.upload task result: { file: {id, fileName}, files: [...], fileIds: [...] 
} - def _file_to_doc(f: Any) -> Optional[Dict[str, Any]]: - if isinstance(f, dict): - fid = f.get("id") - fname = f.get("fileName") or f.get("filename") or "file" - if fid: - return { - "documentName": fname, - "fileName": fname, - "validationMetadata": {"fileId": str(fid)}, - } - elif isinstance(f, str): - return {"documentName": "file", "fileName": "file", "validationMetadata": {"fileId": f}} - return None - - file_obj = out.get("file") - files_arr = out.get("files") or [] - file_ids = out.get("fileIds") or [] - if file_obj: - d = _file_to_doc(file_obj) - if d: - docs = [d] - if not docs and files_arr: - docs = [d for f in files_arr for d in [_file_to_doc(f)] if d] - if not docs and file_ids: - docs = [_file_to_doc(fid) for fid in file_ids if _file_to_doc(fid)] - if not docs: - return [] - return docs if isinstance(docs, (list, tuple)) else [docs] - - -def _unpackIncomingEmail(incoming: Optional[tuple]) -> Optional[tuple]: - """ - Unified handover: (context, documentList, reply_to, subject). - Returns (ctx, doc_list, reply_to, subject) or None. - """ - if not incoming or not isinstance(incoming, (list, tuple)): - return None - ctx = incoming[0] if len(incoming) > 0 else None - doc_list = incoming[1] if len(incoming) > 1 else [] - reply_to = incoming[2] if len(incoming) > 2 else None - subject = incoming[3] if len(incoming) > 3 else "" - return (ctx, doc_list or [], reply_to, subject) - - -def _getIncomingEmailFromUpstream( - nodeId: str, - inputSources: Dict[str, Dict[int, tuple]], - nodeOutputs: Dict[str, Any], - orderedNodes: List[Dict], -) -> Optional[tuple]: - """ - Walk upstream from draftEmail to find email.checkEmail/searchEmail and return (context, documentList). - context = formatted incoming email(s) for composeAndDraftEmail. - documentList = documents from the email node for attachment/context. 
- """ - src = inputSources.get(nodeId, {}).get(0) - if not src: - return None - srcId, _ = src - srcNode = next((n for n in (orderedNodes or []) if n.get("id") == srcId), None) - srcType = (srcNode or {}).get("type", "") - - # Direct connection to email node - if srcType in ("email.checkEmail", "email.searchEmail"): - out = nodeOutputs.get(srcId) - return _formatEmailOutputAsContext(out) - - # Connected via AI node: walk one more step to email source - if srcType.startswith("ai."): - src2 = inputSources.get(srcId, {}).get(0) - if not src2: - return None - emailNodeId, _ = src2 - emailNode = next((n for n in (orderedNodes or []) if n.get("id") == emailNodeId), None) - if (emailNode or {}).get("type") in ("email.checkEmail", "email.searchEmail"): - out = nodeOutputs.get(emailNodeId) - return _formatEmailOutputAsContext(out) - return None - - -def _formatEmailOutputAsContext(out: Any) -> Optional[tuple]: - """Format email node output as (context, documentList, reply_to, subject) for composeAndDraftEmail. - reply_to = sender address of first email (recipient for the reply). - subject = original subject (for Re: prefix). - Returns unified handover: (text, files/docs, reply_to, subject). 
- """ - if not out: - return None - docs = _getDocumentsFromUpstream(out) - if not docs: - return None - doc = docs[0] if isinstance(docs, list) else docs - raw = getattr(doc, "documentData", None) if hasattr(doc, "documentData") else (doc.get("documentData") if isinstance(doc, dict) else None) - if not raw: - return None - import json - try: - data = json.loads(raw) if isinstance(raw, str) else raw - except (json.JSONDecodeError, TypeError): - return None - if not isinstance(data, dict): - return None - # readEmails: data.emails.emails | searchEmails: data.searchResults.results - emails_data = data.get("emails") or {} - emails_list = emails_data.get("emails", []) if isinstance(emails_data, dict) else [] - if not emails_list: - search_results = data.get("searchResults") or {} - emails_list = search_results.get("results", []) if isinstance(search_results, dict) else [] - if not emails_list: - return None - reply_to = None - first_subject = "" - parts = ["Reply to the following email(s):", ""] - for i, em in enumerate(emails_list[:5]): # max 5 - if not isinstance(em, dict): - continue - fr = em.get("from", em.get("sender", {})) - addr = fr.get("emailAddress", {}) if isinstance(fr, dict) else {} - from_str = addr.get("address", "") or addr.get("name", "") - if from_str and not reply_to: - reply_to = addr.get("address", "") or from_str - subj = em.get("subject", "") - if subj and not first_subject: - first_subject = subj - body = em.get("bodyPreview", "") or (em.get("body") or {}).get("content", "") if isinstance(em.get("body"), dict) else "" - if body and len(str(body)) > 1500: - body = str(body)[:1500] + "..." 
- parts.append(f"From: {from_str}") - parts.append(f"Subject: {subj}") - parts.append(f"Content:\n{body}") - parts.append("") - if reply_to: - parts.insert(2, f"Recipient (reply to this address): {reply_to}") - parts.insert(3, "") - context = "\n".join(parts).strip() - return (context, docs, reply_to, first_subject) - - -def _buildSearchQuery( - query: str = None, - fromAddress: str = None, - toAddress: str = None, - subjectContains: str = None, - bodyContains: str = None, - hasAttachment: bool = None, - filter: str = None, -) -> str: - """ - Build Microsoft Graph $search query from discrete params. - Uses KQL: from:, to:, subject:, body:, hasattachments: (supported by Graph API). - """ - if filter and str(filter).strip(): - return str(filter).strip() - parts = [] - if query and str(query).strip(): - parts.append(str(query).strip()) - if fromAddress and str(fromAddress).strip(): - safe = str(fromAddress).strip().replace('"', '') - parts.append(f'from:{safe}') - if toAddress and str(toAddress).strip(): - safe = str(toAddress).strip().replace('"', '') - parts.append(f'to:{safe}') - if subjectContains and str(subjectContains).strip(): - safe = str(subjectContains).strip().replace('"', '') - parts.append(f'subject:{safe}') - if bodyContains and str(bodyContains).strip(): - safe = str(bodyContains).strip().replace('"', '') - parts.append(f'body:{safe}') - if hasAttachment is True: - parts.append("hasattachments:true") - return " ".join(parts) if parts else "*" - - def _buildEmailFilter(fromAddress: str = None, subjectContains: str = None, hasAttachment: bool = None) -> str: - """ - Build Microsoft Graph API $filter string from discrete email filter params. - Used for email.checkEmail (and trigger.newEmail). 
- """ + """Build Microsoft Graph API $filter string.""" parts = [] if fromAddress and str(fromAddress).strip(): safe = str(fromAddress).strip().replace("'", "''") @@ -429,102 +93,106 @@ def _buildEmailFilter(fromAddress: str = None, subjectContains: str = None, hasA return " and ".join(parts) if parts else "" -def _buildActionParams( - node: Dict[str, Any], - nodeDef: Dict[str, Any], - resolvedParams: Dict[str, Any], - chatService, - services=None, -) -> Dict[str, Any]: - """ - Build params for ActionExecutor from node parameters using _paramMap. - Resolves connectionId -> connectionReference. - Handles _contextFrom for composite params (e.g. email.draftEmail subject+body -> context). - """ - params = dict(resolvedParams) - paramMap = nodeDef.get("_paramMap") or {} - contextFrom = nodeDef.get("_contextFrom") or [] +def _buildSearchQuery( + query: str = None, fromAddress: str = None, toAddress: str = None, + subjectContains: str = None, bodyContains: str = None, + hasAttachment: bool = None, filterStr: str = None, +) -> str: + """Build Microsoft Graph $search query from discrete params.""" + if filterStr and str(filterStr).strip(): + return str(filterStr).strip() + parts = [] + if query and str(query).strip(): + parts.append(str(query).strip()) + if fromAddress and str(fromAddress).strip(): + parts.append(f'from:{str(fromAddress).strip().replace(chr(34), "")}') + if toAddress and str(toAddress).strip(): + parts.append(f'to:{str(toAddress).strip().replace(chr(34), "")}') + if subjectContains and str(subjectContains).strip(): + parts.append(f'subject:{str(subjectContains).strip().replace(chr(34), "")}') + if bodyContains and str(bodyContains).strip(): + parts.append(f'body:{str(bodyContains).strip().replace(chr(34), "")}') + if hasAttachment is True: + parts.append("hasattachments:true") + return " ".join(parts) if parts else "*" - # email.checkEmail: build filter from discrete params (fromAddress, subjectContains, hasAttachment) - nodeType = node.get("type", "") - if 
nodeType == "email.checkEmail": - built = _buildEmailFilter( - fromAddress=params.get("fromAddress"), - subjectContains=params.get("subjectContains"), - hasAttachment=params.get("hasAttachment"), - ) - raw_filter = (params.get("filter") or "").strip() - params["filter"] = built if built else (raw_filter if raw_filter else None) - params.pop("fromAddress", None) - params.pop("subjectContains", None) - params.pop("hasAttachment", None) - # email.searchEmail: build query from discrete params (fromAddress, toAddress, subjectContains, bodyContains, hasAttachment) - if nodeType == "email.searchEmail": - built = _buildSearchQuery( - query=params.get("query"), - fromAddress=params.get("fromAddress"), - toAddress=params.get("toAddress"), - subjectContains=params.get("subjectContains"), - bodyContains=params.get("bodyContains"), - hasAttachment=params.get("hasAttachment"), - filter=params.get("filter"), - ) - params["query"] = built - params.pop("fromAddress", None) - params.pop("toAddress", None) - params.pop("subjectContains", None) - params.pop("bodyContains", None) - params.pop("hasAttachment", None) - params.pop("filter", None) +def _resolveConnectionParam(params: Dict, chatService, services) -> None: + """Resolve connectionReference if it looks like a UUID (UserConnection.id).""" + connRef = params.get("connectionReference") + if connRef and _isUserConnectionId(connRef): + resolved = _resolveConnectionIdToReference(chatService, connRef, services) + if resolved: + params["connectionReference"] = resolved - # Resolve connectionId to connectionReference - if "connectionId" in params: - connId = params.get("connectionId") - if connId: - ref = _resolveConnectionIdToReference(chatService, connId, services) - if ref: - params["connectionReference"] = ref - elif _is_user_connection_id(connId): - # Automation2 worker often has no chat user connection list; pass UUID through — - # method helpers (e.g. ClickupConnectionHelper) resolve via interfaceDbApp.getUserConnectionById. 
- params["connectionReference"] = str(connId).strip() - else: - logger.warning(f"Could not resolve connectionId {connId} to connectionReference") - params.pop("connectionId", None) - # Build context from multiple params (e.g. subject + body for draft email) - if contextFrom: - parts = [] - for key in contextFrom: - val = params.get(key) - if val: - if key == "subject": - parts.append(f"Subject: {val}") - elif key == "body": - parts.append(f"Body:\n{val}") - else: - parts.append(str(val)) - if parts: - params["context"] = "\n\n".join(parts) - for k in contextFrom: - params.pop(k, None) +def _applyEmailCheckFilter(params: Dict) -> None: + """Build filter from discrete email params for email.checkEmail.""" + built = _buildEmailFilter( + fromAddress=params.get("fromAddress"), + subjectContains=params.get("subjectContains"), + hasAttachment=params.get("hasAttachment"), + ) + rawFilter = (params.get("filter") or "").strip() + params["filter"] = built if built else (rawFilter if rawFilter else None) + for k in ("fromAddress", "subjectContains", "hasAttachment"): + params.pop(k, None) - # Apply paramMap: node param name -> action param name - result = {} - mappedNodeKeys = {nodeKey for nodeKey, actionKey in paramMap.items() if actionKey and nodeKey in params} - for nodeKey, actionKey in paramMap.items(): - if nodeKey in params and actionKey: - result[actionKey] = params[nodeKey] - # Pass through params not used as source for mapping - for k, v in params.items(): - if k not in mappedNodeKeys and k not in result: - result[k] = v - return result + +def _applyEmailSearchQuery(params: Dict) -> None: + """Build query from discrete email params for email.searchEmail.""" + built = _buildSearchQuery( + query=params.get("query"), + fromAddress=params.get("fromAddress"), + toAddress=params.get("toAddress"), + subjectContains=params.get("subjectContains"), + bodyContains=params.get("bodyContains"), + hasAttachment=params.get("hasAttachment"), + filterStr=params.get("filter"), + ) + 
params["query"] = built + for k in ("fromAddress", "toAddress", "subjectContains", "bodyContains", "hasAttachment", "filter"): + params.pop(k, None) + + +def _wireHandover(nodeDef: Dict, inputSources: Dict, nodeOutputs: Dict, params: Dict) -> None: + """Apply wire-handover: extract fields from upstream using INPUT_EXTRACTORS.""" + if 0 not in inputSources: + return + srcId, _ = inputSources[0] + upstream = nodeOutputs.get(srcId) + if not upstream or not isinstance(upstream, dict): + return + + data = _unwrapTransit(upstream) + if not isinstance(data, dict): + return + + inputPorts = nodeDef.get("inputPorts", {}) + port0 = inputPorts.get(0, {}) + accepts = port0.get("accepts", []) + + for schemaName in accepts: + if schemaName == "Transit": + continue + extractor = INPUT_EXTRACTORS.get(schemaName) + if extractor: + extracted = extractor(data) + if extracted: + for k, v in extracted.items(): + params.setdefault(k, v) + return + + +def _getOutputSchemaName(nodeDef: Dict) -> str: + """Get the output schema name from the node definition.""" + outputPorts = nodeDef.get("outputPorts", {}) + port0 = outputPorts.get(0, {}) + return port0.get("schema", "ActionResult") class ActionNodeExecutor: - """Execute ai.*, email.*, sharepoint.*, clickup.* nodes by mapping to method actions.""" + """Execute action nodes by mapping to method actions via ActionExecutor.""" def __init__(self, services: Any): self.services = services @@ -549,313 +217,145 @@ class ActionNodeExecutor: return None methodName, actionName = methodAction - logger.info("ActionNodeExecutor node %s method=%s action=%s", nodeId, methodName, actionName) + nodeDef = _getNodeDefinition(nodeType) or {} + outputSchema = _getOutputSchemaName(nodeDef) - nodeDef = _getNodeDefinition(nodeType) + # 1. 
Resolve parameters (DataRef, SystemVar, Static) params = dict(node.get("parameters") or {}) resolvedParams = resolveParameterReferences(params, context.get("nodeOutputs", {})) - if nodeType == "clickup.updateTask": - from modules.workflows.automation2.clickupTaskUpdateMerge import merge_clickup_task_update_entries + # 2. Wire-handover via extractors (fills missing params from upstream) + inputSources = context.get("inputSources", {}).get(nodeId, {}) + _wireHandover(nodeDef, inputSources, context.get("nodeOutputs", {}), resolvedParams) + + # 3. Apply defaults from parameter definitions + for pDef in nodeDef.get("parameters", []): + pName = pDef.get("name") + if pName and pName not in resolvedParams and "default" in pDef: + resolvedParams[pName] = pDef["default"] + + # 4. Resolve connectionReference + chatService = getattr(self.services, "chat", None) + _resolveConnectionParam(resolvedParams, chatService, self.services) + + # 5. Node-type-specific param transformations + if nodeType == "email.checkEmail": + _applyEmailCheckFilter(resolvedParams) + elif nodeType == "email.searchEmail": + _applyEmailSearchQuery(resolvedParams) + elif nodeType == "clickup.updateTask": + from modules.workflows.automation2.clickupTaskUpdateMerge import merge_clickup_task_update_entries merge_clickup_task_update_entries(resolvedParams) - # Merge input from connected nodes (unified handover: documents/documentList, context) - inputSources = context.get("inputSources", {}).get(nodeId, {}) - if 0 in inputSources: - srcId, _ = inputSources[0] - inp = context.get("nodeOutputs", {}).get(srcId) - docs = _getDocumentsFromUpstream(inp) if isinstance(inp, dict) else [] - if docs: - resolvedParams.setdefault("documentList", docs) - elif inp is not None: - resolvedParams.setdefault("input", inp) - # file.create: build context from contentSources (concatenated) or fallback to upstream - if nodeType == "file.create": - sources = resolvedParams.get("contentSources") - if not isinstance(sources, list): - 
sources = [resolvedParams.get("contentSource")] if resolvedParams.get("contentSource") else [] - parts = [] - for s in sources: - if s is None or s == "": - continue - if isinstance(s, str): - txt = s.strip() - elif isinstance(s, dict): - txt = _payloadToContext(s) if s else "" - else: - txt = str(s) - if txt: - parts.append(txt) - upstream_context = _getContextFromUpstream(inp) - if parts: - parts_joined = "\n\n".join(parts) - # When upstream is AI and user only selected prompt, use full context (prompt + response) - if ( - isinstance(inp, dict) - and upstream_context - and len(upstream_context) > len(parts_joined) - ): - prompt_only = (inp.get("prompt") or "").strip() - if prompt_only and parts_joined.strip() == prompt_only: - resolvedParams["context"] = upstream_context - else: - resolvedParams["context"] = parts_joined - else: - resolvedParams["context"] = parts_joined - else: - if upstream_context: - resolvedParams["context"] = upstream_context - - # ai.prompt with email upstream: inject actual email content into prompt so AI has context - # (getChatDocumentsFromDocumentList fails in automation2 – workflow has no messages) - if nodeType.startswith("ai."): - orderedNodes = context.get("_orderedNodes") or [] - if 0 in inputSources: - srcId, _ = inputSources[0] - srcNode = next((n for n in orderedNodes if n.get("id") == srcId), None) - srcType = (srcNode or {}).get("type", "") - if srcType in ("email.checkEmail", "email.searchEmail"): - incoming = _unpackIncomingEmail(_getIncomingEmailFromUpstream( - nodeId, - context.get("inputSources", {}), - context.get("nodeOutputs", {}), - orderedNodes, - )) - if incoming: - ctx, _doc_list, _reply_to, _ = incoming - if ctx and ctx.strip(): - # Set "prompt" so _paramMap (prompt→aiPrompt) passes it through to ai.process - base_prompt = ( - (resolvedParams.get("prompt") or resolvedParams.get("aiPrompt") or "") - ).strip() - resolvedParams["prompt"] = ( - f"Eingehende E-Mail:\n{ctx}\n\nAufgabe: {base_prompt}" - if base_prompt - 
else f"Eingehende E-Mail:\n{ctx}" - ) - logger.debug("ai.prompt: injected email context from upstream %s", srcType) - - chatService = getattr(self.services, "chat", None) - actionParams = _buildActionParams(node, nodeDef or {}, resolvedParams, chatService, self.services) - - # ai.prompt: use simpleMode by default – direct AI call, no document pipeline (chapters/sections) - # For short prompts like "formuliere eine passende email" this avoids ~13 AI calls and verbose output - if nodeType == "ai.prompt" and "simpleMode" not in actionParams: - actionParams["simpleMode"] = True - - # email.checkEmail: pause and wait for new email (background poller will resume) + # 6. email.checkEmail pause for email wait if nodeType == "email.checkEmail": runId = context.get("_runId") workflowId = context.get("workflowId") - connRef = actionParams.get("connectionReference") + connRef = resolvedParams.get("connectionReference") if runId and workflowId and connRef: from modules.workflows.automation2.executors import PauseForEmailWaitError waitConfig = { "connectionReference": connRef, - "folder": actionParams.get("folder", "Inbox"), - "limit": min(int(actionParams.get("limit") or 10), 50), - "filter": actionParams.get("filter"), + "folder": resolvedParams.get("folder", "Inbox"), + "limit": min(int(resolvedParams.get("limit") or 10), 50), + "filter": resolvedParams.get("filter"), } raise PauseForEmailWaitError(runId=runId, nodeId=nodeId, waitConfig=waitConfig) - # Fallback: no pause (calls readEmails directly) – needs runId, workflowId, connectionReference - if not runId or not workflowId: - logger.warning( - "email.checkEmail not pausing (runId=%s workflowId=%s) – run must be saved/executed as workflow", - runId, - workflowId, - ) - elif not connRef: - logger.warning( - "email.checkEmail not pausing – connectionReference missing (check connectionId/config)", - ) - # email.draftEmail: use AI output as emailContent if available; else pass incoming email as context + # 7. 
AI nodes: simpleMode by default + if nodeType == "ai.prompt" and "simpleMode" not in resolvedParams: + resolvedParams["simpleMode"] = True + + # 8. Build context for email.draftEmail from subject + body if nodeType == "email.draftEmail": - inputSources = context.get("inputSources", {}) - nodeOutputs = context.get("nodeOutputs", {}) - orderedNodes = context.get("_orderedNodes") or [] - if 0 in inputSources.get(nodeId, {}): - srcId, _ = inputSources[nodeId][0] - srcNode = next((n for n in orderedNodes if n.get("id") == srcId), None) - srcType = (srcNode or {}).get("type", "") - if srcType.startswith("ai."): - inp = nodeOutputs.get(srcId) - email_content = _extractEmailContentFromUpstream(inp) - # Reply flow: get incoming email metadata (replyTo, subject, original docs) when email->AI->draft - incoming = _unpackIncomingEmail(_getIncomingEmailFromUpstream(nodeId, inputSources, nodeOutputs, orderedNodes)) - reply_to = None - reply_subject = None - reply_docs = [] - if incoming: - inc_ctx, doc_list, reply_to, first_subject = incoming - reply_docs = doc_list - reply_subject = ("Re: " + first_subject) if first_subject else None - if email_content: - # Merge reply metadata when available - merged = dict(email_content) - if reply_to and not merged.get("to"): - merged["to"] = reply_to if isinstance(reply_to, list) else [reply_to] - if reply_subject and not merged.get("subject"): - merged["subject"] = reply_subject - actionParams["emailContent"] = merged - actionParams["context"] = merged.get("body", "") or "(from connected AI node)" - if reply_docs: - actionParams["replySourceDocuments"] = reply_docs - # Attachments: gather from file nodes upstream of AI (e.g. 
downloadFile -> AI -> email) - attachment_docs = _gatherAttachmentDocumentsFromUpstream( - nodeId, inputSources, nodeOutputs, orderedNodes - ) - if attachment_docs: - existing = actionParams.get("documentList") or [] - # Prefer file docs from upstream; append any existing that look like binary attachments - def _is_binary_attachment(d): - if isinstance(d, dict) and d.get("documentData"): - try: - import json - json.loads(d["documentData"]) - return False # JSON = email content, not attachment - except (TypeError, ValueError): - return True - return bool(isinstance(d, dict) and (d.get("validationMetadata") or {}).get("fileId")) - extra = [x for x in (existing if isinstance(existing, list) else []) if _is_binary_attachment(x)] - actionParams["documentList"] = attachment_docs + extra - if not email_content: - # AI returns plain text or context: use as email body directly (no extra AI call) - ctx = _getContextFromUpstream(inp) - if ctx: - # Reply flow: get incoming email metadata (replyTo, subject, original docs) - incoming = _unpackIncomingEmail(_getIncomingEmailFromUpstream(nodeId, inputSources, nodeOutputs, orderedNodes)) - reply_to = None - reply_subject = None - reply_docs = [] - if incoming: - inc_ctx, doc_list, reply_to, first_subject = incoming - reply_docs = doc_list - reply_subject = ("Re: " + first_subject) if first_subject else None - actionParams["emailContent"] = { - "subject": reply_subject or actionParams.get("subject", "Draft"), - "body": ctx, - "to": [reply_to] if reply_to else (actionParams.get("to") or []), - } - actionParams["context"] = ctx - if reply_to and not actionParams.get("to"): - actionParams["to"] = [reply_to] - # Reply flow: attach original email(s) for proper reply - if reply_docs: - actionParams["replySourceDocuments"] = reply_docs - else: - # Fallback: incoming email from upstream (AI returned nothing usable) - incoming = _unpackIncomingEmail(_getIncomingEmailFromUpstream(nodeId, inputSources, nodeOutputs, orderedNodes)) - if 
incoming: - inc_ctx, doc_list, reply_to, first_subject = incoming - actionParams["context"] = inc_ctx - if doc_list and not actionParams.get("documentList"): - actionParams["documentList"] = doc_list - if reply_to and not actionParams.get("to"): - actionParams["to"] = [reply_to] - if first_subject and not actionParams.get("subject"): - actionParams["subject"] = "Re: " + first_subject - actionParams["replySourceDocuments"] = doc_list - else: - doc_count = len(_getDocumentsFromUpstream(inp)) - logger.warning( - "email.draftEmail: AI upstream returned %d doc(s) but context extraction failed (no subject/body, no plain text). " - "Ensure AI node outputs document with documentData.", - doc_count, - ) - actionParams["context"] = "(no context extracted from upstream – check AI node output)" - elif srcType in ("sharepoint.downloadFile", "sharepoint.readFile"): - # File itself is the context: pass as attachment, use filename as minimal context (no content extraction) - if not actionParams.get("context"): - inp = nodeOutputs.get(srcId) - docs = _getDocumentsFromUpstream(inp) - doc = docs[0] if docs else None - name = None - if isinstance(doc, dict): - name = doc.get("documentName") or doc.get("fileName") - elif doc and hasattr(doc, "documentName"): - name = getattr(doc, "documentName", None) or getattr(doc, "fileName", None) - ctx = name if name else "Attachment" - actionParams["context"] = ctx - actionParams["emailContent"] = { - "subject": actionParams.get("subject", "Draft"), - "body": ctx, - "to": actionParams.get("to"), - } - # documentList already merged from upstream (file as attachment) - else: - # Direct connection to email.checkEmail/searchEmail: use incoming email as context - if not actionParams.get("context"): - incoming = _unpackIncomingEmail(_getIncomingEmailFromUpstream(nodeId, inputSources, nodeOutputs, orderedNodes)) - if incoming: - inc_ctx, doc_list, reply_to, first_subject = incoming - actionParams["context"] = inc_ctx - if doc_list and not 
actionParams.get("documentList"): - actionParams["documentList"] = doc_list - if reply_to and not actionParams.get("to"): - actionParams["to"] = [reply_to] - if first_subject and not actionParams.get("subject"): - actionParams["subject"] = "Re: " + first_subject - actionParams["replySourceDocuments"] = doc_list + subject = resolvedParams.get("subject", "") + body = resolvedParams.get("body", "") + if subject or body: + contextParts = [] + if subject: + contextParts.append(f"Subject: {subject}") + if body: + contextParts.append(f"Body:\n{body}") + resolvedParams["context"] = "\n\n".join(contextParts) + resolvedParams.pop("subject", None) + resolvedParams.pop("body", None) - # Generic context handover: when upstream provides documents, pass first doc as content for actions that expect it - docList = actionParams.get("documentList") or resolvedParams.get("documentList") - if docList and "content" not in actionParams: - first = docList[0] if isinstance(docList, list) and docList else docList - # Actions like sharepoint.uploadFile / clickup.uploadAttachment consume content from context - actionParams["content"] = first + # 9. file.create: build context from upstream + if nodeType == "file.create" and "context" not in resolvedParams: + if 0 in inputSources: + srcId, _ = inputSources[0] + upstream = context.get("nodeOutputs", {}).get(srcId) + if upstream and isinstance(upstream, dict): + data = _unwrapTransit(upstream) + ctx = "" + if isinstance(data, dict): + ctx = data.get("context") or data.get("response") or data.get("text") or "" + if ctx: + resolvedParams["context"] = ctx - executor = ActionExecutor(self.services) - logger.info("ActionNodeExecutor node %s calling executeAction(%s, %s)", nodeId, methodName, actionName) - result = await executor.executeAction(methodName, actionName, actionParams) + # 10. 
Pass upstream documents as documentList if available + if "documentList" not in resolvedParams and 0 in inputSources: + srcId, _ = inputSources[0] + upstream = context.get("nodeOutputs", {}).get(srcId) + if upstream and isinstance(upstream, dict): + data = _unwrapTransit(upstream) + if isinstance(data, dict): + docs = data.get("documents") or data.get("documentList") + if docs: + resolvedParams["documentList"] = docs - # Extract context from result for unified output (AI text for downstream file nodes) - extracted_context = _extractContextFromResult(result) if result else None + # 11. Execute action + logger.info("ActionNodeExecutor node %s calling %s.%s", nodeId, methodName, actionName) + try: + executor = ActionExecutor(self.services) + result = await executor.executeAction(methodName, actionName, resolvedParams) + except Exception as e: + logger.exception("ActionNodeExecutor node %s FAILED: %s", nodeId, e) + return _normalizeError(e, outputSchema) - # AI nodes: include prompt in output; context = prompt + AI response (für file.create etc.) - prompt_text = (resolvedParams.get("prompt") or resolvedParams.get("aiPrompt") or "") - if not isinstance(prompt_text, str): - prompt_text = str(prompt_text) if prompt_text else "" - prompt_text = (prompt_text or "").strip() - if nodeType.startswith("ai.") and prompt_text: - full_context = ( - f"{prompt_text}\n\n{extracted_context}" if extracted_context else prompt_text - ) - else: - full_context = extracted_context or "" - out_prompt = prompt_text if nodeType.startswith("ai.") else "" + # 12. 
Build normalized output + docsList = [d.model_dump() if hasattr(d, "model_dump") else d for d in (result.documents or [])] - docs_list = [d.model_dump() if hasattr(d, "model_dump") else d for d in (result.documents or [])] + extractedContext = "" + if result.documents: + doc = result.documents[0] + raw = getattr(doc, "documentData", None) if hasattr(doc, "documentData") else (doc.get("documentData") if isinstance(doc, dict) else None) + if raw: + extractedContext = raw.decode("utf-8", errors="replace").strip() if isinstance(raw, bytes) else str(raw).strip() - # result = AI response text (for contentSources refs: prompt + context + result = full output, optionally duplicated) - out_result = extracted_context if nodeType.startswith("ai.") else None + promptText = str(resolvedParams.get("aiPrompt") or resolvedParams.get("prompt") or "").strip() out = { "success": result.success, "error": result.error, - "documents": docs_list, - "documentList": docs_list, - "prompt": out_prompt, - "context": full_context, - "result": out_result, + "documents": docsList, + "documentList": docsList, "data": result.model_dump() if hasattr(result, "model_dump") else {"success": result.success, "error": result.error}, } - if result.success and docs_list and nodeType.startswith("clickup."): + + if nodeType.startswith("ai."): + out["prompt"] = promptText + out["response"] = extractedContext + out["context"] = f"{promptText}\n\n{extractedContext}" if promptText and extractedContext else (extractedContext or promptText) + # Structured output + if extractedContext: + try: + parsed = json.loads(extractedContext) + if isinstance(parsed, dict): + out["responseData"] = parsed + except (json.JSONDecodeError, TypeError): + pass + + if nodeType.startswith("clickup.") and result.success and docsList: try: - d0 = docs_list[0] if isinstance(docs_list[0], dict) else {} + d0 = docsList[0] if isinstance(docsList[0], dict) else {} raw = d0.get("documentData") if isinstance(raw, str) and raw.strip(): parsed = 
json.loads(raw) if isinstance(parsed, dict) and parsed.get("id") is not None: out["taskId"] = str(parsed["id"]) - out["clickupTask"] = parsed + out["task"] = parsed except (json.JSONDecodeError, TypeError, ValueError): pass - logger.info( - "ActionNodeExecutor node %s result: success=%s error=%s doc_count=%d", - nodeId, - result.success, - result.error, - len(out.get("documents", [])), - ) - return out + + return _normalizeToSchema(out, outputSchema) diff --git a/modules/workflows/automation2/executors/dataExecutor.py b/modules/workflows/automation2/executors/dataExecutor.py new file mode 100644 index 00000000..cdbcf544 --- /dev/null +++ b/modules/workflows/automation2/executors/dataExecutor.py @@ -0,0 +1,214 @@ +# Copyright (c) 2025 Patrick Motsch +# Data manipulation node executor: data.aggregate, data.transform, data.filter. + +import logging +from typing import Any, Dict + +from modules.features.graphicalEditor.portTypes import _unwrapTransit, _wrapTransit + +logger = logging.getLogger(__name__) + + +class DataExecutor: + """Execute data.aggregate, data.transform, data.filter nodes.""" + + async def execute( + self, + node: Dict[str, Any], + context: Dict[str, Any], + ) -> Any: + nodeType = node.get("type", "") + nodeId = node.get("id", "") + nodeOutputs = context.get("nodeOutputs", {}) + inputSources = context.get("inputSources", {}).get(nodeId, {}) + + logger.info("DataExecutor node %s type=%s", nodeId, nodeType) + + if nodeType == "data.aggregate": + return await self._aggregate(node, nodeOutputs, nodeId, inputSources, context) + if nodeType == "data.transform": + return await self._transform(node, nodeOutputs, nodeId, inputSources) + if nodeType == "data.filter": + return await self._filter(node, nodeOutputs, nodeId, inputSources) + + logger.debug("DataExecutor node %s unhandled type %s", nodeId, nodeType) + return None + + async def _aggregate( + self, + node: Dict, + nodeOutputs: Dict, + nodeId: str, + inputSources: Dict, + context: Dict, + ) -> Any: + 
""" + In loop context: accumulation is handled by the engine (_aggregateAccumulators). + Outside loop: collect the single input. + """ + inp = self._getInput(inputSources, nodeOutputs) + mode = (node.get("parameters") or {}).get("mode", "collect") + + if inp is None: + return {"items": [], "count": 0, "_success": True} + + data = _unwrapTransit(inp) if isinstance(inp, dict) and inp.get("_transit") else inp + + if mode == "collect": + items = [data] if data is not None else [] + elif mode == "concat": + items = data if isinstance(data, list) else [data] if data is not None else [] + elif mode == "sum": + val = data if isinstance(data, (int, float)) else 0 + items = [val] + elif mode == "count": + items = [1] if data is not None else [] + else: + items = [data] if data is not None else [] + + return {"items": items, "count": len(items), "_success": True} + + async def _transform( + self, + node: Dict, + nodeOutputs: Dict, + nodeId: str, + inputSources: Dict, + ) -> Any: + """Apply mappings to restructure data.""" + from modules.workflows.automation2.graphUtils import resolveParameterReferences + + inp = self._getInput(inputSources, nodeOutputs) + data = _unwrapTransit(inp) if isinstance(inp, dict) and inp.get("_transit") else inp + mappings = (node.get("parameters") or {}).get("mappings", []) + + result = {} + for mapping in mappings: + if not isinstance(mapping, dict): + continue + outputField = mapping.get("outputField") + if not outputField: + continue + source = mapping.get("source") + if source and isinstance(source, dict) and source.get("type") == "ref": + resolved = resolveParameterReferences(source, nodeOutputs) + result[outputField] = resolved + elif source and isinstance(source, dict) and source.get("type") == "value": + result[outputField] = source.get("value") + elif isinstance(data, dict) and mapping.get("sourceField"): + result[outputField] = data.get(mapping["sourceField"]) + else: + result[outputField] = source + + result["_success"] = True + return 
result + + async def _filter( + self, + node: Dict, + nodeOutputs: Dict, + nodeId: str, + inputSources: Dict, + ) -> Any: + """Filter items by condition expression. Returns Transit envelope.""" + inp = self._getInput(inputSources, nodeOutputs) + data = _unwrapTransit(inp) if isinstance(inp, dict) and inp.get("_transit") else inp + condition = (node.get("parameters") or {}).get("condition", "") + + items = self._extractItems(data) + originalCount = len(items) + + if not condition: + filtered = items + else: + filtered = [item for item in items if self._evalFilterCondition(item, condition)] + + filteredData = data + if isinstance(data, dict): + filteredData = dict(data) + listKey = self._findListKey(data) + if listKey: + filteredData[listKey] = filtered + elif isinstance(data, list): + filteredData = filtered + + return _wrapTransit(filteredData, { + "originalCount": originalCount, + "filteredCount": len(filtered), + }) + + def _getInput(self, inputSources: Dict, nodeOutputs: Dict) -> Any: + """Get data from the first connected input port.""" + if 0 not in inputSources: + return None + srcId, _ = inputSources[0] + return nodeOutputs.get(srcId) + + def _extractItems(self, data: Any) -> list: + """Extract the list of items from various data shapes.""" + if isinstance(data, list): + return data + if isinstance(data, dict): + for key in ("items", "tasks", "emails", "files", "documents", "documentList"): + val = data.get(key) + if isinstance(val, list): + return val + return [] + + def _findListKey(self, data: Dict) -> str: + """Find the key that holds the main list in a dict.""" + for key in ("items", "tasks", "emails", "files", "documents", "documentList"): + if isinstance(data.get(key), list): + return key + return "" + + def _evalFilterCondition(self, item: Any, condition: Any) -> bool: + """ + Evaluate a filter condition against a single item. + Supports structured conditions {field, operator, value} or simple string expressions. 
+ """ + if isinstance(condition, dict): + field = condition.get("field", "") + operator = condition.get("operator", "eq") + value = condition.get("value") + left = item.get(field) if isinstance(item, dict) else item + return self._compareValues(left, operator, value) + + if isinstance(condition, str) and condition.strip(): + try: + if isinstance(item, dict): + return bool(eval(condition, {"__builtins__": {}}, item)) + return bool(item) + except Exception: + return True + + return True + + def _compareValues(self, left: Any, operator: str, right: Any) -> bool: + """Compare two values with the given operator.""" + if operator == "eq": + return left == right + if operator == "neq": + return left != right + if operator == "contains": + return right is not None and str(right) in str(left or "") + if operator == "startsWith": + return str(left or "").startswith(str(right or "")) + if operator == "isEmpty": + return left is None or left == "" or (isinstance(left, (list, dict)) and len(left) == 0) + if operator == "isNotEmpty": + return left is not None and left != "" and (not isinstance(left, (list, dict)) or len(left) > 0) + if operator in ("lt", "lte", "gt", "gte"): + try: + l = float(left) if left is not None else 0 + r = float(right) if right is not None else 0 + if operator == "lt": + return l < r + if operator == "lte": + return l <= r + if operator == "gt": + return l > r + return l >= r + except (TypeError, ValueError): + return False + return True diff --git a/modules/workflows/automation2/executors/flowExecutor.py b/modules/workflows/automation2/executors/flowExecutor.py index 0df17335..d81abff6 100644 --- a/modules/workflows/automation2/executors/flowExecutor.py +++ b/modules/workflows/automation2/executors/flowExecutor.py @@ -1,9 +1,11 @@ # Copyright (c) 2025 Patrick Motsch -# Flow control node executor (ifElse, switch, loop). +# Flow control node executor (ifElse, switch, loop, merge). 
import logging from typing import Any, Dict +from modules.features.graphicalEditor.portTypes import _wrapTransit, _unwrapTransit + logger = logging.getLogger(__name__) @@ -30,16 +32,20 @@ class FlowExecutor: if nodeType == "flow.ifElse": out = await self._ifElse(node, nodeOutputs, nodeId, inputSources) - logger.info("FlowExecutor node %s ifElse -> %s", nodeId, out) + logger.info("FlowExecutor node %s ifElse -> branch=%s", nodeId, out.get("_meta", {}).get("branch")) return out if nodeType == "flow.switch": out = await self._switch(node, nodeOutputs, nodeId, inputSources) - logger.info("FlowExecutor node %s switch -> %s", nodeId, out) + logger.info("FlowExecutor node %s switch -> match=%s", nodeId, out.get("_meta", {}).get("match")) return out if nodeType == "flow.loop": out = await self._loop(node, nodeOutputs, nodeId, inputSources) logger.info("FlowExecutor node %s loop -> %s", nodeId, out) return out + if nodeType == "flow.merge": + out = await self._merge(node, nodeOutputs, nodeId, inputSources, context) + logger.info("FlowExecutor node %s merge -> keys=%s", nodeId, list(out.keys()) if isinstance(out, dict) else None) + return out logger.debug("FlowExecutor node %s unhandled type %s -> None", nodeId, nodeType) return None @@ -62,7 +68,10 @@ class FlowExecutor: condParam = (node.get("parameters") or {}).get("condition") inp = self._getInputData(nodeId, {nodeId: inputSources}, nodeOutputs) ok = self._evalConditionParam(condParam, nodeOutputs) - return {"branch": 0 if ok else 1, "conditionResult": ok, "input": inp} + return _wrapTransit( + _unwrapTransit(inp) if inp else inp, + {"branch": 0 if ok else 1, "conditionResult": ok}, + ) def _evalConditionParam(self, condParam: Any, nodeOutputs: Dict) -> bool: """Evaluate condition: structured {type,ref,operator,value} or legacy string/ref.""" @@ -201,10 +210,17 @@ class FlowExecutor: from modules.workflows.automation2.graphUtils import resolveParameterReferences value = resolveParameterReferences(valueExpr, nodeOutputs) 
cases = (node.get("parameters") or {}).get("cases", []) + inp = self._getInputData(nodeId, {nodeId: inputSources}, nodeOutputs) for i, c in enumerate(cases): if self._evalSwitchCase(value, c): - return {"match": i, "value": value} - return {"match": -1, "value": value} + return _wrapTransit( + _unwrapTransit(inp) if inp else inp, + {"match": i, "value": value}, + ) + return _wrapTransit( + _unwrapTransit(inp) if inp else inp, + {"match": -1, "value": value}, + ) def _evalSwitchCase(self, left: Any, case: Any) -> bool: """ @@ -265,8 +281,47 @@ class FlowExecutor: if isinstance(items, list): pass elif isinstance(items, dict): - # Convert form payload / object to list of {name, value} for "for each field" items = [{"name": k, "value": v} for k, v in items.items()] else: items = [items] if items is not None else [] return {"items": items, "count": len(items)} + + async def _merge(self, node: Dict, nodeOutputs: Dict, nodeId: str, inputSources: Dict, context: Dict) -> Any: + """Merge multiple branch inputs. 
mode: first | all | append.""" + mode = (node.get("parameters") or {}).get("mode", "first") + inputs: Dict[int, Any] = {} + for portIdx, (srcId, srcOut) in inputSources.items(): + out = nodeOutputs.get(srcId) + if out is not None: + inputs[portIdx] = _unwrapTransit(out) + + first = None + merged: Dict = {} + for idx in sorted(inputs.keys()): + val = inputs[idx] + if first is None: + first = val + if isinstance(val, dict): + merged.update(val) + + if mode == "first": + pass + elif mode == "all": + pass + elif mode == "append": + allItems = [] + for val in inputs.values(): + if isinstance(val, list): + allItems.extend(val) + elif isinstance(val, dict) and "items" in val: + allItems.extend(val["items"]) + elif val is not None: + allItems.append(val) + merged["items"] = allItems + + return { + "inputs": inputs, + "first": first, + "merged": merged, + "_success": True, + } diff --git a/modules/workflows/automation2/graphUtils.py b/modules/workflows/automation2/graphUtils.py index 0f79b882..1cd2dc3e 100644 --- a/modules/workflows/automation2/graphUtils.py +++ b/modules/workflows/automation2/graphUtils.py @@ -113,6 +113,11 @@ def validateGraph(graph: Dict[str, Any], nodeTypeIds: Set[str]) -> List[str]: if nid not in nodeIds: errors.append(f"Connection references non-existent node {nid}") + # Soft port compatibility check (warnings, not errors) + warnings = _checkPortCompatibility(nodes, connMap) + if warnings: + logger.info("validateGraph port warnings: %s", warnings) + if errors: logger.debug("validateGraph errors: %s", errors) else: @@ -120,6 +125,55 @@ def validateGraph(graph: Dict[str, Any], nodeTypeIds: Set[str]) -> List[str]: return errors +def _checkPortCompatibility( + nodes: List[Dict], + connMap: Dict[str, List[Tuple[str, int, int]]], +) -> List[str]: + """ + Soft check: warn if connected port types are incompatible. + Returns warnings (never blocks execution). 
+ """ + from modules.features.graphicalEditor.nodeDefinitions import STATIC_NODE_TYPES + + nodeDefMap = {n["id"]: n for n in STATIC_NODE_TYPES} + nodeById = {n["id"]: n for n in nodes if n.get("id")} + warnings = [] + + for tgt, pairs in connMap.items(): + tgtNode = nodeById.get(tgt) + if not tgtNode: + continue + tgtDef = nodeDefMap.get(tgtNode.get("type", "")) + if not tgtDef: + continue + tgtInputPorts = tgtDef.get("inputPorts", {}) + + for src, srcOut, tgtIn in pairs: + srcNode = nodeById.get(src) + if not srcNode: + continue + srcDef = nodeDefMap.get(srcNode.get("type", "")) + if not srcDef: + continue + srcOutputPorts = srcDef.get("outputPorts", {}) + srcPort = srcOutputPorts.get(srcOut, {}) + tgtPort = tgtInputPorts.get(tgtIn, {}) + + srcSchema = srcPort.get("schema", "") + accepts = tgtPort.get("accepts", []) + + if not accepts or not srcSchema: + continue + if "Transit" in accepts: + continue + if srcSchema not in accepts: + warnings.append( + f"Port mismatch: {src}[out:{srcOut}] ({srcSchema}) -> {tgt}[in:{tgtIn}] (accepts: {accepts})" + ) + + return warnings + + def topoSort(nodes: List[Dict], connectionMap: Dict[str, List[Tuple[str, int, int]]]) -> List[Dict]: """ Topological sort: start from trigger nodes, then BFS by connections. @@ -198,9 +252,11 @@ def resolveParameterReferences(value: Any, nodeOutputs: Dict[str, Any]) -> Any: path = value.get("path") if node_id is not None and isinstance(path, (list, tuple)): data = nodeOutputs.get(node_id) + # Unwrap transit envelopes to access the real data + if isinstance(data, dict) and data.get("_transit"): + data = data.get("data", data) plist = list(path) resolved = _get_by_path(data, plist) - # input.form historically stored flat field dict; refs use payload. 
if ( resolved is None and isinstance(data, dict) @@ -214,6 +270,10 @@ def resolveParameterReferences(value: Any, nodeOutputs: Dict[str, Any]) -> Any: if value.get("type") == "value": inner = value.get("value") return resolveParameterReferences(inner, nodeOutputs) + if value.get("type") == "system": + variable = value.get("variable", "") + from modules.features.graphicalEditor.portTypes import _resolveSystemVariable + return _resolveSystemVariable(variable, nodeOutputs.get("_context", {})) return {k: resolveParameterReferences(v, nodeOutputs) for k, v in value.items()} if isinstance(value, str): diff --git a/modules/workflows/methods/methodTrustee/actions/extractFromFiles.py b/modules/workflows/methods/methodTrustee/actions/extractFromFiles.py index 07e9a046..c7f9ac48 100644 --- a/modules/workflows/methods/methodTrustee/actions/extractFromFiles.py +++ b/modules/workflows/methods/methodTrustee/actions/extractFromFiles.py @@ -15,7 +15,7 @@ import io from datetime import datetime, timezone from typing import Dict, Any, List, Optional, Tuple -from modules.datamodels.datamodelChat import ActionResult, ActionDocument, ChatDocument +from modules.datamodels.datamodelChat import ActionResult, ActionDocument, ChatDocument, ChatMessage from modules.datamodels.datamodelDocref import DocumentReferenceList, DocumentItemReference from modules.datamodels.datamodelAi import AiCallOptions, AiCallRequest, OperationTypeEnum @@ -500,10 +500,13 @@ async def extractFromFiles(self, parameters: Dict[str, Any]) -> ActionResult: if not filesToProcess: return ActionResult.isSuccess(documents=[]) - # Attach all files as ChatDocuments to the workflow so AI can resolve them - chatDocDumps = [] + # Attach all files as ChatDocuments so AI can resolve them via DocumentReferenceList. 
+ # When running inside the graph engine there is no real ChatWorkflow (workflow.id is None), + # so we create in-memory ChatDocument objects and inject them directly into the placeholder + # workflow's messages list instead of going through storeMessageWithDocuments. + chatDocs = [] for f in filesToProcess: - chatDoc = ChatDocument( + chatDocs.append(ChatDocument( id=str(uuid.uuid4()), mandateId=self.services.mandateId or "", featureInstanceId=featureInstanceId or "", @@ -512,27 +515,46 @@ async def extractFromFiles(self, parameters: Dict[str, Any]) -> ActionResult: fileName=f["fileName"], fileSize=0, mimeType=f["mimeType"], + )) + + workflow = self.services.workflow + _wfId = getattr(workflow, "id", None) or "" + hasRealWorkflow = workflow is not None and bool(_wfId) and not str(_wfId).startswith("transient-") + + if hasRealWorkflow: + chatDocDumps = [d.model_dump() for d in chatDocs] + messageData = { + "id": f"msg_extract_{uuid.uuid4().hex[:12]}", + "documentsLabel": "extract_files", + "role": "user", + "status": "step", + "message": f"Extract from {len(filesToProcess)} file(s)", + } + createdMessage = self.services.chat.storeMessageWithDocuments( + workflow, messageData, chatDocDumps, ) - chatDocDumps.append(chatDoc.model_dump()) - messageData = { - "id": f"msg_extract_{uuid.uuid4().hex[:12]}", - "documentsLabel": "extract_files", - "role": "user", - "status": "step", - "message": f"Extract from {len(filesToProcess)} file(s)", - } - createdMessage = self.services.chat.storeMessageWithDocuments( - self.services.workflow, - messageData, - chatDocDumps, - ) - if not createdMessage or not createdMessage.documents: - return ActionResult.isFailure(error="Failed to attach documents to workflow") - # Map fileId -> ChatDocument id for AI reference - fileIdToChatDocId = {} - for i, f in enumerate(filesToProcess): - if i < len(createdMessage.documents): - fileIdToChatDocId[f["fileId"]] = createdMessage.documents[i].id + if not createdMessage or not 
createdMessage.documents: + return ActionResult.isFailure(error="Failed to attach documents to workflow") + fileIdToChatDocId = {} + for i, f in enumerate(filesToProcess): + if i < len(createdMessage.documents): + fileIdToChatDocId[f["fileId"]] = createdMessage.documents[i].id + else: + # Graph-engine path: inject documents into the placeholder workflow so + # getChatDocumentsFromDocumentList can find them via workflow.messages. + msgId = f"msg_extract_{uuid.uuid4().hex[:12]}" + placeholderMsg = ChatMessage( + id=msgId, + workflowId=getattr(workflow, "id", None) or "transient", + documentsLabel="extract_files", + role="user", + status="step", + message=f"Extract from {len(filesToProcess)} file(s)", + documents=chatDocs, + ) + if workflow is not None and hasattr(workflow, "messages"): + workflow.messages.append(placeholderMsg) + fileIdToChatDocId = {f["fileId"]: chatDocs[i].id for i, f in enumerate(filesToProcess)} expenseList, bankList = await _getAccountLists(self, featureInstanceId) diff --git a/modules/workflows/processing/modes/modeDynamic.py b/modules/workflows/processing/modes/modeDynamic.py index c563a74a..ab992cd4 100644 --- a/modules/workflows/processing/modes/modeDynamic.py +++ b/modules/workflows/processing/modes/modeDynamic.py @@ -917,7 +917,7 @@ class DynamicMode(BaseMode): 'success': observation.success, 'resultLabel': observation.resultLabel, 'documentsCount': observation.documentsCount, - 'previews': [p.model_dump(exclude_none=True) if hasattr(p, 'model_dump') else p.dict() for p in observation.previews] if observation.previews else [], + 'previews': [p.model_dump(exclude_none=True) for p in observation.previews] if observation.previews else [], 'notes': observation.notes, 'contentAnalysis': observation.contentAnalysis if observation.contentAnalysis else {} } diff --git a/modules/workflows/processing/shared/placeholderFactory.py b/modules/workflows/processing/shared/placeholderFactory.py index 3d1a9d83..430204bd 100644 --- 
a/modules/workflows/processing/shared/placeholderFactory.py +++ b/modules/workflows/processing/shared/placeholderFactory.py @@ -48,8 +48,6 @@ def _observationToDict(obs) -> dict: return obs.copy() if hasattr(obs, 'model_dump'): return obs.model_dump(exclude_none=True) - if hasattr(obs, 'dict'): - return obs.dict() return {"raw": str(obs)} diff --git a/scripts/build_ui_language_seed_json.py b/scripts/build_ui_language_seed_json.py new file mode 100644 index 00000000..e610ea11 --- /dev/null +++ b/scripts/build_ui_language_seed_json.py @@ -0,0 +1,100 @@ +"""Build ui_language_seed.json from frontend_nyla locale TS files (one-off / CI).""" + +from __future__ import annotations + +import json +import re +from pathlib import Path + +_REPO = Path(__file__).resolve().parents[2] +_SRC = _REPO / "frontend_nyla" / "src" / "locales" +_OUT = _REPO / "gateway" / "modules" / "migration" / "seedData" / "ui_language_seed.json" + + +def _unescape_ts_single_quoted(raw: str) -> str: + out: list[str] = [] + i = 0 + while i < len(raw): + c = raw[i] + if c == "\\" and i + 1 < len(raw): + n = raw[i + 1] + if n == "n": + out.append("\n") + i += 2 + continue + if n == "r": + out.append("\r") + i += 2 + continue + if n == "t": + out.append("\t") + i += 2 + continue + out.append(n) + i += 2 + continue + out.append(c) + i += 1 + return "".join(out) + + +def _parse_locale(path: Path) -> dict[str, str]: + text = path.read_text(encoding="utf-8") + mapping: dict[str, str] = {} + line_re = re.compile( + r"^\s*'((?:\\.|[^'])*)':\s*'((?:\\.|[^'])*)'\s*,?\s*(//.*)?$" + ) + for line in text.splitlines(): + m = line_re.match(line.strip()) + if not m: + continue + key = _unescape_ts_single_quoted(m.group(1)) + val = _unescape_ts_single_quoted(m.group(2)) + mapping[key] = val + return mapping + + +def main() -> None: + deMap = _parse_locale(_SRC / "de.ts") + enMap = _parse_locale(_SRC / "en.ts") + frMap = _parse_locale(_SRC / "fr.ts") + + dePlain = {v: v for v in deMap.values()} + enPlain: dict[str, str] = 
{} + frPlain: dict[str, str] = {} + for dotKey, germanText in deMap.items(): + if dotKey in enMap: + enPlain[germanText] = enMap[dotKey] + if dotKey in frMap: + frPlain[germanText] = frMap[dotKey] + + payload = [ + { + "id": "de", + "label": "Deutsch", + "keys": dePlain, + "status": "complete", + "isDefault": True, + }, + { + "id": "en", + "label": "English", + "keys": enPlain, + "status": "complete", + "isDefault": False, + }, + { + "id": "fr", + "label": "Français", + "keys": frPlain, + "status": "complete", + "isDefault": False, + }, + ] + _OUT.parent.mkdir(parents=True, exist_ok=True) + _OUT.write_text(json.dumps(payload, ensure_ascii=False, indent=2), encoding="utf-8") + print("Wrote", _OUT, "keys de/en/fr", len(dePlain), len(enPlain), len(frPlain)) + + +if __name__ == "__main__": + main() diff --git a/scripts/i18n_rekey_plaintext_keys.py b/scripts/i18n_rekey_plaintext_keys.py new file mode 100644 index 00000000..cf0e7362 --- /dev/null +++ b/scripts/i18n_rekey_plaintext_keys.py @@ -0,0 +1,136 @@ +""" +Rekey frontend t('dot.notation') -> t('Deutscher Klartext') using locales/de.ts mapping. + +Usage (from repo root): + python gateway/scripts/i18n_rekey_plaintext_keys.py + +Excludes: src/locales/, this script's output is in-place file edits. 
+""" + +from __future__ import annotations + +import re +import sys +from pathlib import Path + + +_REPO = Path(__file__).resolve().parents[2] +_SRC = _REPO / "frontend_nyla" / "src" +_DE_FILE = _SRC / "locales" / "de.ts" + + +def _unescape_ts_single_quoted(raw: str) -> str: + out: list[str] = [] + i = 0 + while i < len(raw): + c = raw[i] + if c == "\\" and i + 1 < len(raw): + n = raw[i + 1] + if n == "n": + out.append("\n") + i += 2 + continue + if n == "r": + out.append("\r") + i += 2 + continue + if n == "t": + out.append("\t") + i += 2 + continue + out.append(n) + i += 2 + continue + out.append(c) + i += 1 + return "".join(out) + + +def _escape_for_ts_single_quoted(s: str) -> str: + return ( + s.replace("\\", "\\\\") + .replace("'", "\\'") + .replace("\n", "\\n") + .replace("\r", "\\r") + .replace("\t", "\\t") + ) + + +def _parse_de_ts(path: Path) -> dict[str, str]: + text = path.read_text(encoding="utf-8") + mapping: dict[str, str] = {} + line_re = re.compile( + r"^\s*'((?:\\.|[^'])*)':\s*'((?:\\.|[^'])*)'\s*,?\s*(//.*)?$" + ) + for line in text.splitlines(): + m = line_re.match(line.strip()) + if not m: + continue + key = _unescape_ts_single_quoted(m.group(1)) + val = _unescape_ts_single_quoted(m.group(2)) + mapping[key] = val + return mapping + + +def _iter_source_files(): + for ext in ("*.tsx", "*.ts"): + for p in _SRC.rglob(ext): + rel = p.relative_to(_SRC).as_posix() + if rel.startswith("locales/"): + continue + yield p + + +def _rekey_content(content: str, mapping: dict[str, str]) -> tuple[str, int]: + changes = 0 + keys = sorted(mapping.keys(), key=len, reverse=True) + for key in keys: + if f"'{key}'" not in content: + continue + german = mapping[key] + escaped = _escape_for_ts_single_quoted(german) + repl_single = f"t('{escaped}')" + key_re = re.escape(key) + + # t('key', "..." ) + content, c = re.subn( + rf"t\(\s*'{key_re}'\s*,\s*\"(?:\\.|[^\"])*\"\s*\)", + repl_single, + content, + ) + changes += c + + # t('key', '...' 
) + content, c = re.subn( + rf"t\(\s*'{key_re}'\s*,\s*'(?:\\.|[^'])*'\s*\)", + repl_single, + content, + ) + changes += c + + # t('key') + content, c = re.subn(rf"t\(\s*'{key_re}'\s*\)", repl_single, content) + changes += c + return content, changes + + +def main() -> int: + if not _DE_FILE.is_file(): + print("Missing", _DE_FILE, file=sys.stderr) + return 1 + mapping = _parse_de_ts(_DE_FILE) + print("Loaded", len(mapping), "entries from de.ts") + total = 0 + for path in _iter_source_files(): + raw = path.read_text(encoding="utf-8") + new_raw, n = _rekey_content(raw, mapping) + if n and new_raw != raw: + path.write_text(new_raw, encoding="utf-8", newline="\n") + print(path.relative_to(_REPO), n, "replacements") + total += n + print("Done. Total replacements:", total) + return 0 + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/scripts/script_db_adapt_to_models.py b/scripts/script_db_adapt_to_models.py index 163c4cb8..6e5ca7a3 100644 --- a/scripts/script_db_adapt_to_models.py +++ b/scripts/script_db_adapt_to_models.py @@ -41,7 +41,7 @@ from modules.shared.configuration import APP_CONFIG DATABASE_CONFIG = { "poweron_app": ("DB_APP", ["datamodelUam", "datamodelRbac", "datamodelSecurity"]), "poweron_chat": ("DB_CHAT", ["datamodelChat"]), - "poweron_management": ("DB_MANAGEMENT", ["datamodelWorkflow", "datamodelFiles"]), + "poweron_management": ("DB_MANAGEMENT", ["datamodelWorkflow", "datamodelFiles", "datamodelUiLanguage"]), } # Python-Typ → PostgreSQL-Typ Mapping